# File: feed/serializers/extensions.py (repo: cul-it/arxiv-rss)
"""Classes derived from the Feedgen extension classes."""
from typing import Dict, List, Optional
from lxml import etree
from lxml.etree import Element
from flask import current_app
from feedgen.ext.base import BaseEntryExtension, BaseExtension
from feed.domain import Author, Media
class ArxivExtension(BaseExtension):
"""Extension of the Feedgen class to allow us to change its behavior."""
def extend_atom(self: BaseExtension, atom_feed: Element) -> Element:
"""Allow the extension to modify the initial feed tree for Atom.
Parameters
----------
atom_feed : Element
The feed's root element.
Returns
-------
atom_feed : Element
The feed's root element.
"""
return atom_feed
def extend_rss(self: BaseExtension, rss_feed: Element) -> Element:
"""Allow the extension to modify the initial feed tree for RSS.
Parameters
----------
rss_feed : Element
The feed's root element.
Returns
-------
rss_feed : Element
The feed's root element.
"""
return rss_feed
def extend_ns(self: BaseExtension) -> Dict[str, str]:
"""
Define the feed's namespaces.
Returns
-------
namespaces : Dict[str, str]
Definitions of the "arxiv" namespaces.
"""
return {
"arxiv": "http://arxiv.org/schemas/atom",
"content": "http://purl.org/rss/1.0/modules/content/",
"taxo": "http://purl.org/rss/1.0/modules/taxonomy/",
"syn": "http://purl.org/rss/1.0/modules/syndication/",
"admin": "http://webns.net/mvcb/",
"media": "http://search.yahoo.com/mrss",
}
class ArxivAtomExtension(BaseEntryExtension):
"""Atom only extension."""
def extend_ns(self: BaseExtension) -> Dict[str, str]:
"""
Define the feed's namespaces.
Returns
-------
namespaces : Dict[str, str]
Definitions of the "arxiv" namespaces.
"""
return {
"arxiv": "http://arxiv.org/schemas/atom",
}
class ArxivEntryExtension(BaseEntryExtension):
"""Extension of the Entry class to allow us to change its behavior."""
def __init__(self: BaseEntryExtension):
"""Initialize the member values to all be empty."""
self.__arxiv_authors: List[Author] = []
self.__arxiv_media: List[Media] = []
self.__arxiv_comment: Optional[str] = None
self.__arxiv_primary_category: Optional[str] = None
self.__arxiv_doi: Optional[dict] = None
self.__arxiv_affiliation: Optional[str] = None
self.__arxiv_journal_ref: Optional[str] = None
self.__arxiv_affiliations: Dict = {}
def __add_media(self, entry: Element) -> None:
for media in self.__arxiv_media:
group = etree.SubElement(
entry, "{http://search.yahoo.com/mrss}group"
)
title = etree.SubElement(
group, "{http://search.yahoo.com/mrss}title"
)
title.text = media.title
etree.SubElement(
group,
"{http://search.yahoo.com/mrss}content",
attrib={"url": media.url, "type": media.type},
)
def extend_atom(self, entry: Element) -> Element:
"""
Allow the extension to modify the entry element for Atom serialization.
Parameters
----------
entry : Element
The FeedEntry to modify.
Returns
-------
entry : Element
The modified entry.
"""
if self.__arxiv_comment:
comment_element = etree.SubElement(
entry, "{http://arxiv.org/schemas/atom}comment"
)
comment_element.text = self.__arxiv_comment
if self.__arxiv_primary_category:
etree.SubElement(
entry,
"{http://arxiv.org/schemas/atom}primary_category",
attrib=self.__arxiv_primary_category,
)
if self.__arxiv_journal_ref:
journal_ref_element = etree.SubElement(
entry, "{http://arxiv.org/schemas/atom}journal_ref"
)
journal_ref_element.text = self.__arxiv_journal_ref
if self.__arxiv_doi:
for doi in self.__arxiv_doi:
doi_element = etree.SubElement(
entry, "{http://arxiv.org/schemas/atom}doi"
)
doi_element.text = doi
# Check each of the entry's author nodes
for entry_child in entry:
if entry_child.tag == "author":
author = entry_child
for author_child in author:
# If the author's name is in the affiliation dictionary,
# add Elements for all of its affiliations.
if author_child.tag == "name":
name = author_child.text
affiliations = self.__arxiv_affiliations.get(name, [])
for affiliation in affiliations:
element = etree.SubElement(
author,
"{http://arxiv.org/schemas/atom}affiliation",
)
element.text = affiliation
self.__add_media(entry=entry)
return entry
def extend_rss(self, entry: Element) -> Element:
"""Allow the extension to modify the entry element for RSS.
Parameters
----------
entry : Element
The FeedEntry to modify.
Returns
-------
entry : Element
The modified entry.
"""
base_server: str = current_app.config["BASE_SERVER"]
for entry_child in entry:
if entry_child.tag == "description":
description = "<p>Authors: "
first = True
for author in self.__arxiv_authors:
if first:
first = False
else:
description += ", "
name = (
f"{author.last_name},"
f"+{author.initials.replace(' ', '+')}"
)
description += (
f'<a href="http://{base_server}/search/?query={name}&'
f'searchtype=author">{author.full_name}</a>'
)
description += f"</p><p>{entry_child.text}</p>"
entry_child.text = description
self.__add_media(entry=entry)
return entry
def author(self, author: Author) -> None:
"""Add an author value to this entry.
Parameters
----------
author : Author
Paper author.
"""
self.__arxiv_authors.append(author)
def media(self, media: Media) -> None:
"""Add a media item.
Parameters
----------
media: Dict[str, str]
Dictionary with url and type attributes.
"""
self.__arxiv_media.append(media)
def comment(self, text: str) -> None:
"""Assign the comment value to this entry.
Parameters
----------
text : str
The new comment text.
"""
self.__arxiv_comment = text
def primary_category(self, text: str) -> None:
"""Assign the primary_category value to this entry.
Parameters
----------
text : str
The new primary_category name.
"""
self.__arxiv_primary_category = text
def journal_ref(self, text: str) -> None:
"""Assign the journal_ref value to this entry.
Parameters
----------
text : str
The new journal_ref value.
"""
self.__arxiv_journal_ref = text
def doi(self, doi_list: Dict[str, str]) -> None:
"""Assign the set of DOI definitions for this entry.
Parameters
----------
doi_list : Dict[str, str]
A dictionary of DOI assignments.
"""
self.__arxiv_doi = doi_list
def affiliation(self, full_name: str, affiliations: List[str]) -> None:
"""Assign an affiliation for one author of this entry.
Parameters
----------
full_name : str
An author's full name.
affiliations : List[str]
The code for the author's affiliated institution.
"""
        self.__arxiv_affiliations[full_name] = affiliations
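
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the upstream file): it exercises
# ArxivEntryExtension directly against a hand-built lxml <entry> element, so it
# only relies on the class above plus lxml itself. The author name, affiliation
# and comment text are made-up illustration values.
if __name__ == "__main__":
    ext = ArxivEntryExtension()
    ext.comment("12 pages, 3 figures")
    ext.affiliation("Jane Doe", ["Example University"])

    entry = etree.Element("entry")
    author = etree.SubElement(entry, "author")
    name = etree.SubElement(author, "name")
    name.text = "Jane Doe"

    # extend_atom() appends the arxiv:comment and arxiv:affiliation elements.
    ext.extend_atom(entry)
    print(etree.tostring(entry, pretty_print=True).decode())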
# File: discovery-infra/test_infra/helper_classes/config/controller_config.py (repo: lranjbar/assisted-test-infra)
from abc import ABC
from pathlib import Path
from typing import Any
from dataclasses import dataclass
from test_infra import consts
from test_infra.utils.global_variables import GlobalVariables
from .base_config import _BaseConfig
global_variables = GlobalVariables()
@dataclass
class BaseNodeConfig(_BaseConfig, ABC):
platform: str = None
is_ipv6: bool = None
bootstrap_in_place: bool = None
private_ssh_key_path: Path = None
working_dir: str = consts.WORKING_DIR
master_memory: int = None
master_vcpu: int = None
masters_count: int = None
nodes_count: int = None
master_cpu_mode: str = None
master_disk: int = None # disk size in MB.
master_disk_size_gib: str = None # disk size in GB.
master_disk_count: int = None # number of disks to create
worker_memory: int = None
worker_vcpu: int = None
workers_count: int = None
worker_cpu_mode: str = None
worker_disk: int = None
worker_disk_count: int = None
network_mtu: int = None
@staticmethod
def get_default(key, default=None) -> Any:
        return getattr(global_variables, key)
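
# Editor's sketch (not part of the upstream file): a concrete platform config
# would typically subclass BaseNodeConfig and lean on get_default() to pull any
# unset value from the shared GlobalVariables object. The subclass name and the
# extra field below are illustrative assumptions, not classes that exist in
# test_infra, and instantiating it still depends on _BaseConfig's behaviour.
#
# @dataclass
# class ExampleTerraformConfig(BaseNodeConfig):
#     network_name: str = None
#
# config = ExampleTerraformConfig(masters_count=3, workers_count=2)
# print(config.get_default("masters_count"))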
# File: bitcoinpy/mempool.py (repo: obulpathi/bitcoinpy)
# MemPool.py
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import logging
from lib.serialize import uint256_to_shortstr
class MemPool(object):
def __init__(self):
self.pool = {}
# setup logging
logging.basicConfig(level=logging.DEBUG)
self.logger = logging.getLogger(__name__)
def add(self, tx):
tx.calc_sha256()
hash = tx.sha256
hashstr = uint256_to_shortstr(hash)
if hash in self.pool:
self.log.write("MemPool.add(%s): already known" % (hashstr,))
return False
if not tx.is_valid():
self.log.write("MemPool.add(%s): invalid TX" % (hashstr, ))
return False
self.pool[hash] = tx
self.log.write("MemPool.add(%s), poolsz %d" % (hashstr, len(self.pool)))
return True
def remove(self, hash):
if hash not in self.pool:
return False
del self.pool[hash]
return True
def size(self):
        return len(self.pool)
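
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the upstream file): MemPool only needs an
# object exposing calc_sha256(), sha256 and is_valid(), so a tiny stand-in is
# enough to show the add/size/remove flow; the project's real CTransaction
# class is deliberately not used here.
if __name__ == "__main__":
    class FakeTx(object):
        def __init__(self, h):
            self.sha256 = h
        def calc_sha256(self):
            pass
        def is_valid(self):
            return True

    pool = MemPool()
    pool.add(FakeTx(0xdeadbeef))
    print("pool size:", pool.size())
    pool.remove(0xdeadbeef)
    print("pool size:", pool.size())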
# File: bashspy/parser.py (repo: sarvi/bashspy)
'''
Created on Jun 13, 2019
@author: sarvi
'''
from sly import Parser
from .lexer import BashLexer
class ASTCommands(list):
__slots__ = ('grouping')
def __init__(self, command, grouping=None):
self.append(command)
self.grouping = grouping
def __repr__(self):
x=[str(i) for i in self]
if self.grouping:
x.insert(0, self.grouping[0])
x.append(self.grouping[1])
return '\n'.join(x)
class ASTCommand:
__slots__ = ('assignments', 'executable', 'arguments', 'redirections', 'pipetocmd')
def __init__(self, executable=None, assignments=None, arguments=None, redirections=None, pipetocmd=None):
self.executable = executable
self.assignments = assignments or list()
self.arguments = arguments or list()
self.redirections = redirections or list()
self.pipetocmd = pipetocmd
def __repr__(self):
if self.executable:
return ('%s %s %s %s %s' % (' '.join([str(i) for i in self.assignments]),
self.executable,
' '.join([str(i) for i in self.arguments]),
' '.join([str(i) for i in self.redirections]),
'| %s'%self.pipetocmd if self.pipetocmd else '')).strip()
else:
return ' '.join([str(i) for i in self.assignments])
class ASTAssignment:
__slots__ = ('variable', 'assignop', 'value')
def __init__(self, variable, assignop, value=None):
self.variable = variable
self.assignop = assignop
self.value = value
def __repr__(self):
return '%s%s%s'%(self.variable, self.assignop, self.value or '')
class ASTArgument:
__slots__ = ('option', 'value')
def __init__(self, option=None, value=None):
self.option = option
self.value = value
def __repr__(self):
return '%s=%s'%(self.option, self.value) if self.option and self.value else (self.option or self.value)
class ASTRedirection:
__slots__ = ('redirect', 'file')
def __init__(self, redirect, file):
self.redirect = redirect
self.file = file
def __repr__(self):
return '%s%s'%(self.redirect, self.file) if self.file else '%s'%(self.redirect)
class ASTTestCombination:
__slots__ = ('leftexpr', 'combination', 'rightexpr', 'test_command', 'group')
def __init__(self, combination, rightexpr, leftexpr=None, test_command=False, group=False):
self.combination = combination
self.rightexpr = rightexpr
self.leftexpr = leftexpr
self.test_command = test_command
self.group = group
def __repr__(self):
if self.leftexpr:
return '%s %s %s'%(self.leftexpr, self.combination, self.rightexpr)
elif self.combination:
return '%s %s'%(self.combination, self.rightexpr)
elif self.test_command:
return '[ %s ]'%(self.rightexpr)
elif self.group:
return '( %s )'%(self.rightexpr)
else:
return '%s'%(self.rightexpr)
class ASTTestCondition:
__slots__ = ('leftvalue', 'test', 'rightvalue')
def __init__(self, test, rightvalue, leftvalue=None):
self.test = test
self.leftvalue = leftvalue
self.rightvalue = rightvalue
def __repr__(self):
if self.test:
return '%s %s %s'%(self.leftvalue, self.test, self.rightvalue) if self.leftvalue else '%s %s'%(self.test, self.rightvalue)
else:
return '%s' % (self.rightvalue)
class ASTIfCommand:
__slots__ = ('test_commands', 'then_commands', 'else_commands')
def __init__(self, test_commands, then_commands, else_commands=None):
self.test_commands = test_commands
self.then_commands = then_commands
self.else_commands = else_commands
def __repr__(self):
if self.else_commands:
return 'if %s; then\n%s\nelse\n%s\nfi' % (self.test_commands, self.then_commands, self.else_commands)
else:
return 'if %s; then\n%s\nfi' % (self.test_commands, self.then_commands)
class BashParser(Parser):
# Get the token list from the lexer (required)
debugfile = 'parser.out'
tokens = BashLexer.tokens
precedence = (
# ('nonassoc', BOOL_NOT),
# ('nonassoc', BOOL_LESS, BOOL_GREATER, BOOL_EQ, BOOL_NEQ), # Nonassociative operators
('left', LIST_COMMANDS),
('left', AMPERSAND, CMDSEP, NEWLINE),
('left', BOOL_COMBINATION),
('left', BOOL_COMPARISON),
('right', BOOL_NOT),
# ('right', END_LINE)
)
# Grammar rules and actions
@_('compound_commands')
def program(self, p):
print('program(%s)' % (p.compound_commands))
return p.compound_commands
@_('compound_command',
'compound_command end_command',
'compound_command end_command compound_commands'
)
def compound_commands(self, p):
# print('simple_command(%s)' % (list(p)))
if getattr(p, 'compound_commands', None):
p.compound_commands.insert(0, p.compound_command)
return p.compound_commands
else:
return ASTCommands(p.compound_command)
@_(
'group_command',
'list_commands',
'if_command',
)
def compound_command(self, p):
return p[0]
@_(
'LBRACE NEWLINE compound_commands RBRACE',
'LBRACE compound_commands RBRACE',
'LPAREN compound_commands RPAREN',
)
def group_command(self, p):
if getattr(p, 'LBRACE', None):
p.compound_commands.grouping = '{}'
elif getattr(p, 'LPAREN', None):
p.compound_commands.grouping = '()'
return getattr(p, 'compound_commands', None)
@_('pipe_command %prec LIST_COMMANDS',
'pipe_command end_pipe',
'pipe_command end_pipe list_commands',
'pipe_command boolean_combination list_commands')
def list_commands(self, p):
if getattr(p, 'boolean_combination', None):
return ASTTestCombination(p.boolean_combination, p.list_commands, p.pipe_command)
elif getattr(p, 'list_commands', None):
p.list_commands.insert(0, p.pipe_command)
return p.list_commands
else:
return ASTCommands(p.pipe_command)
@_('NEWLINE', 'CMDSEP', 'AMPERSAND')
def end_pipe(self, p):
return None
@_('NEWLINE', 'CMDSEP')
def end_command(self, p):
return None
@_('IF list_commands THEN compound_commands FI',
'IF list_commands THEN NEWLINE compound_commands FI',
'IF list_commands THEN compound_commands ELSE compound_commands FI',
'IF list_commands THEN NEWLINE compound_commands ELSE NEWLINE compound_commands FI')
def if_command(self, p):
if getattr(p, 'ELSE', None):
return ASTIfCommand(p.list_commands, p.compound_commands0, p.compound_commands1)
else:
return ASTIfCommand(p.list_commands, p.compound_commands)
# @_( #'test_command',
# 'command_pipe',
# # 'test_command boolean_combination compound_command',
# # 'command_pipe boolean_combination compound_command'
# )
# def compound_command(self, p):
# if getattr(p, 'boolean_combination', None):
# return ASTTestCombination(p.boolean_combination, p.test_commands, p.test_command)
# else:
# return p.test_command
@_('time_command pipe_commands',
'time_command BOOL_NOT pipe_commands',
'pipe_commands',
'BOOL_NOT pipe_commands')
def pipe_command(self, p):
# print('simple_command(%s)' % (list(p)))
cmd = p.pipe_commands
if getattr(p, 'BOOL_NOT', None):
cmd = ASTTestCombination(p.BOOL_NOT, p.pipe_commands)
return cmd
@_('TIME',
'TIME TIME_OPTP')
def time_command(self, p):
cmd = ASTCommand(p.TIME)
if getattr(p, 'TIME_OPTP', None):
cmd.arguments = [p.TIME_OPTP]
return cmd
@_('simple_command',
'simple_command PIPE pipe_commands')
def pipe_commands(self, p):
# print('simple_command(%s)' % (list(p)))
if getattr(p, 'PIPE', None):
p.simple_command.pipetocmd = p.pipe_commands
return p.simple_command
@_('assignments',
'base_command',
'assignments base_command',
'base_command redirects',
'assignments base_command redirects')
def simple_command(self, p):
# print('simple_command(%s)' % (list(p)))
cmd = p.base_command if getattr(p, 'base_command', None) else ASTCommand()
if getattr(p, 'redirects', None):
cmd.redirections = p.redirects
if getattr(p, 'assignments', None):
cmd.assignments = p.assignments
return cmd
@_('redirect',
'redirect redirects')
def redirects(self, p):
return [p.redirect] if len(p)==1 else [p.redirect] + p.redirects
@_('REDIRECT',
'REDIRECT WORD')
def redirect(self, p):
# print('assignment(%s)' % (list(p)))
return ASTRedirection(p.REDIRECT, getattr(p, 'WORD', None))
@_('echo_command',
'exec_command',
'test_command')
def base_command(self, p):
if len(p)==2:
p[1].assignments = p.assignments.assignments
return p[1]
else:
return p[0]
@_('LBRACK test_expressions RBRACK',
'LDBRACK test_expressions RDBRACK')
def test_command(self, p):
if getattr(p, 'BOOL_NOT', None):
return ASTTestCombination(p.BOOL_NOT, p.command_pipe)
elif getattr(p, 'command_pipe', None):
return ASTTestCombination(None, p.command_pipe)
else:
return ASTTestCombination(None, p.test_expressions, test_command=True)
@_('test_expression',
'LPAREN test_expressions RPAREN',
'BOOL_NOT test_expressions %prec BOOL_NOT',
'test_expressions boolean_combination test_expressions %prec BOOL_COMBINATION'
)
def test_expressions(self, p):
if getattr(p, 'BOOL_NOT', None):
return ASTTestCombination(p.BOOL_NOT, p.test_expressions)
elif getattr(p, 'boolean_combination', None):
return ASTTestCombination(p.boolean_combination, p.test_expressions1, p.test_expressions0)
elif getattr(p, 'LPAREN', None):
return ASTTestCombination(None, p.test_expressions, group=True)
else:
return p.test_expression
@_('BOOL_OR', 'BOOL_AND')
def boolean_combination(self, p):
return p[0]
@_('value boolean_comparison value %prec BOOL_COMPARISON',
'OPTION value')
def test_expression(self, p):
if getattr(p, 'BOOL_NOT', None):
return ASTTestCombination(p.BOOL_NOT, p.test_expression)
elif getattr(p, 'LPAREN', None):
return ASTTestCombination(None, p.test_expressions, group=True)
elif getattr(p, 'OPTION', None):
return ASTTestCondition(p.boolean_comparison, p.value)
else:
return ASTTestCondition(p.boolean_comparison, p.value1, p.value0)
@_('OPTION', 'BOOL_EQ', 'BOOL_NEQ', 'BOOL_LESS', 'BOOL_GREATER', 'ASSIGN')
def boolean_comparison(self, p):
return p[0]
# @_(
# 'for_command',
# 'case_command',
# 'WHILE compound_list DO compound_list DONE',
# 'UNTIL compound_list DO compound_list DONE',
# 'select_command',
# 'if_command',
# 'subshell',
# 'group_command',
# 'arith_command'
# 'cond_command',
# 'arith_for_command'
# )
# def shell_command(self, p):
# print('assignments(%s)' % (list(p)))
# return list(p)
@_('ECHO ECHO_STRING')
def echo_command(self, p):
return ASTCommand(p[0], None, [p[1]])
@_('WORD',
'WORD arguments')
def exec_command(self, p):
return ASTCommand(p[0], None, getattr(p, 'arguments', None), getattr(p, 'redirects', None))
@_('argument',
'argument arguments')
def arguments(self, p):
return [p.argument] if len(p)==1 else [p.argument] + p.arguments
@_('OPTION ASSIGN', 'OPTION', 'arg_value')
def argument(self, p):
# print('assignment(%s)' % (list(p)))
return ASTArgument(getattr(p, 'OPTION', None), getattr(p, 'arg_value', None))
@_('value', 'WORD')
def arg_value(self, p):
# print('value(%s)' % (list(p)))
return p[0]
@_('assignment',
'assignment assignments')
def assignments(self, p):
return [p.assignment] if len(p) == 1 else [p.assignment] + p.assignments
@_('LET ID assignop value', 'ID assignop value', 'ID assignop')
def assignment(self, p):
# print('assignment(%s)' % (list(p)))
return ASTAssignment(p.ID, p.assignop, getattr(p, 'value', None))
@_('ASSIGN', 'ARITH_ASSIGN')
def assignop(self, p):
return p[0]
@_('QSTRING', 'DQSTRING', 'BTQUOTED', 'CMD_EXP', 'VAL_STRING', 'VAR_SUBST', 'VARIABLE')
def value(self, p):
# print('value(%s)' % (list(p)))
return p[0]
if __name__ == '__main__':
lexer = BashLexer()
parser = BashParser()
while True:
try:
text = input('Command:>')
result = parser.parse(lexer.tokenize(text))
print(result)
except EOFError:
            break
# File: Module03/pregnancy_wheel.py (repo: biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-charrison620)
import datetime
def print_header():
print('----------------------------')
print(' Due Date APP ')
print('----------------------------')
print()
def get_lmp_from_patient():
print("When was the patient's last normal menstrual cycle? ")
    date_str = input('Format: [dd/mm/yyyy]')
#'05/06/2018'
parts = date_str.split('/')
if len(parts)!= 3:
print('Bad date found', date_str)
return get_lmp_from_patient()
year = int(parts[2])
month = int(parts[1])
day = int(parts[0])
lmp = datetime.date(year, month, day)
#print(lmp)
return lmp
#avg pregnancy length is 281 days
def compute_days_between_dates(original_date, target_date):
this_year = datetime.date(target_date.year, original_date.month, original_date.day)
dt = this_year - target_date
return dt.days
def print_due_date_information(min_due_date, max_due_date, expected_due_date):
print('Your expected due date is ', expected_due_date.strftime('%a %b %d %Y'))
print('But it may be as early as ', min_due_date.strftime('%m/%d/%Y'))
print('But as late as ', max_due_date.strftime('%m/%d/%Y'))
def main():
print_header()
lmp_day = get_lmp_from_patient()
gest_length = datetime.timedelta(days = 281)
gest_std = datetime.timedelta(days = 13)
expected_due_date = lmp_day + gest_length
min_due_date = expected_due_date - gest_std
max_due_date = expected_due_date + gest_std
print_due_date_information(min_due_date, max_due_date, expected_due_date)
main()
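
# Editor's worked example (not part of the upstream file): for a last menstrual
# period entered as 05/06/2018 (dd/mm/yyyy), the 281-day average gestation puts
# the expected due date at 2019-03-13, and the +/- 13-day window spans
# 2019-02-28 to 2019-03-26:
#
# >>> datetime.date(2018, 6, 5) + datetime.timedelta(days=281)
# datetime.date(2019, 3, 13)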
# File: costcalculator/forms.py (repo: connor-c/Trip-Gas-Cost-Calculator)
from django import forms
from django.core.validators import MinValueValidator, MinLengthValidator
class OriginForm(forms.Form):
origin_address = forms.CharField(validators=[MinLengthValidator(1)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '123 Tech St, Silicon Valley, CA 00000'}))
class DestinationForm(forms.Form):
destination_address = forms.CharField(validators=[MinLengthValidator(1)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '123 Tech St, Silicon Valley, CA 00000'}))
class GasPriceForm(forms.Form):
gas_price = forms.FloatField(validators=[MinValueValidator(0.01)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '1.23'}))
class MpgForm(forms.Form):
mpg = forms.FloatField(validators=[MinValueValidator(0.01)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '12'}))
class NumPeopleForm(forms.Form):
num_people = forms.IntegerField(validators=[MinValueValidator(1)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '1 (default is 1 if left blank)'}))
class DistanceForm(forms.Form):
    distance = forms.FloatField(validators=[MinValueValidator(0.01)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '15.2'}))
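
# Editor's usage sketch (not part of the upstream file): Django forms validate a
# plain dict passed as data=... and expose converted values via cleaned_data.
# Running this requires a configured Django settings module, which is assumed to
# come from the surrounding project.
#
# form = GasPriceForm(data={"gas_price": "2.89"})
# if form.is_valid():
#     print(form.cleaned_data["gas_price"])  # -> 2.89 as a float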
# File: tasks/storm_raffle_handler.py (repo: Ayouuuu/bili2.0)
import bili_statistics
from reqs.storm_raffle_handler import StormRaffleHandlerReq
from tasks.utils import UtilsTask
from .base_class import Forced, DontWait, Multi
class StormRaffleJoinTask(Forced, DontWait, Multi):
TASK_NAME = 'join_storm_raffle'
    # For speed, we sometimes join without waiting for room_id verification; passing room_id=0 makes is_normal_room simply return a fixed True.
@staticmethod
async def check(user, room_id, raffle_id=None):
if not await UtilsTask.is_normal_room(user, room_id):
return
if raffle_id is not None:
json_rsp = {'data': {'id': raffle_id}}
else:
json_rsp = await user.req_s(StormRaffleHandlerReq.check, user, room_id)
next_step_settings = []
data = json_rsp['data']
if data:
raffle_id = int(data['id'])
if not bili_statistics.is_raffleid_duplicate(raffle_id/1000000):
user.info(f'确认获取到飓风暴抽奖 {raffle_id}', with_userid=False)
next_step_setting = (-2, (1, 3), room_id, raffle_id)
next_step_settings.append(next_step_setting)
next_step_setting = (-2, (2, 4), room_id, raffle_id)
next_step_settings.append(next_step_setting)
bili_statistics.add2raffle_ids(raffle_id/1000000, 'STORM')
return next_step_settings
@staticmethod
async def work(user, room_id, raffle_id):
# await UtilsTask.enter_room(user, room_id)
json_rsp = await user.req_s(StormRaffleHandlerReq.join, user, raffle_id)
bili_statistics.add2joined_raffles('节奏风暴(合计)', user.id)
if not json_rsp['code']:
data = json_rsp['data']
gift_name = data["gift_name"]
gift_num = data["gift_num"]
user.info(f'飓风暴({raffle_id})的参与结果: {gift_name}X{gift_num}')
bili_statistics.add2results(gift_name, user.id, gift_num)
return
        print(json_rsp)
# File: u24_lymphocyte/third_party/treeano/sandbox/nodes/gradnet.py (repo: ALSM-PhD/quip_classification)
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("grad_net_interpolation")
class GradNetInterpolationNode(treeano.NodeImpl):
"""
interpolates outputs between 2 nodes
"""
hyperparameter_names = ("late_gate",)
children_container = treeano.core.DictChildrenContainerSchema(
early=treeano.core.ChildContainer,
late=treeano.core.ChildContainer,
)
input_keys = ("early", "late")
def init_state(self, network):
children = self.raw_children()
early = children["early"]
late = children["late"]
network.forward_input_to(early.name)
network.forward_input_to(late.name)
network.take_output_from(early.name, to_key="early")
network.take_output_from(late.name, to_key="late")
def compute_output(self, network, early_vw, late_vw):
late_gate = network.find_hyperparameter(["late_gate"], 1)
out_var = (early_vw.variable * (1 - late_gate)
+ late_vw.variable * late_gate)
out_shape = []
assert early_vw.ndim == late_vw.ndim
for e, l in zip(early_vw.shape, late_vw.shape):
if e is None and l is None:
out_shape.append(None)
elif e is None:
out_shape.append(l)
elif l is None:
out_shape.append(e)
else:
assert e == l
out_shape.append(e)
network.create_vw(
"default",
variable=out_var,
shape=tuple(out_shape),
tags={"output"},
)
@treeano.register_node("grad_net_optimizer_interpolation")
class _GradNetOptimizerInterpolationNode(treeano.Wrapper1NodeImpl):
hyperparameter_names = ("late_gate",
"gradnet_epsilon",
"epsilon",
"multiplicative_inverse_for_early_gate")
def init_state(self, network):
super(_GradNetOptimizerInterpolationNode, self).init_state(network)
epsilon = network.find_hyperparameter(["gradnet_epsilon",
"epsilon"],
1e-3)
late_gate = network.find_hyperparameter(["late_gate"], 1)
late_gate = treeano.utils.as_fX(late_gate)
# NOTE: late gate cannot be 0 because the early gate is divide by it
# AND multiplied by it. Clipping only for the early gate will cause
# no updates to occur.
late_gate = T.clip(late_gate, epsilon, 1)
use_multiplicative_inverse = network.find_hyperparameter(
["multiplicative_inverse_for_early_gate"], False)
if use_multiplicative_inverse:
early_gate = epsilon / late_gate
else:
early_gate = 1 - late_gate
network.set_hyperparameter(self.name + "_late_update_scale",
"update_scale_factor",
late_gate)
network.set_hyperparameter(self.name + "_early_update_scale",
"update_scale_factor",
# these updates are also multiplied by
# late_gate later on, so rescale them
early_gate / late_gate)
def GradNetOptimizerInterpolationNode(name,
children,
early,
late,
**kwargs):
"""
    interpolates updates from 2 optimizer nodes
NOTE: this is a hack to take in node constructors as arguments
"""
assert set(children.keys()) == {"subtree", "cost"}
subtree = children["subtree"]
cost = children["cost"]
cost_ref = tn.ReferenceNode(name + "_costref", reference=cost.name)
late_subtree = tn.UpdateScaleNode(name + "_late_update_scale", subtree)
late_node = late(name + "_late", {"subtree": late_subtree, "cost": cost})
early_subtree = tn.UpdateScaleNode(name + "_early_update_scale", late_node)
early_node = early(name + "_early",
{"subtree": early_subtree, "cost": cost_ref})
# NOTE: need separate node to forward hyperparameter
return _GradNetOptimizerInterpolationNode(name, early_node, **kwargs)
def GradualSimpleBatchNormalizationNode(name):
from treeano.sandbox.nodes import batch_normalization as bn
return GradNetInterpolationNode(
name,
{"early": bn.SimpleBatchNormalizationNode(name + "_bn"),
"late": tn.IdentityNode(name + "_identity")})
GradualBNNode = GradualSimpleBatchNormalizationNode
| import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("grad_net_interpolation")
class GradNetInterpolationNode(treeano.NodeImpl):
"""
interpolates outputs between 2 nodes
"""
hyperparameter_names = ("late_gate",)
children_container = treeano.core.DictChildrenContainerSchema(
early=treeano.core.ChildContainer,
late=treeano.core.ChildContainer,
)
input_keys = ("early", "late")
def init_state(self, network):
children = self.raw_children()
early = children["early"]
late = children["late"]
network.forward_input_to(early.name)
network.forward_input_to(late.name)
network.take_output_from(early.name, to_key="early")
network.take_output_from(late.name, to_key="late")
def compute_output(self, network, early_vw, late_vw):
late_gate = network.find_hyperparameter(["late_gate"], 1)
out_var = (early_vw.variable * (1 - late_gate)
+ late_vw.variable * late_gate)
out_shape = []
assert early_vw.ndim == late_vw.ndim
for e, l in zip(early_vw.shape, late_vw.shape):
if e is None and l is None:
out_shape.append(None)
elif e is None:
out_shape.append(l)
elif l is None:
out_shape.append(e)
else:
assert e == l
out_shape.append(e)
network.create_vw(
"default",
variable=out_var,
shape=tuple(out_shape),
tags={"output"},
)
@treeano.register_node("grad_net_optimizer_interpolation")
class _GradNetOptimizerInterpolationNode(treeano.Wrapper1NodeImpl):
hyperparameter_names = ("late_gate",
"gradnet_epsilon",
"epsilon",
"multiplicative_inverse_for_early_gate")
def init_state(self, network):
super(_GradNetOptimizerInterpolationNode, self).init_state(network)
epsilon = network.find_hyperparameter(["gradnet_epsilon",
"epsilon"],
1e-3)
late_gate = network.find_hyperparameter(["late_gate"], 1)
late_gate = treeano.utils.as_fX(late_gate)
        # NOTE: late gate cannot be 0 because the early gate is divided by it
# AND multiplied by it. Clipping only for the early gate will cause
# no updates to occur.
late_gate = T.clip(late_gate, epsilon, 1)
use_multiplicative_inverse = network.find_hyperparameter(
["multiplicative_inverse_for_early_gate"], False)
if use_multiplicative_inverse:
early_gate = epsilon / late_gate
else:
early_gate = 1 - late_gate
network.set_hyperparameter(self.name + "_late_update_scale",
"update_scale_factor",
late_gate)
network.set_hyperparameter(self.name + "_early_update_scale",
"update_scale_factor",
# these updates are also multiplied by
# late_gate later on, so rescale them
early_gate / late_gate)
def GradNetOptimizerInterpolationNode(name,
children,
early,
late,
**kwargs):
"""
    interpolates updates from 2 optimizer nodes
NOTE: this is a hack to take in node constructors as arguments
"""
assert set(children.keys()) == {"subtree", "cost"}
subtree = children["subtree"]
cost = children["cost"]
cost_ref = tn.ReferenceNode(name + "_costref", reference=cost.name)
late_subtree = tn.UpdateScaleNode(name + "_late_update_scale", subtree)
late_node = late(name + "_late", {"subtree": late_subtree, "cost": cost})
early_subtree = tn.UpdateScaleNode(name + "_early_update_scale", late_node)
early_node = early(name + "_early",
{"subtree": early_subtree, "cost": cost_ref})
# NOTE: need separate node to forward hyperparameter
return _GradNetOptimizerInterpolationNode(name, early_node, **kwargs)
def GradualSimpleBatchNormalizationNode(name):
from treeano.sandbox.nodes import batch_normalization as bn
return GradNetInterpolationNode(
name,
{"early": bn.SimpleBatchNormalizationNode(name + "_bn"),
"late": tn.IdentityNode(name + "_identity")})
GradualBNNode = GradualSimpleBatchNormalizationNode | en | 0.901138 | interpolates outputs between 2 nodes # NOTE: late gate cannot be 0 because the early gate is divide by it # AND multiplied by it. Clipping only for the early gate will cause # no updates to occur. # these updates are also multiplied by # late_gate later on, so rescale them interpolates updates from 2 optimizers nodes NOTE: this is a hack to take in node constructors as arguments # NOTE: need separate node to forward hyperparameter | 2.246186 | 2 |
minus80/RawFile.py | brohammer/Minus80 | 0 | 808 | <filename>minus80/RawFile.py
import gzip #pragma: no cover
import bz2 #pragma: no cover
import lzma #pragma: no cover
class RawFile(object):#pragma: no cover
def __init__(self,filename):
self.filename = filename
if filename.endswith('.gz'):
self.handle = gzip.open(filename,'rt')
elif filename.endswith('bz2'):
self.handle = bz2.open(filename,'rt')
elif filename.endswith('xz'):
            self.handle = lzma.open(filename,'rt')
else:
self.handle = open(filename,'r')
def __enter__(self):
return self.handle
def __exit__(self,dtype,value,traceback):
self.handle.close()
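# Illustrative usage (added; "example.csv.gz" is a placeholder path):
#
#     with RawFile("example.csv.gz") as handle:
#         for line in handle:
#             print(line.rstrip())
#
# The same call transparently opens .bz2, .xz and plain-text files.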
| <filename>minus80/RawFile.py
import gzip #pragma: no cover
import bz2 #pragma: no cover
import lzma #pragma: no cover
class RawFile(object):#pragma: no cover
def __init__(self,filename):
self.filename = filename
if filename.endswith('.gz'):
self.handle = gzip.open(filename,'rt')
elif filename.endswith('bz2'):
self.handle = bz2.open(filename,'rt')
elif filename.endswith('xz'):
            self.handle = lzma.open(filename,'rt')
else:
self.handle = open(filename,'r')
def __enter__(self):
return self.handle
def __exit__(self,dtype,value,traceback):
self.handle.close()
| en | 0.360532 | #pragma: no cover #pragma: no cover #pragma: no cover #pragma: no cover | 3.264506 | 3 |
utils/config.py | jtr109/Alpha2kindle | 0 | 809 | <gh_stars>0
# -*- coding: utf-8 -*-
import os
class BaseConf(object):
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/55.0.2883.95 "
"Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;"
"q=0.9,image/webp,*/*;"
"q=0.8",
"Accept-Encoding": "gzip, deflate, sdch, br",
"Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4",
"Cache-Control": "max-age=0",
}
class TestConf(BaseConf):
REDIS_URL = "redis://:{password}@{hostname}:{port}/{db_number}".format(
password=os.environ.get("REDIS_PWD"),
hostname='127.0.0.1',
port=6379,
db_number=0
)
CURCONF = TestConf
| # -*- coding: utf-8 -*-
import os
class BaseConf(object):
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/55.0.2883.95 "
"Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;"
"q=0.9,image/webp,*/*;"
"q=0.8",
"Accept-Encoding": "gzip, deflate, sdch, br",
"Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4",
"Cache-Control": "max-age=0",
}
class TestConf(BaseConf):
REDIS_URL = "redis://:{password}@{hostname}:{port}/{db_number}".format(
password=os.environ.get("REDIS_PWD"),
hostname='127.0.0.1',
port=6379,
db_number=0
)
CURCONF = TestConf | en | 0.769321 | # -*- coding: utf-8 -*- | 2.359047 | 2 |
framework/database/__init__.py | fabmiz/osf.io | 0 | 810 | <filename>framework/database/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
import functools
import httplib as http
import markupsafe
from django.core.paginator import Paginator
from django.db.models import Q, QuerySet
from framework.exceptions import HTTPError
def get_or_http_error(Model, pk_or_query, allow_deleted=False, display_name=None):
"""Load an instance of Model by primary key or modularodm.Q query. Raise an appropriate
HTTPError if no record is found or if the query fails to find a unique record
:param type Model: StoredObject subclass to query
:param pk_or_query:
:type pk_or_query: either
- a <basestring> representation of the record's primary key, e.g. 'abcdef'
- a <QueryBase> subclass query to uniquely select a record, e.g.
Q('title', 'eq', 'Entitled') & Q('version', 'eq', 1)
    :param bool allow_deleted: allow deleted records?
:param basestring display_name:
:raises: HTTPError(404) if the record does not exist
:raises: HTTPError(400) if no unique record is found
:raises: HTTPError(410) if the resource is deleted and allow_deleted = False
:return: Model instance
"""
display_name = display_name or ''
# FIXME: Not everything that uses this decorator needs to be markupsafe, but OsfWebRenderer error.mako does...
safe_name = markupsafe.escape(display_name)
if isinstance(pk_or_query, Q):
try:
instance = Model.objects.get(pk_or_query)
except Model.DoesNotExist:
raise HTTPError(http.NOT_FOUND, data=dict(
message_long='No {name} record matching that query could be found'.format(name=safe_name)
))
except Model.MultipleObjectsReturned:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='The query must match exactly one {name} record'.format(name=safe_name)
))
else:
instance = Model.load(pk_or_query)
if not instance:
raise HTTPError(http.NOT_FOUND, data=dict(
message_long='No {name} record with that primary key could be found'.format(name=safe_name)
))
if getattr(instance, 'is_deleted', False) and getattr(instance, 'suspended', False):
raise HTTPError(451, data=dict( # 451 - Unavailable For Legal Reasons
message_short='Content removed',
message_long='This content has been removed'
))
if not allow_deleted and getattr(instance, 'is_deleted', False):
raise HTTPError(http.GONE)
return instance
def autoload(Model, extract_key, inject_key, func):
"""Decorator to autoload a StoredObject instance by primary key and inject into kwargs. Raises
an appropriate HTTPError (see #get_or_http_error)
:param type Model: database collection model to query (should be a subclass of StoredObject)
:param basestring extract_key: named URL field containing the desired primary key to be fetched
from the database
:param basestring inject_key: name the instance will be accessible as when it's injected as an
argument to the function
Example usage: ::
def get_node(node_id):
node = Node.load(node_id)
...
becomes
import functools
autoload_node = functools.partial(autoload, Node, 'node_id', 'node')
@autoload_node
def get_node(node):
...
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
primary_key = kwargs.get(extract_key)
instance = get_or_http_error(Model, primary_key)
kwargs[inject_key] = instance
return func(*args, **kwargs)
return wrapper
def paginated(model, query=None, increment=200, each=True, include=None):
"""Paginate a MODM query.
:param StoredObject model: Model to query.
:param Q query: Optional query object.
:param int increment: Page size
:param bool each: If True, each record is yielded. If False, pages
are yielded.
"""
if include and query:
queryset = model.objects.filter(query).include(*include)
elif query:
queryset = model.objects.filter(query)
else:
queryset = model.objects.all()
# Pagination requires an order by clause, especially when using Postgres.
# see: https://docs.djangoproject.com/en/1.10/topics/pagination/#required-arguments
if isinstance(queryset, QuerySet) and not queryset.ordered:
queryset = queryset.order_by(queryset.model._meta.pk.name)
paginator = Paginator(queryset.all(), increment)
for page_num in paginator.page_range:
page = paginator.page(page_num)
if each:
for item in page.object_list:
yield item
else:
yield page.object_list
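# Illustrative usage (added; ``Node`` stands for any StoredObject/model subclass
# available to the caller -- it is not defined in this module):
#
#     node = get_or_http_error(Node, node_id, display_name='node')
#
#     for node in paginated(Node, query=Q(is_deleted=False), increment=500):
#         index(node)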
| <filename>framework/database/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
import functools
import httplib as http
import markupsafe
from django.core.paginator import Paginator
from django.db.models import Q, QuerySet
from framework.exceptions import HTTPError
def get_or_http_error(Model, pk_or_query, allow_deleted=False, display_name=None):
"""Load an instance of Model by primary key or modularodm.Q query. Raise an appropriate
HTTPError if no record is found or if the query fails to find a unique record
:param type Model: StoredObject subclass to query
:param pk_or_query:
:type pk_or_query: either
- a <basestring> representation of the record's primary key, e.g. 'abcdef'
- a <QueryBase> subclass query to uniquely select a record, e.g.
Q('title', 'eq', 'Entitled') & Q('version', 'eq', 1)
    :param bool allow_deleted: allow deleted records?
:param basestring display_name:
:raises: HTTPError(404) if the record does not exist
:raises: HTTPError(400) if no unique record is found
:raises: HTTPError(410) if the resource is deleted and allow_deleted = False
:return: Model instance
"""
display_name = display_name or ''
# FIXME: Not everything that uses this decorator needs to be markupsafe, but OsfWebRenderer error.mako does...
safe_name = markupsafe.escape(display_name)
if isinstance(pk_or_query, Q):
try:
instance = Model.objects.get(pk_or_query)
except Model.DoesNotExist:
raise HTTPError(http.NOT_FOUND, data=dict(
message_long='No {name} record matching that query could be found'.format(name=safe_name)
))
except Model.MultipleObjectsReturned:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='The query must match exactly one {name} record'.format(name=safe_name)
))
else:
instance = Model.load(pk_or_query)
if not instance:
raise HTTPError(http.NOT_FOUND, data=dict(
message_long='No {name} record with that primary key could be found'.format(name=safe_name)
))
if getattr(instance, 'is_deleted', False) and getattr(instance, 'suspended', False):
raise HTTPError(451, data=dict( # 451 - Unavailable For Legal Reasons
message_short='Content removed',
message_long='This content has been removed'
))
if not allow_deleted and getattr(instance, 'is_deleted', False):
raise HTTPError(http.GONE)
return instance
def autoload(Model, extract_key, inject_key, func):
"""Decorator to autoload a StoredObject instance by primary key and inject into kwargs. Raises
an appropriate HTTPError (see #get_or_http_error)
:param type Model: database collection model to query (should be a subclass of StoredObject)
:param basestring extract_key: named URL field containing the desired primary key to be fetched
from the database
:param basestring inject_key: name the instance will be accessible as when it's injected as an
argument to the function
Example usage: ::
def get_node(node_id):
node = Node.load(node_id)
...
becomes
import functools
autoload_node = functools.partial(autoload, Node, 'node_id', 'node')
@autoload_node
def get_node(node):
...
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
primary_key = kwargs.get(extract_key)
instance = get_or_http_error(Model, primary_key)
kwargs[inject_key] = instance
return func(*args, **kwargs)
return wrapper
def paginated(model, query=None, increment=200, each=True, include=None):
"""Paginate a MODM query.
:param StoredObject model: Model to query.
:param Q query: Optional query object.
:param int increment: Page size
:param bool each: If True, each record is yielded. If False, pages
are yielded.
"""
if include and query:
queryset = model.objects.filter(query).include(*include)
elif query:
queryset = model.objects.filter(query)
else:
queryset = model.objects.all()
# Pagination requires an order by clause, especially when using Postgres.
# see: https://docs.djangoproject.com/en/1.10/topics/pagination/#required-arguments
if isinstance(queryset, QuerySet) and not queryset.ordered:
queryset = queryset.order_by(queryset.model._meta.pk.name)
paginator = Paginator(queryset.all(), increment)
for page_num in paginator.page_range:
page = paginator.page(page_num)
if each:
for item in page.object_list:
yield item
else:
yield page.object_list
| en | 0.657497 | # -*- coding: utf-8 -*- Load an instance of Model by primary key or modularodm.Q query. Raise an appropriate HTTPError if no record is found or if the query fails to find a unique record :param type Model: StoredObject subclass to query :param pk_or_query: :type pk_or_query: either - a <basestring> representation of the record's primary key, e.g. 'abcdef' - a <QueryBase> subclass query to uniquely select a record, e.g. Q('title', 'eq', 'Entitled') & Q('version', 'eq', 1) :param bool allow_deleted: allow deleleted records? :param basestring display_name: :raises: HTTPError(404) if the record does not exist :raises: HTTPError(400) if no unique record is found :raises: HTTPError(410) if the resource is deleted and allow_deleted = False :return: Model instance # FIXME: Not everything that uses this decorator needs to be markupsafe, but OsfWebRenderer error.mako does... # 451 - Unavailable For Legal Reasons Decorator to autoload a StoredObject instance by primary key and inject into kwargs. Raises an appropriate HTTPError (see #get_or_http_error) :param type Model: database collection model to query (should be a subclass of StoredObject) :param basestring extract_key: named URL field containing the desired primary key to be fetched from the database :param basestring inject_key: name the instance will be accessible as when it's injected as an argument to the function Example usage: :: def get_node(node_id): node = Node.load(node_id) ... becomes import functools autoload_node = functools.partial(autoload, Node, 'node_id', 'node') @autoload_node def get_node(node): ... Paginate a MODM query. :param StoredObject model: Model to query. :param Q query: Optional query object. :param int increment: Page size :param bool each: If True, each record is yielded. If False, pages are yielded. # Pagination requires an order by clause, especially when using Postgres. # see: https://docs.djangoproject.com/en/1.10/topics/pagination/#required-arguments | 2.352381 | 2 |
neptune/internal/client_library/job_development_api/image.py | jiji-online/neptune-cli | 0 | 811 | <reponame>jiji-online/neptune-cli
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future import standard_library
standard_library.install_aliases()
# pylint: disable=wrong-import-position
from future.builtins import object
import base64
import io
import PIL.Image
from neptune.generated.swagger_client import InputImage
from neptune.internal.common.models.parameters_validation import (
of_type_validator,
text_conv,
validate
)
class Image(object):
"""
Represents information about images sent to image channels.
"""
@validate(name=text_conv, description=text_conv, data=of_type_validator(PIL.Image.Image))
def __init__(self, name, description, data):
"""
Creates a new Image.
:param name: Name of the image, displayed in the Channels tab on job's dashboard.
:param description: Description of the image displayed in the Channels tab
on job's dashboard.
:param data: Image data.
:type name: unicode
:type description: unicode
:type data: PIL.Image
"""
self._name = name
self._description = description
self._data = data
def to_input_image(self):
"""
Creates InputImage that can be sent to Neptune.
        :return: input image in a format appropriate to be sent to Neptune.
:rtype: InputImage
"""
image_buffer = io.BytesIO()
self.data.save(image_buffer, format='PNG')
contents = image_buffer.getvalue()
image_buffer.close()
input_image = InputImage()
input_image.name = self.name
input_image.description = self.description
input_image.data = base64.b64encode(contents).decode('utf-8')
return input_image
@property
def name(self):
"""
Gets name of this Image.
:return: The name of this Image.
:rtype: str
"""
return self._name
@property
def description(self):
"""
Gets description of this Image.
:return: The description of this Image.
:rtype: str
"""
return self._description
@property
def data(self):
"""
Gets data of this Image.
:return: The data of this Image.
:rtype: PIL.Image
"""
return self._data
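# Illustrative usage (added; requires Pillow, channel wiring omitted):
#
#     pil_image = PIL.Image.new("RGB", (64, 64))
#     image = Image(u"confusion_matrix", u"epoch 3", pil_image)
#     input_image = image.to_input_image()  # base64-encoded PNG ready to send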
| # -*- coding: utf-8 -*-
#
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future import standard_library
standard_library.install_aliases()
# pylint: disable=wrong-import-position
from future.builtins import object
import base64
import io
import PIL.Image
from neptune.generated.swagger_client import InputImage
from neptune.internal.common.models.parameters_validation import (
of_type_validator,
text_conv,
validate
)
class Image(object):
"""
Represents information about images sent to image channels.
"""
@validate(name=text_conv, description=text_conv, data=of_type_validator(PIL.Image.Image))
def __init__(self, name, description, data):
"""
Creates a new Image.
:param name: Name of the image, displayed in the Channels tab on job's dashboard.
:param description: Description of the image displayed in the Channels tab
on job's dashboard.
:param data: Image data.
:type name: unicode
:type description: unicode
:type data: PIL.Image
"""
self._name = name
self._description = description
self._data = data
def to_input_image(self):
"""
Creates InputImage that can be sent to Neptune.
        :return: input image in a format appropriate to be sent to Neptune.
:rtype: InputImage
"""
image_buffer = io.BytesIO()
self.data.save(image_buffer, format='PNG')
contents = image_buffer.getvalue()
image_buffer.close()
input_image = InputImage()
input_image.name = self.name
input_image.description = self.description
input_image.data = base64.b64encode(contents).decode('utf-8')
return input_image
@property
def name(self):
"""
Gets name of this Image.
:return: The name of this Image.
:rtype: str
"""
return self._name
@property
def description(self):
"""
Gets description of this Image.
:return: The description of this Image.
:rtype: str
"""
return self._description
@property
def data(self):
"""
Gets data of this Image.
:return: The data of this Image.
:rtype: PIL.Image
"""
return self._data | en | 0.761986 | # -*- coding: utf-8 -*- # # Copyright (c) 2016, deepsense.io # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=wrong-import-position Represents information about images sent to image channels. Creates a new Image. :param name: Name of the image, displayed in the Channels tab on job's dashboard. :param description: Description of the image displayed in the Channels tab on job's dashboard. :param data: Image data. :type name: unicode :type description: unicode :type data: PIL.Image Creates InputImage that can be sent to Neptune. :return: input image in format appropriate to be sent to Neptune. :rtype: InputImage Gets name of this Image. :return: The name of this Image. :rtype: str Gets description of this Image. :return: The description of this Image. :rtype: str Gets data of this Image. :return: The data of this Image. :rtype: PIL.Image | 2.085089 | 2 |
src/picome/hukeyboard.py | guibohnert91/picome | 0 | 812 | <filename>src/picome/hukeyboard.py
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
from adafruit_hid.keycode import Keycode
import usb_hid
import time
class HumanKeyboard(object):
def __init__(self):
self.keyboard = Keyboard(usb_hid.devices)
self.keyboardLayout = KeyboardLayoutUS(self.keyboard)
def keyPress(self, keyCode):
"""Send a human like keypress.
Keyword arguments:
keyCode -- the real key to be pressed (example Keycode.SEVEN)
"""
self.keyboard.press(keyCode)
time.sleep(0.1)
self.keyboard.release(keyCode) | <filename>src/picome/hukeyboard.py
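# Illustrative usage of the HumanKeyboard class above (added; runs on a
# CircuitPython board exposing USB HID):
#
#     keyboard = HumanKeyboard()
#     keyboard.keyPress(Keycode.SEVEN)  # press and release "7" with a short delay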
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
from adafruit_hid.keycode import Keycode
import usb_hid
import time
class HumanKeyboard(object):
def __init__(self):
self.keyboard = Keyboard(usb_hid.devices)
self.keyboardLayout = KeyboardLayoutUS(self.keyboard)
def keyPress(self, keyCode):
"""Send a human like keypress.
Keyword arguments:
keyCode -- the real key to be pressed (example Keycode.SEVEN)
"""
self.keyboard.press(keyCode)
time.sleep(0.1)
self.keyboard.release(keyCode) | en | 0.606915 | Send a human like keypress. Keyword arguments: keyCode -- the real key to be pressed (example Keycode.SEVEN) | 2.890599 | 3 |
src/solutions/part2/q104_max_bi_tree_depth.py | hychrisli/PyAlgorithms | 0 | 813 | <filename>src/solutions/part2/q104_max_bi_tree_depth.py
from src.base.solution import Solution
from src.tests.part2.q104_test_max_bi_tree_depth import MaxBiTreeDepthTestCases
class MaxBiTreeDepth(Solution):
def gen_test_cases(self):
return MaxBiTreeDepthTestCases()
def run_test(self, input):
return self.maxDepth(input)
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root: return 0
return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
if __name__ == '__main__':
sol = MaxBiTreeDepth()
sol.run_tests() | <filename>src/solutions/part2/q104_max_bi_tree_depth.py
from src.base.solution import Solution
from src.tests.part2.q104_test_max_bi_tree_depth import MaxBiTreeDepthTestCases
class MaxBiTreeDepth(Solution):
def gen_test_cases(self):
return MaxBiTreeDepthTestCases()
def run_test(self, input):
return self.maxDepth(input)
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root: return 0
return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
if __name__ == '__main__':
sol = MaxBiTreeDepth()
sol.run_tests() | en | 0.313099 | :type root: TreeNode :rtype: int | 2.349594 | 2 |
inventory/admin.py | shakyasaijal/businessAnalytics | 0 | 814 | from django.contrib import admin
from . import models
class SupplierAdmin(admin.ModelAdmin):
list_display = ('supplier_name', 'contact', )
search_fields = ['supplier_name', 'contact', ]
admin.site.register(models.Suppliers, SupplierAdmin)
class InventoryUserAdmin(admin.ModelAdmin):
list_display = ('employee_name', 'user_type')
search_fields = ['employee_name', 'user_type']
list_filter = ("user_type",)
admin.site.register(models.InventoryUser, InventoryUserAdmin)
class ProductsAdmin(admin.ModelAdmin):
list_display = ('name', 'quantity', 'cost_price', 'selling_price',)
search_fields = ['name', 'quantity', 'cost_price', 'selling_price',]
list_filter = ("branch", "supplier",)
admin.site.register(models.Product, ProductsAdmin)
| from django.contrib import admin
from . import models
class SupplierAdmin(admin.ModelAdmin):
list_display = ('supplier_name', 'contact', )
search_fields = ['supplier_name', 'contact', ]
admin.site.register(models.Suppliers, SupplierAdmin)
class InventoryUserAdmin(admin.ModelAdmin):
list_display = ('employee_name', 'user_type')
search_fields = ['employee_name', 'user_type']
list_filter = ("user_type",)
admin.site.register(models.InventoryUser, InventoryUserAdmin)
class ProductsAdmin(admin.ModelAdmin):
list_display = ('name', 'quantity', 'cost_price', 'selling_price',)
search_fields = ['name', 'quantity', 'cost_price', 'selling_price',]
list_filter = ("branch", "supplier",)
admin.site.register(models.Product, ProductsAdmin)
| none | 1 | 1.731184 | 2 |
|
python/testData/resolve/TryExceptElse.py | jnthn/intellij-community | 2 | 815 | <gh_stars>1-10
try:
name = ""
except:
pass
else:
print na<ref>me | try:
name = ""
except:
pass
else:
print na<ref>me | none | 1 | 1.181041 | 1 |
|
pybyte/session.py | ms7m/py-byte | 4 | 816 | <reponame>ms7m/py-byte
import requests
class ByteSession(object):
def __init__(self, token, providedSession=False):
self._userToken = token
if providedSession == False:
self._session = requests.session()
else:
self._session = providedSession
self._session.headers = {
"Authorization": token,
"User-Agent": "byte/0.2 (co.byte.video; build:145; iOS 13.3.0) Alamofire/4.9.1"
}
def session(self):
return self._session | import requests
class ByteSession(object):
def __init__(self, token, providedSession=False):
self._userToken = token
if providedSession == False:
self._session = requests.session()
else:
self._session = providedSession
self._session.headers = {
"Authorization": token,
"User-Agent": "byte/0.2 (co.byte.video; build:145; iOS 13.3.0) Alamofire/4.9.1"
}
def session(self):
return self._session | none | 1 | 2.420807 | 2 |
|
pyqtgraph/examples/template.py | secantsquared/pyqtgraph | 0 | 817 | # -*- coding: utf-8 -*-
"""
Description of example
"""
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, mkQApp
import numpy as np
app = mkQApp()
# win.setWindowTitle('pyqtgraph example: ____')
if __name__ == '__main__':
pg.exec()
| # -*- coding: utf-8 -*-
"""
Description of example
"""
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, mkQApp
import numpy as np
app = mkQApp()
# win.setWindowTitle('pyqtgraph example: ____')
if __name__ == '__main__':
pg.exec()
| en | 0.608899 | # -*- coding: utf-8 -*- Description of example # win.setWindowTitle('pyqtgraph example: ____') | 2.092832 | 2 |
ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py | cas-packone/ambari-chs | 3 | 818 | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import socket
from unittest import TestCase
from mock.mock import patch, MagicMock
class TestHDP206StackAdvisor(TestCase):
def setUp(self):
import imp
import os
testDirectory = os.path.dirname(os.path.abspath(__file__))
stackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
hdp206StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
hdp206StackAdvisorClassName = 'HDP206StackAdvisor'
with open(stackAdvisorPath, 'rb') as fp:
stack_advisor = imp.load_module( 'stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE) )
with open(hdp206StackAdvisorPath, 'rb') as fp:
self.stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
clazz = getattr(self.stack_advisor_impl, hdp206StackAdvisorClassName)
self.stackAdvisor = clazz()
self.maxDiff = None
# substitute method in the instance
self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
@patch('__builtin__.open')
@patch('os.path.exists')
def get_system_min_uid_magic(self, exists_mock, open_mock):
class MagicFile(object):
def read(self):
return """
#test line UID_MIN 200
UID_MIN 500
"""
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __enter__(self):
return self
exists_mock.return_value = True
open_mock.return_value = MagicFile()
return self.get_system_min_uid_real()
def test_recommendationCardinalityALL(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [{"name": "GANGLIA_MONITOR", "cardinality": "ALL", "category": "SLAVE", "is_master": False}]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.recommendComponentLayout(services, hosts)
expectedComponentsHostsMap = {
"GANGLIA_MONITOR": ["host1", "host2"]
}
self.assertHostLayout(expectedComponentsHostsMap, result)
def test_recommendOnAllHosts(self):
""" Recommend on all hosts for cardinality ALL even if the component has been installed in the cluster before """
servicesInfo = [
{
"name": "GANGLIA",
"components": [{"name": "GANGLIA_MONITOR", "cardinality": "ALL", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]}]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.recommendComponentLayout(services, hosts)
expectedComponentsHostsMap = {
"GANGLIA_MONITOR": ["host1", "host2"]
}
self.assertHostLayout(expectedComponentsHostsMap, result)
def test_recommendationIsNotPreferableOnAmbariServer(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [{"name": "GANGLIA_SERVER", "cardinality": "ALL", "category": "MASTER", "is_master": True}]
}
]
services = self.prepareServices(servicesInfo)
localhost = socket.getfqdn()
hosts = self.prepareHosts([localhost, "host2"])
result = self.stackAdvisor.recommendComponentLayout(services, hosts)
expectedComponentsHostsMap = {
"GANGLIA_SERVER": ["host2"]
}
self.assertHostLayout(expectedComponentsHostsMap, result)
def test_validationNamenodeAndSecondaryNamenode2Hosts_noMessagesForSameHost(self):
servicesInfo = [
{
"name": "HDFS",
"components": [
{"name": "NAMENODE", "cardinality": "1-2", "category": "MASTER", "is_master": True, "hostnames": ["host1"]},
{"name": "SECONDARY_NAMENODE", "cardinality": "1", "category": "MASTER", "is_master": True, "hostnames": ["host1"]}]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Host is not used", "level": "ERROR", "host": "host2"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinalityALL(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "display_name": "Ganglia Monitor", "cardinality": "ALL", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]},
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "1-2", "category": "MASTER", "is_master": True, "hostnames": ["host2", "host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Ganglia Monitor component should be installed on all hosts in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinalityExactAmount(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "display_name": "Ganglia Monitor", "cardinality": "2", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]},
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "2", "category": "MASTER", "is_master": True, "hostnames": ["host2", "host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Exactly 2 Ganglia Monitor components should be installed in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinalityAtLeast(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "display_name": "Ganglia Monitor", "cardinality": "1+", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]},
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "3+", "category": "MASTER", "is_master": True, "hostnames": ["host2", "host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "At least 3 Ganglia Server components should be installed in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationWarnMessagesIfLessThanDefault(self):
servicesInfo = [
{
"name": "YARN",
"components": []
}
]
services = self.prepareServices(servicesInfo)
services["configurations"] = {"yarn-site":{"properties":{"yarn.nodemanager.resource.memory-mb": "0",
"yarn.scheduler.minimum-allocation-mb": "str"}}}
hosts = self.prepareHosts([])
result = self.stackAdvisor.validateConfigurations(services, hosts)
expectedItems = [
{"message": "Value is less than the recommended default of 512", "level": "WARN"},
{'message': 'Value should be set for yarn.nodemanager.linux-container-executor.group', 'level': 'ERROR'},
{"message": "Value should be integer", "level": "ERROR"},
{"message": "Value should be set", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationYARNServicecheckQueueName(self):
servicesInfo = [
{
"name": "YARN",
"components": []
}
]
services = self.prepareServices(servicesInfo)
services["configurations"] = {"yarn-env":{"properties":{"service_check.queue.name": "default"}},
"capacity-scheduler":{"properties":{"capacity-scheduler": "yarn.scheduler.capacity.root.queues=ndfqueue\n"}}}
hosts = self.prepareHosts([])
result = self.stackAdvisor.validateConfigurations(services, hosts)
expectedItems = [
{'message': 'Queue is not exist, or not corresponds to existing YARN leaf queue', 'level': 'ERROR'}
]
self.assertValidationResult(expectedItems, result)
services["configurations"]["yarn-env"]["properties"]["service_check.queue.name"] = "ndfqueue"
expectedItems = []
result = self.stackAdvisor.validateConfigurations(services, hosts)
self.assertValidationResult(expectedItems, result)
def test_validationMinMax(self):
configurations = {
"mapred-site": {
"properties": {
"mapreduce.task.io.sort.mb": "4096",
"some_float_value": "0.5",
"no_min_or_max_attribute_property": "STRING_VALUE"
}
}
}
recommendedDefaults = {
"mapred-site": {
"properties": {
"mapreduce.task.io.sort.mb": "2047",
"some_float_value": "0.8",
"no_min_or_max_attribute_property": "STRING_VALUE"
},
"property_attributes": {
'mapreduce.task.io.sort.mb': {'maximum': '2047'},
'some_float_value': {'minimum': '0.8'}
}
}
}
items = []
self.stackAdvisor.validateMinMax(items, recommendedDefaults, configurations)
expectedItems = [
{
'message': 'Value is greater than the recommended maximum of 2047 ',
'level': 'WARN',
'config-type': 'mapred-site',
'config-name': 'mapreduce.task.io.sort.mb',
'type': 'configuration'
},
{
'message': 'Value is less than the recommended minimum of 0.8 ',
'level': 'WARN',
'config-type': 'mapred-site',
'config-name': 'some_float_value',
'type': 'configuration'
}
]
self.assertEquals(expectedItems, items)
def test_validationHostIsNotUsedForNonValuableComponent(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "cardinality": "ALL", "category": "SLAVE", "is_master": False, "hostnames": ["host1", "host2"]},
{"name": "GANGLIA_SERVER", "cardinality": "1", "category": "MASTER", "is_master": True, "hostnames": ["host2"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Host is not used", "host": "host1", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinality01TwoHostsAssigned(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "0-1", "category": "MASTER", "is_master": True, "hostnames": ["host1", "host2"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Between 0 and 1 Ganglia Server components should be installed in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationHostIsNotUsed(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_SERVER", "cardinality": "1", "category": "MASTER", "is_master": True, "hostnames": ["host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Host is not used", "host": "host2", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_getConfigurationClusterSummary_withHBaseAnd6gbRam(self):
servicesList = ["HBASE"]
components = []
hosts = {
"items" : [
{
"Hosts" : {
"cpu_count" : 8,
"total_mem" : 6291456,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
]
}
expected = {
"hBaseInstalled": True,
"components": components,
"cpu": 8,
"disk": 8,
"ram": 6,
"reservedRam": 2,
"hbaseRam": 1,
"minContainerSize": 512,
"totalAvailableRam": 3072,
"containers": 6,
"ramPerContainer": 512,
"mapMemory": 512,
"reduceMemory": 512,
"amMemory": 512,
"referenceHost": hosts["items"][0]["Hosts"]
}
# Test - Cluster data with 1 host
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(result, expected)
# Test - Cluster data with 2 hosts - pick minimum memory
servicesList.append("YARN")
services = services = {"services":
[{"StackServices":
{"service_name" : "YARN",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"NODEMANAGER",
"custom_commands":[
],
"display_name":"NodeManager",
"is_client":"false",
"is_master":"false",
"service_name":"YARN",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1",
"host2"
]
},
"dependencies":[
]
}
],
}],
"configurations": {}
}
hosts["items"][0]["Hosts"]["host_name"] = "host1"
hosts["items"].append({
"Hosts": {
"cpu_count" : 4,
"total_mem" : 500000,
"host_name" : "host2",
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
})
expected["referenceHost"] = hosts["items"][1]["Hosts"]
expected["referenceNodeManagerHost"] = hosts["items"][1]["Hosts"]
expected["amMemory"] = 170.66666666666666
expected["containers"] = 3.0
expected["cpu"] = 4
expected["totalAvailableRam"] = 512
expected["mapMemory"] = 170
expected["minContainerSize"] = 256
expected["reduceMemory"] = 170.66666666666666
expected["ram"] = 0
expected["ramPerContainer"] = 170.66666666666666
expected["reservedRam"] = 1
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
self.assertEquals(result, expected)
def test_getConfigurationClusterSummary_withHBaseAnd48gbRam(self):
servicesList = ["HBASE"]
components = []
hosts = {
"items" : [
{
"Hosts" : {
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
]
}
expected = {
"hBaseInstalled": True,
"components": components,
"cpu": 6,
"disk": 6,
"ram": 48,
"reservedRam": 6,
"hbaseRam": 8,
"minContainerSize": 2048,
"totalAvailableRam": 34816,
"containers": 11,
"ramPerContainer": 3072,
"mapMemory": 3072,
"reduceMemory": 3072,
"amMemory": 3072,
"referenceHost": hosts["items"][0]["Hosts"]
}
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(result, expected)
def test_recommendStormConfigurations(self):
# no AMS
configurations = {}
services = {
"services": [
],
"configurations": configurations
}
expected = {
"storm-site": {
"properties": {
}
},
}
self.stackAdvisor.recommendStormConfigurations(configurations, None, services, None)
self.assertEquals(configurations, expected)
# with AMS
configurations = {}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
}
}
],
"configurations": configurations
}
expected = {
"storm-site": {
"properties": {
"metrics.reporter.register": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"
}
},
}
self.stackAdvisor.recommendStormConfigurations(configurations, None, services, None)
self.assertEquals(configurations, expected)
def test_recommendYARNConfigurations(self):
configurations = {}
services = {"configurations": configurations, "services": []}
clusterData = {
"containers" : 5,
"ramPerContainer": 256
}
expected = {
"yarn-env": {
"properties": {
"min_user_id": "500",
'service_check.queue.name': 'default'
}
},
"yarn-site": {
"properties": {
"yarn.nodemanager.linux-container-executor.group": "hadoop",
"yarn.nodemanager.resource.memory-mb": "1280",
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "1280"
}
}
}
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations, expected)
def test_recommendMapReduce2Configurations_mapMemoryLessThan2560(self):
configurations = {}
clusterData = {
"mapMemory": 567,
"reduceMemory": 345.6666666666666,
"amMemory": 123.54
}
expected = {
"mapred-site": {
"properties": {
'mapreduce.job.queuename': 'default',
"yarn.app.mapreduce.am.resource.mb": "123",
"yarn.app.mapreduce.am.command-opts": "-Xmx99m",
"mapreduce.map.memory.mb": "567",
"mapreduce.reduce.memory.mb": "345",
"mapreduce.map.java.opts": "-Xmx454m",
"mapreduce.reduce.java.opts": "-Xmx277m",
"mapreduce.task.io.sort.mb": "227"
}
}
}
self.stackAdvisor.recommendMapReduce2Configurations(configurations, clusterData, None, None)
self.assertEquals(configurations, expected)
def test_getConfigurationClusterSummary_noHostsWithoutHBase(self):
servicesList = []
components = []
hosts = {
"items" : []
}
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
expected = {
"hBaseInstalled": False,
"components": components,
"cpu": 0,
"disk": 0,
"ram": 0,
"reservedRam": 1,
"hbaseRam": 1,
"minContainerSize": 256,
"totalAvailableRam": 512,
"containers": 3,
"ramPerContainer": 170.66666666666666,
"mapMemory": 170,
"reduceMemory": 170.66666666666666,
"amMemory": 170.66666666666666
}
self.assertEquals(result, expected)
def prepareHosts(self, hostsNames):
hosts = { "items": [] }
for hostName in hostsNames:
nextHost = {"Hosts":{"host_name" : hostName}}
hosts["items"].append(nextHost)
return hosts
def prepareServices(self, servicesInfo):
services = { "Versions" : { "stack_name" : "HDP", "stack_version" : "2.0.6" } }
services["services"] = []
for serviceInfo in servicesInfo:
nextService = {"StackServices":{"service_name" : serviceInfo["name"]}}
nextService["components"] = []
for component in serviceInfo["components"]:
nextComponent = {
"StackServiceComponents": {
"component_name": component["name"],
"cardinality": component["cardinality"],
"component_category": component["category"],
"is_master": component["is_master"]
}
}
try:
nextComponent["StackServiceComponents"]["hostnames"] = component["hostnames"]
except KeyError:
nextComponent["StackServiceComponents"]["hostnames"] = []
try:
nextComponent["StackServiceComponents"]["display_name"] = component["display_name"]
except KeyError:
nextComponent["StackServiceComponents"]["display_name"] = component["name"]
nextService["components"].append(nextComponent)
services["services"].append(nextService)
return services
def assertHostLayout(self, componentsHostsMap, recommendation):
blueprintMapping = recommendation["recommendations"]["blueprint"]["host_groups"]
bindings = recommendation["recommendations"]["blueprint_cluster_binding"]["host_groups"]
actualComponentHostsMap = {}
for hostGroup in blueprintMapping:
hostGroupName = hostGroup["name"]
hostsInfos = [binding["hosts"] for binding in bindings if binding["name"] == hostGroupName][0]
hosts = [info["fqdn"] for info in hostsInfos]
for component in hostGroup["components"]:
componentName = component["name"]
try:
actualComponentHostsMap[componentName]
except KeyError, err:
actualComponentHostsMap[componentName] = []
for host in hosts:
if host not in actualComponentHostsMap[componentName]:
actualComponentHostsMap[componentName].append(host)
for componentName in componentsHostsMap.keys():
expectedHosts = componentsHostsMap[componentName]
actualHosts = actualComponentHostsMap[componentName]
self.checkEqual(expectedHosts, actualHosts)
def checkEqual(self, l1, l2):
if not len(l1) == len(l2) or not sorted(l1) == sorted(l2):
raise AssertionError("list1={0}, list2={1}".format(l1, l2))
def assertValidationResult(self, expectedItems, result):
actualItems = []
for item in result["items"]:
next = {"message": item["message"], "level": item["level"]}
try:
next["host"] = item["host"]
except KeyError, err:
pass
actualItems.append(next)
self.checkEqual(expectedItems, actualItems)
def test_recommendHbaseConfigurations(self):
servicesList = ["HBASE"]
configurations = {}
components = []
host_item = {
"Hosts" : {
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
hosts = {
"items" : [host_item for i in range(1, 300)]
}
services = {
"services" : [
],
"configurations": {
"hbase-site": {
"properties": {
"hbase.superuser": "hbase"
}
},
"hbase-env": {
"properties": {
"hbase_user": "hbase123"
}
}
}
}
expected = {
'hbase-site': {
'properties': {
'hbase.superuser': 'hbase123'
}
},
"hbase-env": {
"properties": {
"hbase_master_heapsize": "4096",
"hbase_regionserver_heapsize": "8192",
}
}
}
clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(clusterData['hbaseRam'], 8)
self.stackAdvisor.recommendHbaseConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_recommendRangerConfigurations(self):
clusterData = {}
# Recommend for not existing DB_FLAVOR and http enabled, HDP-2.3
services = {
"Versions" : {
"stack_version" : "2.3",
},
"services": [
{
"StackServices": {
"service_name": "RANGER",
"service_version": "0.5.0"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
{
"StackServices": {
"service_name": "HDFS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "NAMENODE",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
}
}
}
expected = {
"admin-properties": {
"properties": {
"policymgr_external_url": "http://host1:7777"
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test for not existing DB_FLAVOR and http enabled, HDP-2.3")
# Recommend for DB_FLAVOR POSTGRES and https enabled, HDP-2.3
configurations = {
"admin-properties": {
"properties": {
"DB_FLAVOR": "POSTGRES",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.https.port": "7777",
"ranger.service.http.enabled": "false",
}
}
}
services['configurations'] = configurations
expected = {
"admin-properties": {
"properties": {
"policymgr_external_url": "https://host1:7777"
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test for DB_FLAVOR POSTGRES and https enabled, HDP-2.3")
# Recommend for DB_FLAVOR ORACLE and https enabled, HDP-2.2
configurations = {
"admin-properties": {
"properties": {
"DB_FLAVOR": "ORACLE",
}
},
"ranger-site": {
"properties": {
"http.enabled": "false",
"https.service.port": "8888",
}
}
}
services['configurations'] = configurations
expected = {
"admin-properties": {
"properties": {
"policymgr_external_url": "https://host1:8888"
}
},
"ranger-env": {"properties": {}}
}
recommendedConfigurations = {}
services['services'][0]['StackServices']['service_version'] = "0.4.0"
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test for DB_FLAVOR ORACLE and https enabled, HDP-2.2")
# Test Recommend LDAP values
services["ambari-server-properties"] = {
"ambari.ldap.isConfigured" : "true",
"authentication.ldap.bindAnonymously" : "false",
"authentication.ldap.baseDn" : "dc=apache,dc=org",
"authentication.ldap.groupNamingAttr" : "cn",
"authentication.ldap.primaryUrl" : "c6403.ambari.apache.org:636",
"authentication.ldap.userObjectClass" : "posixAccount",
"authentication.ldap.secondaryUrl" : "c6403.ambari.apache.org:636",
"authentication.ldap.usernameAttribute" : "uid",
"authentication.ldap.dnAttribute" : "dn",
"authentication.ldap.useSSL" : "true",
"authentication.ldap.managerPassword" : "/etc/ambari-server/conf/ldap-password.dat",
"authentication.ldap.groupMembershipAttr" : "memberUid",
"authentication.ldap.groupObjectClass" : "posixGroup",
"authentication.ldap.managerDn" : "uid=hdfs,ou=people,ou=dev,dc=apache,dc=org"
}
services["configurations"] = {}
expected = {
'admin-properties': {
'properties': {
'policymgr_external_url': 'http://host1:6080',
}
},
'ranger-env': {'properties': {}},
'usersync-properties': {
'properties': {
'SYNC_LDAP_URL': 'ldaps://c6403.ambari.apache.org:636',
'SYNC_LDAP_BIND_DN': 'uid=hdfs,ou=people,ou=dev,dc=apache,dc=org',
'SYNC_LDAP_USER_OBJECT_CLASS': 'posixAccount',
'SYNC_LDAP_USER_NAME_ATTRIBUTE': 'uid'
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test Recommend LDAP values")
# Test Ranger Audit properties
del services["ambari-server-properties"]
services["configurations"] = {
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://host1:8080",
}
},
"ranger-env": {
"properties": {
"xasecure.audit.destination.db": "true",
"xasecure.audit.destination.hdfs":"false",
"xasecure.audit.destination.hdfs.dir":"hdfs://localhost:8020/ranger/audit/%app-type%/%time:yyyyMMdd%"
}
},
"ranger-hdfs-plugin-properties": {
"properties": {}
}
}
expected = {
'admin-properties': {
'properties': {
'policymgr_external_url': 'http://host1:6080'
}
},
'ranger-hdfs-plugin-properties': {
'properties': {
'XAAUDIT.HDFS.IS_ENABLED': 'false',
'XAAUDIT.HDFS.DESTINATION_DIRECTORY': 'hdfs://host1:8080/ranger/audit/%app-type%/%time:yyyyMMdd%',
'XAAUDIT.DB.IS_ENABLED': 'true'
}
},
'ranger-env': {
'properties': {
'xasecure.audit.destination.hdfs.dir': 'hdfs://host1:8080/ranger/audit/%app-type%/%time:yyyyMMdd%'
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test Ranger Audit properties")
def test_recommendHDFSConfigurations(self):
configurations = {
"hadoop-env": {
"properties": {
"hdfs_user": "hdfs",
"proxyuser_group": "users"
}
},
"hive-env": {
"properties": {
"webhcat_user": "webhcat",
"hive_user": "hive"
}
},
"oozie-env": {
"properties": {
"oozie_user": "oozie"
}
},
"falcon-env": {
"properties": {
"falcon_user": "falcon"
}
}
}
hosts = {
"items": [
{
"href": "/api/v1/hosts/host1",
"Hosts": {
"cpu_count": 1,
"host_name": "c6401.ambari.apache.org",
"os_arch": "x86_64",
"os_type": "centos6",
"ph_cpu_count": 1,
"public_host_name": "c6401.ambari.apache.org",
"rack_info": "/default-rack",
"total_mem": 2097152,
"disk_info": [{
"size": '8',
"mountpoint": "/"
}]
}
},
{
"href": "/api/v1/hosts/host2",
"Hosts": {
"cpu_count": 1,
"host_name": "c6402.ambari.apache.org",
"os_arch": "x86_64",
"os_type": "centos6",
"ph_cpu_count": 1,
"public_host_name": "c6402.ambari.apache.org",
"rack_info": "/default-rack",
"total_mem": 1048576,
"disk_info": [{
"size": '8',
"mountpoint": "/"
}]
}
},
]}
services = {
"services": [
{
"StackServices": {
"service_name": "HDFS"
}, "components": []
},
{
"StackServices": {
"service_name": "FALCON"
}, "components": []
},
{
"StackServices": {
"service_name": "HIVE"
}, "components": [{
"href": "/api/v1/stacks/HDP/versions/2.0.6/services/HIVE/components/HIVE_SERVER",
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "HIVE_SERVER",
"custom_commands": [],
"display_name": "Hive Server",
"is_client": "false",
"is_master": "true",
"service_name": "HIVE",
"stack_name": "HDP",
"stack_version": "2.0.6",
"hostnames": ["c6401.ambari.apache.org","c6402.ambari.apache.org"]
}},
{
"href": "/api/v1/stacks/HDP/versions/2.0.6/services/HIVE/components/WEBHCAT_SERVER",
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "WEBHCAT_SERVER",
"custom_commands": [],
"display_name": "WebHCat Server",
"is_client": "false",
"is_master": "true",
"service_name": "HIVE",
"stack_name": "HDP",
"stack_version": "2.0.6",
"hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
}}]
},
{
"StackServices": {
"service_name": "OOZIE"
}, "components": [{
"href": "/api/v1/stacks/HDP/versions/2.0.6/services/HIVE/components/OOZIE_SERVER",
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "OOZIE_SERVER",
"custom_commands": [],
"display_name": "Oozie Server",
"is_client": "false",
"is_master": "true",
"service_name": "HIVE",
"stack_name": "HDP",
"stack_version": "2.0.6",
"hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
}, }]
}],
"configurations": configurations,
"ambari-server-properties": {"ambari-server.user":"ambari_user"}
}
clusterData = {
"totalAvailableRam": 2048
}
ambariHostName = socket.getfqdn()
expected = {'oozie-env':
{'properties':
{'oozie_user': 'oozie'}},
'core-site':
{'properties':
{'hadoop.proxyuser.ambari_user.groups': '*',
'hadoop.proxyuser.ambari_user.hosts': ambariHostName,
'hadoop.proxyuser.oozie.groups': '*',
'hadoop.proxyuser.hive.groups': '*',
'hadoop.proxyuser.webhcat.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.hosts': '*',
'hadoop.proxyuser.webhcat.groups': '*',
'hadoop.proxyuser.hdfs.groups': '*',
'hadoop.proxyuser.hdfs.hosts': '*',
'hadoop.proxyuser.hive.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.oozie.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.groups': '*'}},
'falcon-env':
{'properties':
{'falcon_user': 'falcon'}},
'hdfs-site':
{'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024'}},
'hive-env':
{'properties':
{'hive_user': 'hive',
'webhcat_user': 'webhcat'}},
'hadoop-env':
{'properties':
{'hdfs_user': 'hdfs',
'namenode_heapsize': '1024',
'proxyuser_group': 'users',
'namenode_opt_maxnewsize': '256',
'namenode_opt_newsize': '256'}}}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
configurations["hadoop-env"]["properties"]['hdfs_user'] = "hdfs1"
changedConfigurations = [{"type":"hadoop-env",
"name":"hdfs_user",
"old_value":"hdfs"}]
services["changed-configurations"] = changedConfigurations
services['configurations'] = configurations
expected = {'oozie-env':
{'properties':
{'oozie_user': 'oozie'}},
'core-site': {'properties':
{'hadoop.proxyuser.ambari_user.groups': '*',
'hadoop.proxyuser.ambari_user.hosts': ambariHostName,
'hadoop.proxyuser.oozie.groups': '*',
'hadoop.proxyuser.hive.groups': '*',
'hadoop.proxyuser.hdfs1.groups': '*',
'hadoop.proxyuser.hdfs1.hosts': '*',
'hadoop.proxyuser.webhcat.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.hosts': '*',
'hadoop.proxyuser.webhcat.groups': '*',
'hadoop.proxyuser.hdfs.groups': '*',
'hadoop.proxyuser.hdfs.hosts': '*',
'hadoop.proxyuser.hive.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.oozie.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.groups': '*'},
'property_attributes':
{'hadoop.proxyuser.hdfs.groups': {'delete': 'true'},
'hadoop.proxyuser.hdfs.hosts': {'delete': 'true'}}},
'falcon-env':
{'properties':
{'falcon_user': 'falcon'}},
'hive-env':
{'properties':
{'hive_user': 'hive',
'webhcat_user': 'webhcat'}},
'hdfs-site':
{'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024'}},
'hadoop-env':
{'properties':
{'hdfs_user': 'hdfs1',
'namenode_heapsize': '1024',
'proxyuser_group': 'users',
'namenode_opt_maxnewsize': '256',
'namenode_opt_newsize': '256'}}}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Verify that dfs.namenode.rpc-address is recommended for deletion when NameNode HA is enabled
configurations["hdfs-site"]["properties"]['dfs.internal.nameservices'] = "mycluster"
configurations["hdfs-site"]["properties"]['dfs.ha.namenodes.mycluster'] = "nn1,nn2"
services['configurations'] = configurations
expected["hdfs-site"] = {
'properties': {
'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024',
'dfs.internal.nameservices': 'mycluster',
'dfs.ha.namenodes.mycluster': 'nn1,nn2'
},
'property_attributes': {
'dfs.namenode.rpc-address': {
'delete': 'true'
}
}
}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
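# getHostNamesWithComponent returns the hostnames registered for the given component of the given service.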
def test_getHostNamesWithComponent(self):
services = {
"services": [
{
"StackServices": {
"service_name": "SERVICE"
},
"components": [
{
"StackServiceComponents": {
"component_name": "COMPONENT",
"hostnames": ["host1","host2","host3"]
}
}
]
}
],
"configurations": {}
}
result = self.stackAdvisor.getHostNamesWithComponent("SERVICE","COMPONENT", services)
expected = ["host1","host2","host3"]
self.assertEquals(result, expected)
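# getZKHostPortString joins every ZOOKEEPER_SERVER hostname with the clientPort from zoo.cfg into a comma-separated host:port list.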
def test_getZKHostPortString(self):
configurations = {
"zoo.cfg": {
"properties": {
'clientPort': "2183"
}
}
}
services = {
"services": [
{
"StackServices": {
"service_name": "ZOOKEEPER"
},
"components": [
{
"StackServiceComponents": {
"component_name": "ZOOKEEPER_SERVER",
"hostnames": ["zk.host1","zk.host2","zk.host3"]
}
}, {
"StackServiceComponents": {
"component_name": "ZOOKEEPER_CLIENT",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": configurations
}
result = self.stackAdvisor.getZKHostPortString(services)
expected = "zk.host1:2183,zk.host2:2183,zk.host3:2183"
self.assertEquals(result, expected)
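# validateHDFSConfigurations flags dfs.datanode.du.reserved only when it is set below the recommended default.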
def test_validateHDFSConfigurations(self):
configurations = {}
services = ''
hosts = ''
# Default configuration: value equals the recommended default, no validation issues expected
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '1024'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertFalse(res)
# Value is less than expected
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '512'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertTrue(res)
# Value is bigger than expected
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '2048'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertFalse(res)
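# validateHDFSConfigurationsEnv warns when heap and new-size values are set below their recommended defaults.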
def test_validateHDFSConfigurationsEnv(self):
configurations = {}
# 1) ok: namenode_heapsize > recommended
recommendedDefaults = {'namenode_heapsize': '1024',
'namenode_opt_newsize' : '256',
'namenode_opt_maxnewsize' : '256'}
properties = {'namenode_heapsize': '2048',
'namenode_opt_newsize' : '300',
'namenode_opt_maxnewsize' : '300'}
res_expected = []
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
# 2) fail: namenode_heapsize, namenode_opt_maxnewsize < recommended
properties['namenode_heapsize'] = '1022'
properties['namenode_opt_maxnewsize'] = '255'
res_expected = [{'config-type': 'hadoop-env',
'message': 'Value is less than the recommended default of 1024',
'type': 'configuration',
'config-name': 'namenode_heapsize',
'level': 'WARN'},
{'config-name': 'namenode_opt_maxnewsize',
'config-type': 'hadoop-env',
'level': 'WARN',
'message': 'Value is less than the recommended default of 256',
'type': 'configuration'}]
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
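# validateAmsHbaseSiteConfigurations checks hbase.rootdir and hbase.tmp.dir: free disk space on the target
# partition, root-partition usage, overlap with DataNode directories, and consistency with the AMS operation mode.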
def test_validateAmsHbaseSiteConfigurations(self):
configurations = {
"hdfs-site": {
"properties": {
'dfs.datanode.data.dir': "/hadoop/data"
}
},
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
}
},
"ams-site": {
"properties": {
"timeline.metrics.service.operation.mode": "embedded"
}
}
}
recommendedDefaults = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
properties = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
host = {
"href" : "/api/v1/hosts/host1",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host1",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host1",
"rack_info" : "/default-rack",
"total_mem" : 2097152,
"disk_info": [
{
"available": str(15<<30), # 15 GB
"type": "ext4",
"mountpoint": "/"
}
]
}
}
hosts = {
"items" : [
host
]
}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "METRICS_COLLECTOR",
"hostnames": ["host1"]
}
}, {
"StackServiceComponents": {
"component_name": "METRICS_MONITOR",
"hostnames": ["host1"]
}
}
]
},
{
"StackServices": {
"service_name": "HDFS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "DATANODE",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": configurations
}
# only 1 partition, enough disk space, no warnings
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = []
self.assertEquals(res, expected)
# 1 partition, not enough disk space
host['Hosts']['disk_info'] = [
{
"available" : '1',
"type" : "ext4",
"mountpoint" : "/"
}
]
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Ambari Metrics disk space requirements not met. '
'\nRecommended disk space for partition / is 10G',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
# 2 partitions
host['Hosts']['disk_info'] = [
{
"available": str(15<<30), # 15 GB
"type" : "ext4",
"mountpoint" : "/grid/0"
},
{
"available" : str(15<<30), # 15 GB
"type" : "ext4",
"mountpoint" : "/"
}
]
recommendedDefaults = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
properties = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = []
self.assertEquals(res, expected)
# dfs.datanode.data.dir / hbase.rootdir cross-check, root-partition usage, and hbase.rootdir == hbase.tmp.dir warnings
properties = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'It is not recommended to use root partition for hbase.rootdir',
'type': 'configuration'
},
{
'config-name': 'hbase.tmp.dir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Consider not using / partition for storing metrics temporary data. '
'/ partition is already used as hbase.rootdir to store metrics data',
'type': 'configuration'
},
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Consider not using / partition for storing metrics data. '
'/ is already used by datanode to store HDFS data',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
# incorrect hbase.rootdir in distributed mode
properties = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
configurations['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'In distributed mode hbase.rootdir should point to HDFS.',
'type': 'configuration'
},
{
'config-name': 'hbase.cluster.distributed',
'config-type': 'ams-hbase-site',
'level': 'ERROR',
'message': 'hbase.cluster.distributed property should be set to true for distributed mode',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
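# With AMBARI_METRICS present, metrics.reporter.register in storm-site should point to StormTimelineMetricsReporter; an empty value produces a WARN item.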
def test_validateStormSiteConfigurations(self):
configurations = {
"storm-site": {
"properties": {
'metrics.reporter.register': "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"
}
}
}
recommendedDefaults = {
'metrics.reporter.register': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter',
}
properties = {
'metrics.reporter.register': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter',
}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
}
}
],
"configurations": configurations
}
# positive
res = self.stackAdvisor.validateStormConfigurations(properties, recommendedDefaults, configurations, services, None)
expected = []
self.assertEquals(res, expected)
properties['metrics.reporter.register'] = ''
res = self.stackAdvisor.validateStormConfigurations(properties, recommendedDefaults, configurations, services, None)
expected = [
{'config-name': 'metrics.reporter.register',
'config-type': 'storm-site',
'level': 'WARN',
'message': 'Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter '
'to report the metrics to Ambari Metrics service.',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
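# getHostsWithComponent returns the host entries for a component's hostnames; getHostWithComponent returns a single host, or None when the service or component is not present.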
def test_getHostsWithComponent(self):
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/DATANODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"DATANODE",
"custom_commands":[
],
"display_name":"DataNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1",
"host2"
]
},
"dependencies":[
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"0+",
"component_category":"SLAVE",
"component_name":"JOURNALNODE",
"custom_commands":[
],
"display_name":"JournalNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
"Dependencies":{
"component_name":"HDFS_CLIENT",
"dependent_component_name":"JOURNALNODE",
"dependent_service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2"
}
}
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/NAMENODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1-2",
"component_category":"MASTER",
"component_name":"NAMENODE",
"custom_commands":[
"DECOMMISSION",
"REBALANCEHDFS"
],
"display_name":"NameNode",
"is_client":"false",
"is_master":"true",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host2"
]
},
"dependencies":[
]
},
],
}],
"configurations": {}
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/host1",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host1",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host1",
"rack_info" : "/default-rack",
"total_mem" : 2097152
}
},
{
"href" : "/api/v1/hosts/host2",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host2",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host2",
"rack_info" : "/default-rack",
"total_mem" : 1048576
}
},
]
}
datanodes = self.stackAdvisor.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
self.assertEquals(len(datanodes), 2)
self.assertEquals(datanodes, hosts["items"])
datanode = self.stackAdvisor.getHostWithComponent("HDFS", "DATANODE", services, hosts)
self.assertEquals(datanode, hosts["items"][0])
namenodes = self.stackAdvisor.getHostsWithComponent("HDFS", "NAMENODE", services, hosts)
self.assertEquals(len(namenodes), 1)
# [host2]
self.assertEquals(namenodes, [hosts["items"][1]])
namenode = self.stackAdvisor.getHostWithComponent("HDFS", "NAMENODE", services, hosts)
# host2
self.assertEquals(namenode, hosts["items"][1])
# not installed
nodemanager = self.stackAdvisor.getHostWithComponent("YARN", "NODEMANAGER", services, hosts)
self.assertEquals(nodemanager, None)
# unknown component
unknown_component = self.stackAdvisor.getHostWithComponent("YARN", "UNKNOWN", services, hosts)
self.assertEquals(unknown_component, None)
# unknown service
unknown_component = self.stackAdvisor.getHostWithComponent("UNKNOWN", "NODEMANAGER", services, hosts)
self.assertEquals(unknown_component, None)
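# mergeValidators overlays the child stack's validators onto the parent map in place, overriding entries for the same config type and adding new services and config types.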
def test_mergeValidators(self):
childValidators = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.3"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.3"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.3",
"newconf": "new2.3"},
"NEWSERVICE" : {"newserviceconf": "abc2.3"}
}
parentValidators = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.2",
"hadoop-env": "validateHDFSConfigurationsEnv2.2"},
"YARN": {"yarn-env": "validateYARNEnvConfigurations2.2"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.2",
"hive-site": "validateHiveConfigurations2.2",
"hive-env": "validateHiveConfigurationsEnv2.2"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.2",
"hbase-env": "validateHBASEEnvConfigurations2.2"},
"MAPREDUCE2": {"mapred-site": "validateMapReduce2Configurations2.2"},
"TEZ": {"tez-site": "validateTezConfigurations2.2"}
}
expected = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.3",
"hadoop-env": "validateHDFSConfigurationsEnv2.2"},
"YARN": {"yarn-env": "validateYARNEnvConfigurations2.2"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.3",
"hive-site": "validateHiveConfigurations2.2",
"hive-env": "validateHiveConfigurationsEnv2.2"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.3",
"hbase-env": "validateHBASEEnvConfigurations2.2",
"newconf": "new2.3"},
"MAPREDUCE2": {"mapred-site": "validateMapReduce2Configurations2.2"},
"TEZ": {"tez-site": "validateTezConfigurations2.2"},
"NEWSERVICE" : {"newserviceconf": "abc2.3"}
}
self.stackAdvisor.mergeValidators(parentValidators, childValidators)
self.assertEquals(expected, parentValidators)
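# getPreferredMountPoints skips the mounts exercised below (tmpfs, vboxsf, /boot, /mnt) and orders the
# acceptable mountpoints by available space, keeping "/" as the last-resort entry.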
def test_getProperMountPoint(self):
hostInfo = None
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
hostInfo = {"some_key": []}
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
hostInfo["disk_info"] = []
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# root mountpoint with low space available
hostInfo["disk_info"].append(
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# tmpfs with more space available
hostInfo["disk_info"].append(
{
"available" : "2",
"type" : "tmpfs",
"mountpoint" : "/dev/shm"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# /boot with more space available
hostInfo["disk_info"].append(
{
"available" : "3",
"type" : "tmpfs",
"mountpoint" : "/boot/grub"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# mountpoint under /mnt with more space available
hostInfo["disk_info"].append(
{
"available" : "4",
"type" : "tmpfs",
"mountpoint" : "/mnt/external_hdd"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# virtualbox fs with more space available
hostInfo["disk_info"].append(
{
"available" : "5",
"type" : "vboxsf",
"mountpoint" : "/vagrant"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# proper mountpoint with more space available
hostInfo["disk_info"].append(
{
"available" : "6",
"type" : "ext4",
"mountpoint" : "/grid/0"
}
)
self.assertEquals(["/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# proper mountpoint with more space available
hostInfo["disk_info"].append(
{
"available" : "7",
"type" : "ext4",
"mountpoint" : "/grid/1"
}
)
self.assertEquals(["/grid/1", "/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
def test_validateNonRootFs(self):
hostInfo = {"disk_info": [
{
"available" : "2",
"type" : "ext4",
"mountpoint" : "/"
}
]}
properties = {"property1": "file:///var/dir"}
recommendedDefaults = {"property1": "file:///var/dir"}
# only / mountpoint - no warning
self.assertTrue(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo) == None)
# More preferable /grid/0 mountpoint - warning
hostInfo["disk_info"].append(
{
"available" : "3",
"type" : "ext4",
"mountpoint" : "/grid/0"
}
)
recommendedDefaults = {"property1": "file:///grid/0/var/dir"}
warn = self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo)
self.assertTrue(warn != None)
self.assertEquals({'message': 'It is not recommended to use root partition for property1', 'level': 'WARN'}, warn)
# User-set /var mountpoint, which is non-root but not preferred - no warning
hostInfo["disk_info"].append(
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/var"
}
)
self.assertTrue(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo) == None)
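# validatorEnoughDiskSpace warns when a local path's partition has less free space than required and skips the check for non-local URIs (hdfs://, wasb://).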
def test_validatorEnoughDiskSpace(self):
requiredDiskSpace = 1048576
errorMsg = "Ambari Metrics disk space requirements not met. \n" \
"Recommended disk space for partition / is 1G"
# local FS, enough space
hostInfo = {"disk_info": [
{
"available" : "1048578",
"type" : "ext4",
"mountpoint" : "/"
}
]}
properties = {"property1": "file:///var/dir"}
self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
# local FS, not enough space
hostInfo = {"disk_info": [
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/"
}
]}
warn = self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace)
self.assertTrue(warn != None)
self.assertEquals({'message': errorMsg, 'level': 'WARN'}, warn)
# non-local FS, HDFS
properties = {"property1": "hdfs://h1"}
self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
# non-local FS, WASB
properties = {"property1": "wasb://h1"}
self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
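# round_to_n rounds values to the nearest multiple of its rounding step (observed here: 1000 -> 1024, 2000 -> 2048, 4097 -> 4096).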
def test_round_to_n(self):
self.assertEquals(self.stack_advisor_impl.round_to_n(0), 0)
self.assertEquals(self.stack_advisor_impl.round_to_n(1000), 1024)
self.assertEquals(self.stack_advisor_impl.round_to_n(2000), 2048)
self.assertEquals(self.stack_advisor_impl.round_to_n(4097), 4096)
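# getMountPointForDir maps a local or file:// path to the longest matching mountpoint and returns None for non-local or relative paths.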
def test_getMountPointForDir(self):
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("/var/log", ["/"]), "/")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("/var/log", ["/var", "/"]), "/var")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("file:///var/log", ["/var", "/"]), "/var")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("hdfs:///hdfs_path", ["/var", "/"]), None)
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("relative/path", ["/var", "/"]), None)
def test_getValidatorEqualsToRecommendedItem(self):
properties = {"property1": "value1"}
recommendedDefaults = {"property1": "value1"}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), None)
properties = {"property1": "value1"}
recommendedDefaults = {"property1": "value2"}
expected = {'message': 'It is recommended to set value value2 for property property1', 'level': 'WARN'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
properties = {}
recommendedDefaults = {"property1": "value2"}
expected = {'level': 'ERROR', 'message': 'Value should be set for property1'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
properties = {"property1": "value1"}
recommendedDefaults = {}
expected = {'level': 'ERROR', 'message': 'Value should be recommended for property1'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
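# getServicesSiteProperties returns the properties dict of the requested config type from the services payload.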
def test_getServicesSiteProperties(self):
import imp, os
testDirectory = os.path.dirname(os.path.abspath(__file__))
hdp206StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
stack_advisor = imp.load_source('stack_advisor', hdp206StackAdvisorPath)
services = {
"services": [
{
"StackServices": {
"service_name": "RANGER"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
}
}
}
expected = {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
siteProperties = stack_advisor.getServicesSiteProperties(services, "ranger-admin-site")
self.assertEquals(siteProperties, expected)
def test_createComponentLayoutRecommendations_addService_1freeHost(self):
"""
Test that already installed slaves are not added to any free host (one without any component installed)
as part of the recommendation received during an Add Service operation.
For already installed services, the recommendation for installed components should match the existing layout.
"""
services = {
"services" : [
{
"StackServices" : {
"service_name" : "HDFS"
},
"components" : [ {
"StackServiceComponents" : {
"cardinality" : "1+",
"component_category" : "SLAVE",
"component_name" : "DATANODE",
"hostnames" : [ "c6401.ambari.apache.org" ]
}
} ]
} ]
}
hosts = self.prepareHosts(["c6401.ambari.apache.org", "c6402.ambari.apache.org"])
recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
"""
Recommendation received should be as below:
{
'blueprint': {
'host_groups': [{
'name': 'host-group-1',
'components': []
}, {
'name': 'host-group-2',
'components': [{
'name': 'DATANODE'
}]
}]
},
'blueprint_cluster_binding': {
'host_groups': [{
'hosts': [{
'fqdn': 'c6402.ambari.apache.org'
}],
'name': 'host-group-1'
}, {
'hosts': [{
'fqdn': 'c6401.ambari.apache.org'
}],
'name': 'host-group-2'
}]
}
}
"""
# Assert that the list is empty for host-group-1
self.assertFalse(recommendations['blueprint']['host_groups'][0]['components'])
# Assert that DATANODE is placed on host-group-2
self.assertEquals(recommendations['blueprint']['host_groups'][1]['components'][0]['name'], 'DATANODE')
},
"components": [
{
"StackServiceComponents": {
"component_name": "ZOOKEEPER_SERVER",
"hostnames": ["zk.host1","zk.host2","zk.host3"]
}
}, {
"StackServiceComponents": {
"component_name": "ZOOKEEPER_CLIENT",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": configurations
}
result = self.stackAdvisor.getZKHostPortString(services)
expected = "zk.host1:2183,zk.host2:2183,zk.host3:2183"
self.assertEquals(result, expected)
def test_validateHDFSConfigurations(self):
configurations = {}
services = ''
hosts = ''
#Default configuration
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '1024'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertFalse(res)
    #Value is less than expected
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '512'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertTrue(res)
    #Value is bigger than expected
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '2048'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertFalse(res)
def test_validateHDFSConfigurationsEnv(self):
configurations = {}
# 1) ok: namenode_heapsize > recommended
recommendedDefaults = {'namenode_heapsize': '1024',
'namenode_opt_newsize' : '256',
'namenode_opt_maxnewsize' : '256'}
properties = {'namenode_heapsize': '2048',
'namenode_opt_newsize' : '300',
'namenode_opt_maxnewsize' : '300'}
res_expected = []
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
# 2) fail: namenode_heapsize, namenode_opt_maxnewsize < recommended
properties['namenode_heapsize'] = '1022'
properties['namenode_opt_maxnewsize'] = '255'
res_expected = [{'config-type': 'hadoop-env',
'message': 'Value is less than the recommended default of 1024',
'type': 'configuration',
'config-name': 'namenode_heapsize',
'level': 'WARN'},
{'config-name': 'namenode_opt_maxnewsize',
'config-type': 'hadoop-env',
'level': 'WARN',
'message': 'Value is less than the recommended default of 256',
'type': 'configuration'}]
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
def test_validateAmsHbaseSiteConfigurations(self):
configurations = {
"hdfs-site": {
"properties": {
'dfs.datanode.data.dir': "/hadoop/data"
}
},
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
}
},
"ams-site": {
"properties": {
"timeline.metrics.service.operation.mode": "embedded"
}
}
}
recommendedDefaults = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
properties = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
host = {
"href" : "/api/v1/hosts/host1",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host1",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host1",
"rack_info" : "/default-rack",
"total_mem" : 2097152,
"disk_info": [
{
"available": str(15<<30), # 15 GB
"type": "ext4",
"mountpoint": "/"
}
]
}
}
hosts = {
"items" : [
host
]
}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "METRICS_COLLECTOR",
"hostnames": ["host1"]
}
}, {
"StackServiceComponents": {
"component_name": "METRICS_MONITOR",
"hostnames": ["host1"]
}
}
]
},
{
"StackServices": {
"service_name": "HDFS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "DATANODE",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": configurations
}
# only 1 partition, enough disk space, no warnings
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = []
self.assertEquals(res, expected)
    # 1 partition, not enough disk space
host['Hosts']['disk_info'] = [
{
"available" : '1',
"type" : "ext4",
"mountpoint" : "/"
}
]
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Ambari Metrics disk space requirements not met. '
'\nRecommended disk space for partition / is 10G',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
# 2 partitions
host['Hosts']['disk_info'] = [
{
"available": str(15<<30), # 15 GB
"type" : "ext4",
"mountpoint" : "/grid/0"
},
{
"available" : str(15<<30), # 15 GB
"type" : "ext4",
"mountpoint" : "/"
}
]
recommendedDefaults = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
properties = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = []
self.assertEquals(res, expected)
# dfs.dir & hbase.rootdir crosscheck + root partition + hbase.rootdir == hbase.tmp.dir warnings
properties = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'It is not recommended to use root partition for hbase.rootdir',
'type': 'configuration'
},
{
'config-name': 'hbase.tmp.dir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Consider not using / partition for storing metrics temporary data. '
'/ partition is already used as hbase.rootdir to store metrics data',
'type': 'configuration'
},
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Consider not using / partition for storing metrics data. '
'/ is already used by datanode to store HDFS data',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
# incorrect hbase.rootdir in distributed mode
properties = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
configurations['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'In distributed mode hbase.rootdir should point to HDFS.',
'type': 'configuration'
},
{
'config-name': 'hbase.cluster.distributed',
'config-type': 'ams-hbase-site',
'level': 'ERROR',
'message': 'hbase.cluster.distributed property should be set to true for distributed mode',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
def test_validateStormSiteConfigurations(self):
configurations = {
"storm-site": {
"properties": {
'metrics.reporter.register': "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"
}
}
}
recommendedDefaults = {
'metrics.reporter.register': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter',
}
properties = {
'metrics.reporter.register': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter',
}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
}
}
],
"configurations": configurations
}
# positive
res = self.stackAdvisor.validateStormConfigurations(properties, recommendedDefaults, configurations, services, None)
expected = []
self.assertEquals(res, expected)
properties['metrics.reporter.register'] = ''
res = self.stackAdvisor.validateStormConfigurations(properties, recommendedDefaults, configurations, services, None)
expected = [
{'config-name': 'metrics.reporter.register',
'config-type': 'storm-site',
'level': 'WARN',
'message': 'Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter '
'to report the metrics to Ambari Metrics service.',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
def test_getHostsWithComponent(self):
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/DATANODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"DATANODE",
"custom_commands":[
],
"display_name":"DataNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1",
"host2"
]
},
"dependencies":[
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"0+",
"component_category":"SLAVE",
"component_name":"JOURNALNODE",
"custom_commands":[
],
"display_name":"JournalNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
"Dependencies":{
"component_name":"HDFS_CLIENT",
"dependent_component_name":"JOURNALNODE",
"dependent_service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2"
}
}
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/NAMENODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1-2",
"component_category":"MASTER",
"component_name":"NAMENODE",
"custom_commands":[
"DECOMMISSION",
"REBALANCEHDFS"
],
"display_name":"NameNode",
"is_client":"false",
"is_master":"true",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host2"
]
},
"dependencies":[
]
},
],
}],
"configurations": {}
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/host1",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host1",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host1",
"rack_info" : "/default-rack",
"total_mem" : 2097152
}
},
{
"href" : "/api/v1/hosts/host2",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host2",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host2",
"rack_info" : "/default-rack",
"total_mem" : 1048576
}
},
]
}
datanodes = self.stackAdvisor.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
self.assertEquals(len(datanodes), 2)
self.assertEquals(datanodes, hosts["items"])
datanode = self.stackAdvisor.getHostWithComponent("HDFS", "DATANODE", services, hosts)
self.assertEquals(datanode, hosts["items"][0])
namenodes = self.stackAdvisor.getHostsWithComponent("HDFS", "NAMENODE", services, hosts)
self.assertEquals(len(namenodes), 1)
# [host2]
self.assertEquals(namenodes, [hosts["items"][1]])
namenode = self.stackAdvisor.getHostWithComponent("HDFS", "NAMENODE", services, hosts)
# host2
self.assertEquals(namenode, hosts["items"][1])
# not installed
nodemanager = self.stackAdvisor.getHostWithComponent("YARN", "NODEMANAGER", services, hosts)
self.assertEquals(nodemanager, None)
# unknown component
unknown_component = self.stackAdvisor.getHostWithComponent("YARN", "UNKNOWN", services, hosts)
    self.assertEquals(unknown_component, None)
# unknown service
unknown_component = self.stackAdvisor.getHostWithComponent("UNKNOWN", "NODEMANAGER", services, hosts)
    self.assertEquals(unknown_component, None)
def test_mergeValidators(self):
childValidators = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.3"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.3"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.3",
"newconf": "new2.3"},
"NEWSERVICE" : {"newserviceconf": "abc2.3"}
}
parentValidators = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.2",
"hadoop-env": "validateHDFSConfigurationsEnv2.2"},
"YARN": {"yarn-env": "validateYARNEnvConfigurations2.2"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.2",
"hive-site": "validateHiveConfigurations2.2",
"hive-env": "validateHiveConfigurationsEnv2.2"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.2",
"hbase-env": "validateHBASEEnvConfigurations2.2"},
"MAPREDUCE2": {"mapred-site": "validateMapReduce2Configurations2.2"},
"TEZ": {"tez-site": "validateTezConfigurations2.2"}
}
expected = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.3",
"hadoop-env": "validateHDFSConfigurationsEnv2.2"},
"YARN": {"yarn-env": "validateYARNEnvConfigurations2.2"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.3",
"hive-site": "validateHiveConfigurations2.2",
"hive-env": "validateHiveConfigurationsEnv2.2"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.3",
"hbase-env": "validateHBASEEnvConfigurations2.2",
"newconf": "new2.3"},
"MAPREDUCE2": {"mapred-site": "validateMapReduce2Configurations2.2"},
"TEZ": {"tez-site": "validateTezConfigurations2.2"},
"NEWSERVICE" : {"newserviceconf": "abc2.3"}
}
self.stackAdvisor.mergeValidators(parentValidators, childValidators)
self.assertEquals(expected, parentValidators)
def test_getProperMountPoint(self):
hostInfo = None
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
hostInfo = {"some_key": []}
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
hostInfo["disk_info"] = []
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# root mountpoint with low space available
hostInfo["disk_info"].append(
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# tmpfs with more space available
hostInfo["disk_info"].append(
{
"available" : "2",
"type" : "tmpfs",
"mountpoint" : "/dev/shm"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# /boot with more space available
hostInfo["disk_info"].append(
{
"available" : "3",
"type" : "tmpfs",
"mountpoint" : "/boot/grub"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
    # /mnt/external_hdd with more space available (still not a preferred mount point)
hostInfo["disk_info"].append(
{
"available" : "4",
"type" : "tmpfs",
"mountpoint" : "/mnt/external_hdd"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# virtualbox fs with more space available
hostInfo["disk_info"].append(
{
"available" : "5",
"type" : "vboxsf",
"mountpoint" : "/vagrant"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# proper mountpoint with more space available
hostInfo["disk_info"].append(
{
"available" : "6",
"type" : "ext4",
"mountpoint" : "/grid/0"
}
)
self.assertEquals(["/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# proper mountpoint with more space available
hostInfo["disk_info"].append(
{
"available" : "7",
"type" : "ext4",
"mountpoint" : "/grid/1"
}
)
self.assertEquals(["/grid/1", "/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
def test_validateNonRootFs(self):
hostInfo = {"disk_info": [
{
"available" : "2",
"type" : "ext4",
"mountpoint" : "/"
}
]}
properties = {"property1": "file:///var/dir"}
recommendedDefaults = {"property1": "file:///var/dir"}
# only / mountpoint - no warning
self.assertTrue(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo) == None)
# More preferable /grid/0 mountpoint - warning
hostInfo["disk_info"].append(
{
"available" : "3",
"type" : "ext4",
"mountpoint" : "/grid/0"
}
)
recommendedDefaults = {"property1": "file:///grid/0/var/dir"}
warn = self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo)
self.assertTrue(warn != None)
self.assertEquals({'message': 'It is not recommended to use root partition for property1', 'level': 'WARN'}, warn)
    # /var mountpoint set by the user is non-root, but still not preferable - no warning
hostInfo["disk_info"].append(
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/var"
}
)
self.assertTrue(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo) == None)
def test_validatorEnoughDiskSpace(self):
    requiredDiskSpace = 1048576
errorMsg = "Ambari Metrics disk space requirements not met. \n" \
"Recommended disk space for partition / is 1G"
# local FS, enough space
hostInfo = {"disk_info": [
{
"available" : "1048578",
"type" : "ext4",
"mountpoint" : "/"
}
]}
properties = {"property1": "file:///var/dir"}
    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
    # local FS, not enough space
hostInfo = {"disk_info": [
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/"
}
]}
    warn = self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace)
self.assertTrue(warn != None)
self.assertEquals({'message': errorMsg, 'level': 'WARN'}, warn)
# non-local FS, HDFS
properties = {"property1": "hdfs://h1"}
    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
# non-local FS, WASB
properties = {"property1": "wasb://h1"}
    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
def test_round_to_n(self):
self.assertEquals(self.stack_advisor_impl.round_to_n(0), 0)
self.assertEquals(self.stack_advisor_impl.round_to_n(1000), 1024)
self.assertEquals(self.stack_advisor_impl.round_to_n(2000), 2048)
self.assertEquals(self.stack_advisor_impl.round_to_n(4097), 4096)
def test_getMountPointForDir(self):
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("/var/log", ["/"]), "/")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("/var/log", ["/var", "/"]), "/var")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("file:///var/log", ["/var", "/"]), "/var")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("hdfs:///hdfs_path", ["/var", "/"]), None)
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("relative/path", ["/var", "/"]), None)
def test_getValidatorEqualsToRecommendedItem(self):
properties = {"property1": "value1"}
recommendedDefaults = {"property1": "value1"}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), None)
properties = {"property1": "value1"}
recommendedDefaults = {"property1": "value2"}
expected = {'message': 'It is recommended to set value value2 for property property1', 'level': 'WARN'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
properties = {}
recommendedDefaults = {"property1": "value2"}
expected = {'level': 'ERROR', 'message': 'Value should be set for property1'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
properties = {"property1": "value1"}
recommendedDefaults = {}
expected = {'level': 'ERROR', 'message': 'Value should be recommended for property1'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
def test_getServicesSiteProperties(self):
import imp, os
testDirectory = os.path.dirname(os.path.abspath(__file__))
hdp206StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
stack_advisor = imp.load_source('stack_advisor', hdp206StackAdvisorPath)
services = {
"services": [
{
"StackServices": {
"service_name": "RANGER"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
}
}
}
expected = {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
siteProperties = stack_advisor.getServicesSiteProperties(services, "ranger-admin-site")
self.assertEquals(siteProperties, expected)
def test_createComponentLayoutRecommendations_addService_1freeHost(self):
"""
Test that already installed slaves are not added to any free hosts (not having any component installed)
as part of recommendation received during Add service operation.
For already installed services, recommendation for installed components should match the existing layout
"""
services = {
"services" : [
{
"StackServices" : {
"service_name" : "HDFS"
},
"components" : [ {
"StackServiceComponents" : {
"cardinality" : "1+",
"component_category" : "SLAVE",
"component_name" : "DATANODE",
"hostnames" : [ "c6401.ambari.apache.org" ]
}
} ]
} ]
}
hosts = self.prepareHosts(["c6401.ambari.apache.org", "c6402.ambari.apache.org"])
recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
"""
Recommendation received should be as below:
{
'blueprint': {
'host_groups': [{
'name': 'host-group-1',
'components': []
}, {
'name': 'host-group-2',
'components': [{
'name': 'DATANODE'
}]
}]
},
'blueprint_cluster_binding': {
'host_groups': [{
'hosts': [{
'fqdn': 'c6402.ambari.apache.org'
}],
'name': 'host-group-1'
}, {
'hosts': [{
'fqdn': 'c6401.ambari.apache.org'
}],
'name': 'host-group-2'
}]
}
}
"""
# Assert that the list is empty for host-group-1
self.assertFalse(recommendations['blueprint']['host_groups'][0]['components'])
# Assert that DATANODE is placed on host-group-2
self.assertEquals(recommendations['blueprint']['host_groups'][1]['components'][0]['name'], 'DATANODE')
| en | 0.772616 | Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # substitute method in the instance #test line UID_MIN 200 UID_MIN 500 Recommend on all hosts for cardinality ALL even if the component has been installed in the cluster before # Test - Cluster data with 1 host # Test - Cluster data with 2 hosts - pick minimum memory # no AMS # with AMS # Recommend for not existing DB_FLAVOR and http enabled, HDP-2.3 # Recommend for DB_FLAVOR POSTGRES and https enabled, HDP-2.3 # Recommend for DB_FLAVOR ORACLE and https enabled, HDP-2.2 # Test Recommend LDAP values # Test Ranger Audit properties # Verify dfs.namenode.rpc-address is recommended to be deleted when NN HA #Default configuration #Value is less then expected #Value is begger then expected # 1) ok: namenode_heapsize > recommended # 2) fail: namenode_heapsize, namenode_opt_maxnewsize < recommended # 15 GB # only 1 partition, enough disk space, no warnings # 1 partition, no enough disk space # 2 partitions # 15 GB # 15 GB # dfs.dir & hbase.rootdir crosscheck + root partition + hbase.rootdir == hbase.tmp.dir warnings # incorrect hbase.rootdir in distributed mode # positive # [host2] # host2 # not installed # unknown component # unknown service # root mountpoint with low space available # tmpfs with more space available # /boot with more space available # /boot with more space available # virtualbox fs with more space available # proper mountpoint with more space available # proper mountpoint with more space available # only / mountpoint - no warning # More preferable /grid/0 mountpoint - warning # Set by user /var mountpoint, which is non-root , but not preferable - no warning # local FS, enough space # local FS, no enough space # non-local FS, HDFS # non-local FS, WASB Test that already installed slaves are not added to any free hosts (not having any component installed) as part of recommendation received during Add service operation. For already installed services, recommendation for installed components should match the existing layout Recommendation received should be as below: { 'blueprint': { 'host_groups': [{ 'name': 'host-group-1', 'components': [] }, { 'name': 'host-group-2', 'components': [{ 'name': 'DATANODE' }] }] }, 'blueprint_cluster_binding': { 'host_groups': [{ 'hosts': [{ 'fqdn': 'c6402.ambari.apache.org' }], 'name': 'host-group-1' }, { 'hosts': [{ 'fqdn': 'c6401.ambari.apache.org' }], 'name': 'host-group-2' }] } } # Assert that the list is empty for host-group-1 # Assert that DATANODE is placed on host-group-2 | 1.627717 | 2 |
debugging/code/multiprocess_main.py | awesome-archive/python-debugging-skills | 0 | 819 | # -*- encoding: utf-8 -*-
import multiprocessing as mp
import time
from pudb.remote import set_trace
def worker(worker_id):
""" Simple worker process"""
i = 0
while i < 10:
if worker_id == 1: # debug process with id 1
set_trace(term_size=(80, 24))
time.sleep(1) # represents some work
print('In Process {}, i:{}'.format(worker_id, i))
i = i + 1
if __name__ == '__main__':
processes = []
for p_id in range(2): # 2 worker processes
p = mp.Process(target=worker, args=(p_id,))
p.start()
processes.append(p)
for p in processes:
p.join()
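# --- Usage note (added for clarity; not part of the original snippet) ---
# Run the script normally; when worker 1 hits set_trace(), pudb's remote
# debugger prints a telnet address to attach to from a second terminal
# (often 127.0.0.1:6899, but follow whatever the console actually prints):
#
#   $ python multiprocess_main.py
#   $ telnet 127.0.0.1 6899
#
# A hypothetical variation - pick the debugged worker via an env var:
#
#   DEBUG_WORKER = int(os.environ.get("DEBUG_WORKER", "1"))   # needs `import os`
#   if worker_id == DEBUG_WORKER:
#       set_trace(term_size=(80, 24))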
| # -*- encoding: utf-8 -*-
import multiprocessing as mp
import time
from pudb.remote import set_trace
def worker(worker_id):
""" Simple worker process"""
i = 0
while i < 10:
if worker_id == 1: # debug process with id 1
set_trace(term_size=(80, 24))
time.sleep(1) # represents some work
print('In Process {}, i:{}'.format(worker_id, i))
i = i + 1
if __name__ == '__main__':
processes = []
for p_id in range(2): # 2 worker processes
p = mp.Process(target=worker, args=(p_id,))
p.start()
processes.append(p)
for p in processes:
p.join()
| en | 0.899526 | # -*- encoding: utf-8 -*- Simple worker process # debug process with id 1 # represents some work # 2 worker processes | 2.962076 | 3 |
lldb/packages/Python/lldbsuite/test/expression_command/anonymous-struct/TestCallUserAnonTypedef.py | bytesnake/Enzyme | 0 | 820 | <filename>lldb/packages/Python/lldbsuite/test/expression_command/anonymous-struct/TestCallUserAnonTypedef.py
"""
Test calling user defined functions using expression evaluation.
This test checks that typesystem lookup works correctly for typedefs of
untagged structures.
Ticket: https://llvm.org/bugs/show_bug.cgi?id=26790
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestExprLookupAnonStructTypedef(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
# Find the breakpoint
self.line = line_number('main.cpp', '// lldb testsuite break')
@expectedFailureAll(oslist=["windows"])
@expectedFailureAll(
oslist=['linux'],
archs=['arm'],
bugnumber="llvm.org/pr27868")
def test(self):
"""Test typedeffed untagged struct arguments for function call expressions"""
self.build()
self.runCmd("file "+self.getBuildArtifact("a.out"),
CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self,
"main.cpp",
self.line,
num_expected_locations=-1,
loc_exact=True
)
self.runCmd("run", RUN_SUCCEEDED)
self.expect("expr multiply(&s)", substrs=['$0 = 1'])
| <filename>lldb/packages/Python/lldbsuite/test/expression_command/anonymous-struct/TestCallUserAnonTypedef.py
"""
Test calling user defined functions using expression evaluation.
This test checks that typesystem lookup works correctly for typedefs of
untagged structures.
Ticket: https://llvm.org/bugs/show_bug.cgi?id=26790
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestExprLookupAnonStructTypedef(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
# Find the breakpoint
self.line = line_number('main.cpp', '// lldb testsuite break')
@expectedFailureAll(oslist=["windows"])
@expectedFailureAll(
oslist=['linux'],
archs=['arm'],
bugnumber="llvm.org/pr27868")
def test(self):
"""Test typedeffed untagged struct arguments for function call expressions"""
self.build()
self.runCmd("file "+self.getBuildArtifact("a.out"),
CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self,
"main.cpp",
self.line,
num_expected_locations=-1,
loc_exact=True
)
self.runCmd("run", RUN_SUCCEEDED)
self.expect("expr multiply(&s)", substrs=['$0 = 1'])
| en | 0.521418 | Test calling user defined functions using expression evaluation. This test checks that typesystem lookup works correctly for typedefs of untagged structures. Ticket: https://llvm.org/bugs/show_bug.cgi?id=26790 # Find the breakpoint Test typedeffed untagged struct arguments for function call expressions | 2.190689 | 2 |
main.py | ThomasDLi/simple-photo-editor | 1 | 821 | <filename>main.py
from PIL import Image, ImageEnhance
user_account_name = "Thomas.Li26"
def main():
mode = input("Specify image editing mode. Type DEEPFRY, STRETCH, BRIGHTNESS, SHARPEN, or INVERT: ")
if mode == "DEEPFRY":
DEEPFRY()
if mode == "STRETCH":
STRETCH()
if mode == "INVERT":
INVERT()
if mode == "BRIGHTNESS":
BRIGHTNESS()
if mode == "SHARPEN":
SHARPEN()
def DEEPFRY():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Contrast(im)
factor = float(input("Specify deepfry amount (0-100): "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
def STRETCH():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
factor = int(input("Specify width: "))
factor2 = int(input("Specify height: "))
im_output = im.resize((factor,factor2))
im_output.save('more-contrast-image.png')
im_output.show()
def INVERT():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Contrast(im)
im_output = enhancer.enhance(-1)
im_output.save('more-contrast-image.png')
im_output.show()
def BRIGHTNESS():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Brightness(im)
factor = float(input("Specify brightness amount: "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
def SHARPEN():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Sharpness(im)
factor = float(input("Specify sharpening amount: "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
if __name__ == "__main__":
main()
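# --- Portability sketch (added; not part of the original script) ---
# Every function above hard-codes a Windows Downloads folder for one account
# name. A more portable way to build that path, should it be needed, is:
#
#   import os
#   downloads = os.path.join(os.path.expanduser("~"), "Downloads")
#   im = Image.open(os.path.join(downloads, img))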
| <filename>main.py
from PIL import Image, ImageEnhance
user_account_name = "Thomas.Li26"
def main():
mode = input("Specify image editing mode. Type DEEPFRY, STRETCH, BRIGHTNESS, SHARPEN, or INVERT: ")
if mode == "DEEPFRY":
DEEPFRY()
if mode == "STRETCH":
STRETCH()
if mode == "INVERT":
INVERT()
if mode == "BRIGHTNESS":
BRIGHTNESS()
if mode == "SHARPEN":
SHARPEN()
def DEEPFRY():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Contrast(im)
factor = float(input("Specify deepfry amount (0-100): "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
def STRETCH():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
factor = int(input("Specify width: "))
factor2 = int(input("Specify height: "))
im_output = im.resize((factor,factor2))
im_output.save('more-contrast-image.png')
im_output.show()
def INVERT():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Contrast(im)
im_output = enhancer.enhance(-1)
im_output.save('more-contrast-image.png')
im_output.show()
def BRIGHTNESS():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Brightness(im)
factor = float(input("Specify brightness amount: "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
def SHARPEN():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Sharpness(im)
factor = float(input("Specify sharpening amount: "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
if __name__ == "__main__":
main()
| none | 1 | 3.589193 | 4 |
|
scripts/field/Curbrock_Summon1.py | G00dBye/YYMS | 54 | 822 | <reponame>G00dBye/YYMS<gh_stars>10-100
# Curbrock Summon 2
CURBROCK2 = 9400930 # MOD ID
CURBROCKS_ESCAPE_ROUTE_VER2 = 600050040 # MAP ID
CURBROCKS_ESCAPE_ROUTE_VER3 = 600050050 # MAP ID 2
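# Rough reading of the script below (added comment; the sm.* engine semantics
# are inferred from the method names): spawn the boss, start an 1800-second
# clock, schedule a forced warp to the VER3 map after 30 minutes (1800000 ms),
# then wait for the boss to die, warp to the VER2 map, and cancel pending events.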
sm.spawnMob(CURBROCK2, 190, -208, False)
sm.createClock(1800)
sm.addEvent(sm.invokeAfterDelay(1800000, "warp", CURBROCKS_ESCAPE_ROUTE_VER3, 0))
sm.waitForMobDeath(CURBROCK2)
sm.warp(CURBROCKS_ESCAPE_ROUTE_VER2)
sm.stopEvents() | # Curbrock Summon 2
CURBROCK2 = 9400930 # MOD ID
CURBROCKS_ESCAPE_ROUTE_VER2 = 600050040 # MAP ID
CURBROCKS_ESCAPE_ROUTE_VER3 = 600050050 # MAP ID 2
sm.spawnMob(CURBROCK2, 190, -208, False)
sm.createClock(1800)
sm.addEvent(sm.invokeAfterDelay(1800000, "warp", CURBROCKS_ESCAPE_ROUTE_VER3, 0))
sm.waitForMobDeath(CURBROCK2)
sm.warp(CURBROCKS_ESCAPE_ROUTE_VER2)
sm.stopEvents() | en | 0.270833 | # Curbrock Summon 2 # MOD ID # MAP ID # MAP ID 2 | 1.21848 | 1 |
trainloops/listeners/cluster_killswitch.py | Gerryflap/master_thesis | 0 | 823 | <reponame>Gerryflap/master_thesis
"""
Cancelling jobs on the University cluster forces programs to instantly quit,
which sometimes crashes cluster nodes.
As a remedy, this killswitch listener will stop the experiment in a nicer way to prevent this from happening.
The experiment will be stopped if a file named "stop" is encountered in the results folder of the experiment.
The existence of this file is checked after each epoch.
"""
import os
from trainloops.listeners.listener import Listener
class KillSwitchListener(Listener):
def __init__(self, experiment_path):
super().__init__()
self.path = os.path.join(experiment_path, "stop")
def initialize(self):
pass
def report(self, state_dict):
if os.path.exists(self.path):
exit()
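# --- Hedged usage sketch (added; assumes the trainloops package imports resolve) ---
# The listener is constructed with the experiment's results directory and the
# train loop is expected to call report() once per epoch, so creating a file
# named "stop" in that directory ends the run after the current epoch.
if __name__ == "__main__":
    listener = KillSwitchListener("results/example_run")  # hypothetical path
    listener.initialize()
    listener.report(state_dict={})  # exits only if results/example_run/stop exists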
| """
Cancelling jobs on the University cluster forces programs to instantly quit,
which sometimes crashes cluster nodes.
As a remedy, this killswitch listener will stop the experiment in a nicer way to prevent this from happening.
The experiment will be stopped if a file named "stop" is encountered in the results folder of the experiment.
The existence of this file is checked after each epoch.
"""
import os
from trainloops.listeners.listener import Listener
class KillSwitchListener(Listener):
def __init__(self, experiment_path):
super().__init__()
self.path = os.path.join(experiment_path, "stop")
def initialize(self):
pass
def report(self, state_dict):
if os.path.exists(self.path):
exit() | en | 0.923372 | Cancelling jobs on the University cluster forces programs to instantly quit, which sometimes crashes cluster nodes. As a remedy, this killswitch listener will stop the experiment in a nicer way to prevent this from happening. The experiment will be stopped if a file named "stop" is encountered in the results folder of the experiment. The existence of this file is checked after each epoch. | 2.800732 | 3 |
authors/apps/notifications/views.py | andela/ah-backend-spaces- | 2 | 824 | <gh_stars>1-10
from rest_framework import status
from rest_framework.generics import (
RetrieveUpdateAPIView, CreateAPIView,
RetrieveUpdateDestroyAPIView
)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from ..authentication.backends import JWTAuthentication
from ..authentication.models import User
from .models import Notifications
from .renderers import (
NotificationsJSONRenderer
)
from .serializers import (
NotificationsAPIViewSerializer, GetNotificationsAPIViewSerializer
)
class NotificationsAPIView(RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated,)
renderer_classes = (NotificationsJSONRenderer,)
def put(self, request):
"""
        Update the read status of a user's notifications.
"""
serializer_class = NotificationsAPIViewSerializer
notification = request.data.get('notification', {})
user_data = JWTAuthentication().authenticate(request)
        # append user_id from the token to the notification payload for later validations in serializers
notification["user_id"] = user_data[1]
serializer = serializer_class(data=notification)
serializer.is_valid(raise_exception=True)
        # update the notification read status to True
serializer.update_read_status(serializer.data["notifications"])
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get(self, request):
"""
retrieve all notifications of a user
"""
# decode users authentication token
user_data = JWTAuthentication().authenticate(request)
# get user notifications details from the Notifications table in the database
notifications = Notifications.objects.filter(notification_owner=user_data[1]).values(
"id", "article_id", "notification_title", "notification_body",
"notification_owner", "read_status"
)
# create a list of notifications
# the action below is done by use of list comprehension
list_of_notifications = [i for i in notifications]
return Response({"notifications": list_of_notifications}, status=status.HTTP_200_OK)
| from rest_framework import status
from rest_framework.generics import (
RetrieveUpdateAPIView, CreateAPIView,
RetrieveUpdateDestroyAPIView
)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from ..authentication.backends import JWTAuthentication
from ..authentication.models import User
from .models import Notifications
from .renderers import (
NotificationsJSONRenderer
)
from .serializers import (
NotificationsAPIViewSerializer, GetNotificationsAPIViewSerializer
)
class NotificationsAPIView(RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated,)
renderer_classes = (NotificationsJSONRenderer,)
def put(self, request):
"""
        Update the read status of a user's notifications.
"""
serializer_class = NotificationsAPIViewSerializer
notification = request.data.get('notification', {})
user_data = JWTAuthentication().authenticate(request)
        # append user_id from the token to the notification payload for later validations in serializers
notification["user_id"] = user_data[1]
serializer = serializer_class(data=notification)
serializer.is_valid(raise_exception=True)
        # update the notification read status to True
serializer.update_read_status(serializer.data["notifications"])
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get(self, request):
"""
retrieve all notifications of a user
"""
# decode users authentication token
user_data = JWTAuthentication().authenticate(request)
# get user notifications details from the Notifications table in the database
notifications = Notifications.objects.filter(notification_owner=user_data[1]).values(
"id", "article_id", "notification_title", "notification_body",
"notification_owner", "read_status"
)
# create a list of notifications
# the action below is done by use of list comprehension
list_of_notifications = [i for i in notifications]
return Response({"notifications": list_of_notifications}, status=status.HTTP_200_OK) | en | 0.779426 | This class method is used to update a users article # append user_id from token to article variable for later validations in serializers # update the notification statue to True retrieve all notifications of a user # decode users authentication token # get user notifications details from the Notifications table in the database # create a list of notifications # the action below is done by use of list comprehension | 2.454758 | 2 |
scripts/common/frozendict.py | bopopescu/build | 0 | 825 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a frozen dictionary-like object"""
import collections
import copy
import common.memo as memo
class frozendict(collections.Mapping):
"""A frozen dictionary class"""
def __init__(self, *args, **kwargs):
self._data = dict(*args, **kwargs)
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
@memo.memo_i()
def __hash__(self):
return hash(self.itemtuple())
def __str__(self):
return str(self._data)
def __repr__(self):
return '%s(%s)' % (type(self).__name__, str(self))
def __eq__(self, other):
return self._data == other
def __ne__(self, other):
return not self == other
def __deepcopy__(self, _memo):
return copy.deepcopy(self._data)
@memo.memo_i()
def itemtuple(self):
return tuple(sorted(self.iteritems()))
def mutableDict(self):
"""
Returns a mutable dictionary copy, replacing 'frozendict' with 'dict's.
This function uses the 'copy.deepcopy' method to create a mutable deep copy
of the dictionary.
Note that due to the one-size-fits-all behavior of 'deepcopy', the result
can be anything from heavyhanded to incorrect depending on the contents of
the dictionary. The caller should make sure they understand the operation
and its behavior on all of the dictionary's subtypes before using it.
Returns: (dict) A mutable clone of the dictionary and its members.
"""
return copy.deepcopy(self)
def extend(self, **kwargs):
"""Returns a copy of this object with the 'kwargs' fields updated."""
ndata = self.mutableDict()
ndata.update(kwargs)
return type(self)(**ndata)
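# --- Hedged usage sketch (added; assumes common.memo is importable here) ---
# frozendict is hashable, so instances can key dicts and sets, and extend()
# returns a modified copy instead of mutating in place.
if __name__ == '__main__':
    base = frozendict(a=1, b=2)
    bigger = base.extend(c=3)                        # copy with one extra key
    lookup = {base: 'original', bigger: 'extended'}  # frozendicts as dict keys
    print(lookup[base], sorted(bigger.items()))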
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a frozen dictionary-like object"""
import collections
import copy
import common.memo as memo
class frozendict(collections.Mapping):
"""A frozen dictionary class"""
def __init__(self, *args, **kwargs):
self._data = dict(*args, **kwargs)
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
@memo.memo_i()
def __hash__(self):
return hash(self.itemtuple())
def __str__(self):
return str(self._data)
def __repr__(self):
return '%s(%s)' % (type(self).__name__, str(self))
def __eq__(self, other):
return self._data == other
def __ne__(self, other):
return not self == other
def __deepcopy__(self, _memo):
return copy.deepcopy(self._data)
@memo.memo_i()
def itemtuple(self):
return tuple(sorted(self.iteritems()))
def mutableDict(self):
"""
Returns a mutable dictionary copy, replacing 'frozendict' with 'dict's.
This function uses the 'copy.deepcopy' method to create a mutable deep copy
of the dictionary.
Note that due to the one-size-fits-all behavior of 'deepcopy', the result
can be anything from heavyhanded to incorrect depending on the contents of
the dictionary. The caller should make sure they understand the operation
and its behavior on all of the dictionary's subtypes before using it.
Returns: (dict) A mutable clone of the dictionary and its members.
"""
return copy.deepcopy(self)
def extend(self, **kwargs):
"""Returns a copy of this object with the 'kwargs' fields updated."""
ndata = self.mutableDict()
ndata.update(kwargs)
return type(self)(**ndata)
| en | 0.870293 | # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Implements a frozen dictionary-like object A frozen dictionary class Returns a mutable dictionary copy, replacing 'frozendict' with 'dict's. This function uses the 'copy.deepcopy' method to create a mutable deep copy of the dictionary. Note that due to the one-size-fits-all behavior of 'deepcopy', the result can be anything from heavyhanded to incorrect depending on the contents of the dictionary. The caller should make sure they understand the operation and its behavior on all of the dictionary's subtypes before using it. Returns: (dict) A mutable clone of the dictionary and its members. Returns a copy of this object with the 'kwargs' fields updated. | 3.126149 | 3 |
avatar/__init__.py | yogeshkheri/geonode-avatar | 3 | 826 | <reponame>yogeshkheri/geonode-avatar
__version__ = '5.0.2'
| __version__ = '5.0.2' | none | 1 | 1.054056 | 1 |
|
__init__.py | mmanganel/neurecon | 0 | 827 | from neurecon.reconstruction import reconstruct
| from neurecon.reconstruction import reconstruct
| none | 1 | 0.986902 | 1 |
|
Fundamentals/Reversed Strings.py | gnvidal/Codewars | 49 | 828 | def solution(string):
return string[::-1]
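# Quick illustration (added): slicing with a step of -1 walks the string
# backwards, e.g. solution("world") returns "dlrow".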
| def solution(string):
return string[::-1]
| none | 1 | 1.720679 | 2 |
|
python/flexflow/keras/datasets/cifar.py | zmxdream/FlexFlow | 455 | 829 | # -*- coding: utf-8 -*-
"""Utilities common to CIFAR10 and CIFAR100 datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from six.moves import cPickle
def load_batch(fpath, label_key='labels'):
"""Internal utility for parsing CIFAR data.
# Arguments
        fpath: path of the file to parse.
        label_key: key for label data in the retrieved
dictionary.
# Returns
A tuple `(data, labels)`.
"""
with open(fpath, 'rb') as f:
if sys.version_info < (3,):
d = cPickle.load(f)
else:
d = cPickle.load(f, encoding='bytes')
# decode utf8
d_decoded = {}
for k, v in d.items():
d_decoded[k.decode('utf8')] = v
d = d_decoded
data = d['data']
labels = d[label_key]
data = data.reshape(data.shape[0], 3, 32, 32)
return data, labels | # -*- coding: utf-8 -*-
"""Utilities common to CIFAR10 and CIFAR100 datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from six.moves import cPickle
def load_batch(fpath, label_key='labels'):
"""Internal utility for parsing CIFAR data.
# Arguments
        fpath: path of the file to parse.
        label_key: key for label data in the retrieved
dictionary.
# Returns
A tuple `(data, labels)`.
"""
with open(fpath, 'rb') as f:
if sys.version_info < (3,):
d = cPickle.load(f)
else:
d = cPickle.load(f, encoding='bytes')
# decode utf8
d_decoded = {}
for k, v in d.items():
d_decoded[k.decode('utf8')] = v
d = d_decoded
data = d['data']
labels = d[label_key]
data = data.reshape(data.shape[0], 3, 32, 32)
return data, labels | en | 0.673146 | # -*- coding: utf-8 -*- Utilities common to CIFAR10 and CIFAR100 datasets. Internal utility for parsing CIFAR data. # Arguments fpath: path the file to parse. label_key: key for label data in the retrieve dictionary. # Returns A tuple `(data, labels)`. # decode utf8 | 2.242605 | 2 |
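# --- Hedged usage sketch for load_batch above (added; the batch path is hypothetical) ---
#   data, labels = load_batch("cifar-10-batches-py/data_batch_1")
#   # data: uint8 array of shape (num_images, 3, 32, 32); labels: matching list of ints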
day7/p2.py | Seralpa/Advent-of-code | 1 | 830 | <gh_stars>1-10
def getNumBags(color):
if color=='':
return 0
numBags=1
for bag in rules[color]:
numBags+=bag[1]*getNumBags(bag[0])
return numBags
with open('day7/input.txt') as f:
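    # Parse every rule into {outer color: "N inner color, ..."} by dropping the
    # literal words "bag(s)" and trailing periods, and rewriting "no other" as a
    # zero count so the per-bag parser below always sees a leading number.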
rules=dict([l.split(' contain') for l in f.read().replace(' bags', '').replace(' bag', '').replace('.', '').replace(' no other', '0 ').splitlines()])
for key in rules:
rules[key]=[(d[2:].strip(), int(d[:2].strip())) for d in rules[key].split(', ')]
print(getNumBags('shiny gold')-1) #-1 cause shiny bag not included | def getNumBags(color):
if color=='':
return 0
numBags=1
for bag in rules[color]:
numBags+=bag[1]*getNumBags(bag[0])
return numBags
with open('day7/input.txt') as f:
rules=dict([l.split(' contain') for l in f.read().replace(' bags', '').replace(' bag', '').replace('.', '').replace(' no other', '0 ').splitlines()])
for key in rules:
rules[key]=[(d[2:].strip(), int(d[:2].strip())) for d in rules[key].split(', ')]
print(getNumBags('shiny gold')-1) #-1 cause shiny bag not included | en | 0.947985 | #-1 cause shiny bag not included | 3.077073 | 3 |
adv/luther.py | 6tennis/dl | 0 | 831 | from core.advbase import *
from slot.d import *
def module():
return Luther
class Luther(Adv):
a1 = ('cc',0.10,'hit15')
conf = {}
conf ['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s1
`s2, seq=5 and cancel
`s3, seq=5 and cancel or fsc
`fs, seq=5
"""
coab = ['Blade', 'Xander', 'Tiki']
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv) | from core.advbase import *
from slot.d import *
def module():
return Luther
class Luther(Adv):
a1 = ('cc',0.10,'hit15')
conf = {}
conf ['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s1
`s2, seq=5 and cancel
`s3, seq=5 and cancel or fsc
`fs, seq=5
"""
coab = ['Blade', 'Xander', 'Tiki']
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv) | en | 0.5972 | `dragon `s1 `s2, seq=5 and cancel `s3, seq=5 and cancel or fsc `fs, seq=5 | 1.914287 | 2 |
wandb/sdk/data_types/image.py | macio232/client | 0 | 832 | import hashlib
from io import BytesIO
import logging
import os
from typing import Any, cast, Dict, List, Optional, Sequence, Type, TYPE_CHECKING, Union
from pkg_resources import parse_version
import wandb
from wandb import util
from ._private import MEDIA_TMP
from .base_types.media import BatchableMedia, Media
from .helper_types.bounding_boxes_2d import BoundingBoxes2D
from .helper_types.classes import Classes
from .helper_types.image_mask import ImageMask
if TYPE_CHECKING: # pragma: no cover
import matplotlib # type: ignore
import numpy as np # type: ignore
import PIL # type: ignore
import torch # type: ignore
from wandb.apis.public import Artifact as PublicArtifact
from ..wandb_artifacts import Artifact as LocalArtifact
from ..wandb_run import Run as LocalRun
ImageDataType = Union[
"matplotlib.artist.Artist", "PIL.Image", "TorchTensorType", "np.ndarray"
]
ImageDataOrPathType = Union[str, "Image", ImageDataType]
TorchTensorType = Union["torch.Tensor", "torch.Variable"]
def _server_accepts_image_filenames() -> bool:
# Newer versions of wandb accept large image filenames arrays
# but older versions would have issues with this.
max_cli_version = util._get_max_cli_version()
if max_cli_version is None:
return False
return parse_version("0.12.10") <= parse_version(max_cli_version)
class Image(BatchableMedia):
"""Format images for logging to W&B.
Arguments:
data_or_path: (numpy array, string, io) Accepts numpy array of
image data, or a PIL image. The class attempts to infer
the data format and converts it.
mode: (string) The PIL mode for an image. Most common are "L", "RGB",
"RGBA". Full explanation at https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes.
caption: (string) Label for display of image.
Examples:
### Create a wandb.Image from a numpy array
<!--yeadoc-test:log-image-numpy->
```python
import numpy as np
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3))
image = wandb.Image(pixels, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
### Create a wandb.Image from a PILImage
<!--yeadoc-test:log-image-pil->
```python
import numpy as np
from PIL import Image as PILImage
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
pil_image = PILImage.fromarray(pixels, mode="RGB")
image = wandb.Image(pil_image, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
"""
MAX_ITEMS = 108
# PIL limit
MAX_DIMENSION = 65500
_log_type = "image-file"
format: Optional[str]
_grouping: Optional[int]
_caption: Optional[str]
_width: Optional[int]
_height: Optional[int]
_image: Optional["PIL.Image"]
_classes: Optional["Classes"]
_boxes: Optional[Dict[str, "BoundingBoxes2D"]]
_masks: Optional[Dict[str, "ImageMask"]]
def __init__(
self,
data_or_path: "ImageDataOrPathType",
mode: Optional[str] = None,
caption: Optional[str] = None,
grouping: Optional[int] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
super(Image, self).__init__()
# TODO: We should remove grouping, it's a terrible name and I don't
# think anyone uses it.
self._grouping = None
self._caption = None
self._width = None
self._height = None
self._image = None
self._classes = None
self._boxes = None
self._masks = None
# Allows the user to pass an Image object as the first parameter and have a perfect copy,
        # only overriding additional metadata passed in. If this pattern is compelling, we can generalize.
if isinstance(data_or_path, Image):
self._initialize_from_wbimage(data_or_path)
elif isinstance(data_or_path, str):
self._initialize_from_path(data_or_path)
else:
self._initialize_from_data(data_or_path, mode)
self._set_initialization_meta(grouping, caption, classes, boxes, masks)
def _set_initialization_meta(
self,
grouping: Optional[int] = None,
caption: Optional[str] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
if grouping is not None:
self._grouping = grouping
if caption is not None:
self._caption = caption
total_classes = {}
if boxes:
if not isinstance(boxes, dict):
raise ValueError('Images "boxes" argument must be a dictionary')
boxes_final: Dict[str, BoundingBoxes2D] = {}
for key in boxes:
box_item = boxes[key]
if isinstance(box_item, BoundingBoxes2D):
boxes_final[key] = box_item
elif isinstance(box_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
boxes_final[key] = BoundingBoxes2D(box_item, key)
total_classes.update(boxes_final[key]._class_labels)
self._boxes = boxes_final
if masks:
if not isinstance(masks, dict):
raise ValueError('Images "masks" argument must be a dictionary')
masks_final: Dict[str, ImageMask] = {}
for key in masks:
mask_item = masks[key]
if isinstance(mask_item, ImageMask):
masks_final[key] = mask_item
elif isinstance(mask_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
masks_final[key] = ImageMask(mask_item, key)
if hasattr(masks_final[key], "_val"):
total_classes.update(masks_final[key]._val["class_labels"])
self._masks = masks_final
if classes is not None:
if isinstance(classes, Classes):
total_classes.update(
{val["id"]: val["name"] for val in classes._class_set}
)
else:
total_classes.update({val["id"]: val["name"] for val in classes})
if len(total_classes.keys()) > 0:
self._classes = Classes(
[
{"id": key, "name": total_classes[key]}
for key in total_classes.keys()
]
)
self._width, self._height = self.image.size # type: ignore
self._free_ram()
def _initialize_from_wbimage(self, wbimage: "Image") -> None:
self._grouping = wbimage._grouping
self._caption = wbimage._caption
self._width = wbimage._width
self._height = wbimage._height
self._image = wbimage._image
self._classes = wbimage._classes
self._path = wbimage._path
self._is_tmp = wbimage._is_tmp
self._extension = wbimage._extension
self._sha256 = wbimage._sha256
self._size = wbimage._size
self.format = wbimage.format
self._artifact_source = wbimage._artifact_source
self._artifact_target = wbimage._artifact_target
# We do not want to implicitly copy boxes or masks, just the image-related data.
# self._boxes = wbimage._boxes
# self._masks = wbimage._masks
def _initialize_from_path(self, path: str) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._set_file(path, is_tmp=False)
self._image = pil_image.open(path)
self._image.load()
ext = os.path.splitext(path)[1][1:]
self.format = ext
def _initialize_from_data(self, data: "ImageDataType", mode: str = None,) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
if util.is_matplotlib_typename(util.get_full_typename(data)):
buf = BytesIO()
util.ensure_matplotlib_figure(data).savefig(buf)
self._image = pil_image.open(buf)
elif isinstance(data, pil_image.Image):
self._image = data
elif util.is_pytorch_tensor_typename(util.get_full_typename(data)):
vis_util = util.get_module(
"torchvision.utils", "torchvision is required to render images"
)
if hasattr(data, "requires_grad") and data.requires_grad:
data = data.detach()
data = vis_util.make_grid(data, normalize=True)
self._image = pil_image.fromarray(
data.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
)
else:
if hasattr(data, "numpy"): # TF data eager tensors
data = data.numpy()
if data.ndim > 2:
data = data.squeeze() # get rid of trivial dimensions as a convenience
self._image = pil_image.fromarray(
self.to_uint8(data), mode=mode or self.guess_mode(data)
)
tmp_path = os.path.join(MEDIA_TMP.name, str(util.generate_id()) + ".png")
self.format = "png"
self._image.save(tmp_path, transparency=None)
self._set_file(tmp_path, is_tmp=True)
@classmethod
def from_json(
cls: Type["Image"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "Image":
classes = None
if json_obj.get("classes") is not None:
classes = source_artifact.get(json_obj["classes"]["path"])
masks = json_obj.get("masks")
_masks: Optional[Dict[str, ImageMask]] = None
if masks:
_masks = {}
for key in masks:
_masks[key] = ImageMask.from_json(masks[key], source_artifact)
_masks[key]._set_artifact_source(source_artifact)
_masks[key]._key = key
boxes = json_obj.get("boxes")
_boxes: Optional[Dict[str, BoundingBoxes2D]] = None
if boxes:
_boxes = {}
for key in boxes:
_boxes[key] = BoundingBoxes2D.from_json(boxes[key], source_artifact)
_boxes[key]._key = key
return cls(
source_artifact.get_path(json_obj["path"]).download(),
caption=json_obj.get("caption"),
grouping=json_obj.get("grouping"),
classes=classes,
boxes=_boxes,
masks=_masks,
)
@classmethod
def get_media_subdir(cls: Type["Image"]) -> str:
return os.path.join("media", "images")
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
ignore_copy_err: Optional[bool] = None,
) -> None:
super().bind_to_run(run, key, step, id_, ignore_copy_err=ignore_copy_err)
if self._boxes is not None:
for i, k in enumerate(self._boxes):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._boxes[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
if self._masks is not None:
for i, k in enumerate(self._masks):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._masks[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Image, self).to_json(run_or_artifact)
json_dict["_type"] = Image._log_type
json_dict["format"] = self.format
if self._width is not None:
json_dict["width"] = self._width
if self._height is not None:
json_dict["height"] = self._height
if self._grouping:
json_dict["grouping"] = self._grouping
if self._caption:
json_dict["caption"] = self._caption
if isinstance(run_or_artifact, wandb.wandb_sdk.wandb_artifacts.Artifact):
artifact = run_or_artifact
if (
self._masks is not None or self._boxes is not None
) and self._classes is None:
raise ValueError(
"classes must be passed to wandb.Image which have masks or bounding boxes when adding to artifacts"
)
if self._classes is not None:
class_id = hashlib.md5(
str(self._classes._class_set).encode("utf-8")
).hexdigest()
class_name = os.path.join("media", "classes", class_id + "_cls",)
classes_entry = artifact.add(self._classes, class_name)
json_dict["classes"] = {
"type": "classes-file",
"path": classes_entry.path,
"digest": classes_entry.digest,
}
elif not isinstance(run_or_artifact, wandb.wandb_sdk.wandb_run.Run):
raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
if self._boxes:
json_dict["boxes"] = {
k: box.to_json(run_or_artifact) for (k, box) in self._boxes.items()
}
if self._masks:
json_dict["masks"] = {
k: mask.to_json(run_or_artifact) for (k, mask) in self._masks.items()
}
return json_dict
def guess_mode(self, data: "np.ndarray") -> str:
"""
Guess what type of image the np.array is representing
"""
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
return "L"
elif data.shape[-1] == 3:
return "RGB"
elif data.shape[-1] == 4:
return "RGBA"
else:
raise ValueError(
"Un-supported shape for image conversion %s" % list(data.shape)
)
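    # Illustrative behaviour of guess_mode above (assumed array shapes, not part of the original class):
    #   data.shape == (H, W)    -> "L"
    #   data.shape == (H, W, 3) -> "RGB"
    #   data.shape == (H, W, 4) -> "RGBA"
    #   anything else           -> ValueError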
@classmethod
def to_uint8(cls, data: "np.ndarray") -> "np.ndarray":
"""
Converts floating point image on the range [0,1] and integer images
on the range [0,255] to uint8, clipping if necessary.
"""
np = util.get_module(
"numpy",
required="wandb.Image requires numpy if not supplying PIL Images: pip install numpy",
)
# I think it's better to check the image range vs the data type, since many
# image libraries will return floats between 0 and 255
# some images have range -1...1 or 0-1
dmin = np.min(data)
if dmin < 0:
data = (data - np.min(data)) / np.ptp(data)
if np.max(data) <= 1.0:
data = (data * 255).astype(np.int32)
# assert issubclass(data.dtype.type, np.integer), 'Illegal image format.'
return data.clip(0, 255).astype(np.uint8)
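    # Illustrative behaviour of to_uint8 above (assumed inputs, not part of the original class):
    #   to_uint8(np.array([0.0, 0.5, 1.0]))  -> array([  0, 127, 255], dtype=uint8)   (scaled from [0, 1])
    #   to_uint8(np.array([-1.0, 0.0, 1.0])) -> array([  0, 127, 255], dtype=uint8)   (shifted to [0, 1] first)
    #   to_uint8(np.array([0, 128, 300]))    -> array([  0, 128, 255], dtype=uint8)   (clipped only)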
@classmethod
def seq_to_json(
cls: Type["Image"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
"""
Combines a list of images into a meta dictionary object describing the child images.
"""
if TYPE_CHECKING:
seq = cast(Sequence["Image"], seq)
jsons = [obj.to_json(run) for obj in seq]
media_dir = cls.get_media_subdir()
for obj in jsons:
expected = util.to_forward_slash_path(media_dir)
if not obj["path"].startswith(expected):
raise ValueError(
"Files in an array of Image's must be in the {} directory, not {}".format(
cls.get_media_subdir(), obj["path"]
)
)
num_images_to_log = len(seq)
width, height = seq[0].image.size # type: ignore
format = jsons[0]["format"]
def size_equals_image(image: "Image") -> bool:
img_width, img_height = image.image.size # type: ignore
return img_width == width and img_height == height # type: ignore
sizes_match = all(size_equals_image(img) for img in seq)
if not sizes_match:
logging.warning(
"Images sizes do not match. This will causes images to be display incorrectly in the UI."
)
meta = {
"_type": "images/separated",
"width": width,
"height": height,
"format": format,
"count": num_images_to_log,
}
if _server_accepts_image_filenames():
meta["filenames"] = [obj["path"] for obj in jsons]
else:
wandb.termwarn(
"Unable to log image array filenames. In some cases, this can prevent images from being"
"viewed in the UI. Please upgrade your wandb server",
repeat=False,
)
captions = Image.all_captions(seq)
if captions:
meta["captions"] = captions
all_masks = Image.all_masks(seq, run, key, step)
if all_masks:
meta["all_masks"] = all_masks
all_boxes = Image.all_boxes(seq, run, key, step)
if all_boxes:
meta["all_boxes"] = all_boxes
return meta
@classmethod
def all_masks(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_mask_groups: List[Optional[dict]] = []
for image in images:
if image._masks:
mask_group = {}
for k in image._masks:
mask = image._masks[k]
mask_group[k] = mask.to_json(run)
all_mask_groups.append(mask_group)
else:
all_mask_groups.append(None)
if all_mask_groups and not all(x is None for x in all_mask_groups):
return all_mask_groups
else:
return False
@classmethod
def all_boxes(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_box_groups: List[Optional[dict]] = []
for image in images:
if image._boxes:
box_group = {}
for k in image._boxes:
box = image._boxes[k]
box_group[k] = box.to_json(run)
all_box_groups.append(box_group)
else:
all_box_groups.append(None)
if all_box_groups and not all(x is None for x in all_box_groups):
return all_box_groups
else:
return False
@classmethod
def all_captions(
cls: Type["Image"], images: Sequence["Media"]
) -> Union[bool, Sequence[Optional[str]]]:
return cls.captions(images)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Image):
return False
else:
self_image = self.image
other_image = other.image
if self_image is not None:
self_image = list(self_image.getdata())
if other_image is not None:
other_image = list(other_image.getdata())
return (
self._grouping == other._grouping
and self._caption == other._caption
and self._width == other._width
and self._height == other._height
and self_image == other_image
and self._classes == other._classes
)
def to_data_array(self) -> List[Any]:
res = []
if self.image is not None:
data = list(self.image.getdata())
for i in range(self.image.height):
res.append(data[i * self.image.width : (i + 1) * self.image.width])
self._free_ram()
return res
def _free_ram(self) -> None:
if self._path is not None:
self._image = None
@property
def image(self) -> Optional["PIL.Image"]:
if self._image is None:
if self._path is not None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._image = pil_image.open(self._path)
self._image.load()
return self._image
| import hashlib
from io import BytesIO
import logging
import os
from typing import Any, cast, Dict, List, Optional, Sequence, Type, TYPE_CHECKING, Union
from pkg_resources import parse_version
import wandb
from wandb import util
from ._private import MEDIA_TMP
from .base_types.media import BatchableMedia, Media
from .helper_types.bounding_boxes_2d import BoundingBoxes2D
from .helper_types.classes import Classes
from .helper_types.image_mask import ImageMask
if TYPE_CHECKING: # pragma: no cover
import matplotlib # type: ignore
import numpy as np # type: ignore
import PIL # type: ignore
import torch # type: ignore
from wandb.apis.public import Artifact as PublicArtifact
from ..wandb_artifacts import Artifact as LocalArtifact
from ..wandb_run import Run as LocalRun
ImageDataType = Union[
"matplotlib.artist.Artist", "PIL.Image", "TorchTensorType", "np.ndarray"
]
ImageDataOrPathType = Union[str, "Image", ImageDataType]
TorchTensorType = Union["torch.Tensor", "torch.Variable"]
def _server_accepts_image_filenames() -> bool:
# Newer versions of wandb accept large image filenames arrays
# but older versions would have issues with this.
max_cli_version = util._get_max_cli_version()
if max_cli_version is None:
return False
return parse_version("0.12.10") <= parse_version(max_cli_version)
class Image(BatchableMedia):
"""Format images for logging to W&B.
Arguments:
data_or_path: (numpy array, string, io) Accepts numpy array of
image data, or a PIL image. The class attempts to infer
the data format and converts it.
mode: (string) The PIL mode for an image. Most common are "L", "RGB",
"RGBA". Full explanation at https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes.
caption: (string) Label for display of image.
Examples:
### Create a wandb.Image from a numpy array
<!--yeadoc-test:log-image-numpy->
```python
import numpy as np
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3))
image = wandb.Image(pixels, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
### Create a wandb.Image from a PILImage
<!--yeadoc-test:log-image-pil->
```python
import numpy as np
from PIL import Image as PILImage
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
pil_image = PILImage.fromarray(pixels, mode="RGB")
image = wandb.Image(pil_image, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
"""
MAX_ITEMS = 108
# PIL limit
MAX_DIMENSION = 65500
_log_type = "image-file"
format: Optional[str]
_grouping: Optional[int]
_caption: Optional[str]
_width: Optional[int]
_height: Optional[int]
_image: Optional["PIL.Image"]
_classes: Optional["Classes"]
_boxes: Optional[Dict[str, "BoundingBoxes2D"]]
_masks: Optional[Dict[str, "ImageMask"]]
def __init__(
self,
data_or_path: "ImageDataOrPathType",
mode: Optional[str] = None,
caption: Optional[str] = None,
grouping: Optional[int] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
super(Image, self).__init__()
# TODO: We should remove grouping, it's a terrible name and I don't
# think anyone uses it.
self._grouping = None
self._caption = None
self._width = None
self._height = None
self._image = None
self._classes = None
self._boxes = None
self._masks = None
# Allows the user to pass an Image object as the first parameter and have a perfect copy,
        # only overriding additional metadata passed in. If this pattern is compelling, we can generalize.
if isinstance(data_or_path, Image):
self._initialize_from_wbimage(data_or_path)
elif isinstance(data_or_path, str):
self._initialize_from_path(data_or_path)
else:
self._initialize_from_data(data_or_path, mode)
self._set_initialization_meta(grouping, caption, classes, boxes, masks)
def _set_initialization_meta(
self,
grouping: Optional[int] = None,
caption: Optional[str] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
if grouping is not None:
self._grouping = grouping
if caption is not None:
self._caption = caption
total_classes = {}
if boxes:
if not isinstance(boxes, dict):
raise ValueError('Images "boxes" argument must be a dictionary')
boxes_final: Dict[str, BoundingBoxes2D] = {}
for key in boxes:
box_item = boxes[key]
if isinstance(box_item, BoundingBoxes2D):
boxes_final[key] = box_item
elif isinstance(box_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
boxes_final[key] = BoundingBoxes2D(box_item, key)
total_classes.update(boxes_final[key]._class_labels)
self._boxes = boxes_final
if masks:
if not isinstance(masks, dict):
raise ValueError('Images "masks" argument must be a dictionary')
masks_final: Dict[str, ImageMask] = {}
for key in masks:
mask_item = masks[key]
if isinstance(mask_item, ImageMask):
masks_final[key] = mask_item
elif isinstance(mask_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
masks_final[key] = ImageMask(mask_item, key)
if hasattr(masks_final[key], "_val"):
total_classes.update(masks_final[key]._val["class_labels"])
self._masks = masks_final
if classes is not None:
if isinstance(classes, Classes):
total_classes.update(
{val["id"]: val["name"] for val in classes._class_set}
)
else:
total_classes.update({val["id"]: val["name"] for val in classes})
if len(total_classes.keys()) > 0:
self._classes = Classes(
[
{"id": key, "name": total_classes[key]}
for key in total_classes.keys()
]
)
self._width, self._height = self.image.size # type: ignore
self._free_ram()
def _initialize_from_wbimage(self, wbimage: "Image") -> None:
self._grouping = wbimage._grouping
self._caption = wbimage._caption
self._width = wbimage._width
self._height = wbimage._height
self._image = wbimage._image
self._classes = wbimage._classes
self._path = wbimage._path
self._is_tmp = wbimage._is_tmp
self._extension = wbimage._extension
self._sha256 = wbimage._sha256
self._size = wbimage._size
self.format = wbimage.format
self._artifact_source = wbimage._artifact_source
self._artifact_target = wbimage._artifact_target
# We do not want to implicitly copy boxes or masks, just the image-related data.
# self._boxes = wbimage._boxes
# self._masks = wbimage._masks
def _initialize_from_path(self, path: str) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._set_file(path, is_tmp=False)
self._image = pil_image.open(path)
self._image.load()
ext = os.path.splitext(path)[1][1:]
self.format = ext
def _initialize_from_data(self, data: "ImageDataType", mode: str = None,) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
if util.is_matplotlib_typename(util.get_full_typename(data)):
buf = BytesIO()
util.ensure_matplotlib_figure(data).savefig(buf)
self._image = pil_image.open(buf)
elif isinstance(data, pil_image.Image):
self._image = data
elif util.is_pytorch_tensor_typename(util.get_full_typename(data)):
vis_util = util.get_module(
"torchvision.utils", "torchvision is required to render images"
)
if hasattr(data, "requires_grad") and data.requires_grad:
data = data.detach()
data = vis_util.make_grid(data, normalize=True)
self._image = pil_image.fromarray(
data.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
)
else:
if hasattr(data, "numpy"): # TF data eager tensors
data = data.numpy()
if data.ndim > 2:
data = data.squeeze() # get rid of trivial dimensions as a convenience
self._image = pil_image.fromarray(
self.to_uint8(data), mode=mode or self.guess_mode(data)
)
tmp_path = os.path.join(MEDIA_TMP.name, str(util.generate_id()) + ".png")
self.format = "png"
self._image.save(tmp_path, transparency=None)
self._set_file(tmp_path, is_tmp=True)
@classmethod
def from_json(
cls: Type["Image"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "Image":
classes = None
if json_obj.get("classes") is not None:
classes = source_artifact.get(json_obj["classes"]["path"])
masks = json_obj.get("masks")
_masks: Optional[Dict[str, ImageMask]] = None
if masks:
_masks = {}
for key in masks:
_masks[key] = ImageMask.from_json(masks[key], source_artifact)
_masks[key]._set_artifact_source(source_artifact)
_masks[key]._key = key
boxes = json_obj.get("boxes")
_boxes: Optional[Dict[str, BoundingBoxes2D]] = None
if boxes:
_boxes = {}
for key in boxes:
_boxes[key] = BoundingBoxes2D.from_json(boxes[key], source_artifact)
_boxes[key]._key = key
return cls(
source_artifact.get_path(json_obj["path"]).download(),
caption=json_obj.get("caption"),
grouping=json_obj.get("grouping"),
classes=classes,
boxes=_boxes,
masks=_masks,
)
@classmethod
def get_media_subdir(cls: Type["Image"]) -> str:
return os.path.join("media", "images")
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
ignore_copy_err: Optional[bool] = None,
) -> None:
super().bind_to_run(run, key, step, id_, ignore_copy_err=ignore_copy_err)
if self._boxes is not None:
for i, k in enumerate(self._boxes):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._boxes[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
if self._masks is not None:
for i, k in enumerate(self._masks):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._masks[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Image, self).to_json(run_or_artifact)
json_dict["_type"] = Image._log_type
json_dict["format"] = self.format
if self._width is not None:
json_dict["width"] = self._width
if self._height is not None:
json_dict["height"] = self._height
if self._grouping:
json_dict["grouping"] = self._grouping
if self._caption:
json_dict["caption"] = self._caption
if isinstance(run_or_artifact, wandb.wandb_sdk.wandb_artifacts.Artifact):
artifact = run_or_artifact
if (
self._masks is not None or self._boxes is not None
) and self._classes is None:
raise ValueError(
"classes must be passed to wandb.Image which have masks or bounding boxes when adding to artifacts"
)
if self._classes is not None:
class_id = hashlib.md5(
str(self._classes._class_set).encode("utf-8")
).hexdigest()
class_name = os.path.join("media", "classes", class_id + "_cls",)
classes_entry = artifact.add(self._classes, class_name)
json_dict["classes"] = {
"type": "classes-file",
"path": classes_entry.path,
"digest": classes_entry.digest,
}
elif not isinstance(run_or_artifact, wandb.wandb_sdk.wandb_run.Run):
raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
if self._boxes:
json_dict["boxes"] = {
k: box.to_json(run_or_artifact) for (k, box) in self._boxes.items()
}
if self._masks:
json_dict["masks"] = {
k: mask.to_json(run_or_artifact) for (k, mask) in self._masks.items()
}
return json_dict
def guess_mode(self, data: "np.ndarray") -> str:
"""
Guess what type of image the np.array is representing
"""
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
return "L"
elif data.shape[-1] == 3:
return "RGB"
elif data.shape[-1] == 4:
return "RGBA"
else:
raise ValueError(
"Un-supported shape for image conversion %s" % list(data.shape)
)
@classmethod
def to_uint8(cls, data: "np.ndarray") -> "np.ndarray":
"""
Converts floating point image on the range [0,1] and integer images
on the range [0,255] to uint8, clipping if necessary.
"""
np = util.get_module(
"numpy",
required="wandb.Image requires numpy if not supplying PIL Images: pip install numpy",
)
# I think it's better to check the image range vs the data type, since many
# image libraries will return floats between 0 and 255
# some images have range -1...1 or 0-1
dmin = np.min(data)
if dmin < 0:
data = (data - np.min(data)) / np.ptp(data)
if np.max(data) <= 1.0:
data = (data * 255).astype(np.int32)
# assert issubclass(data.dtype.type, np.integer), 'Illegal image format.'
return data.clip(0, 255).astype(np.uint8)
@classmethod
def seq_to_json(
cls: Type["Image"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
"""
Combines a list of images into a meta dictionary object describing the child images.
"""
if TYPE_CHECKING:
seq = cast(Sequence["Image"], seq)
jsons = [obj.to_json(run) for obj in seq]
media_dir = cls.get_media_subdir()
for obj in jsons:
expected = util.to_forward_slash_path(media_dir)
if not obj["path"].startswith(expected):
raise ValueError(
"Files in an array of Image's must be in the {} directory, not {}".format(
cls.get_media_subdir(), obj["path"]
)
)
num_images_to_log = len(seq)
width, height = seq[0].image.size # type: ignore
format = jsons[0]["format"]
def size_equals_image(image: "Image") -> bool:
img_width, img_height = image.image.size # type: ignore
return img_width == width and img_height == height # type: ignore
sizes_match = all(size_equals_image(img) for img in seq)
if not sizes_match:
logging.warning(
"Images sizes do not match. This will causes images to be display incorrectly in the UI."
)
meta = {
"_type": "images/separated",
"width": width,
"height": height,
"format": format,
"count": num_images_to_log,
}
if _server_accepts_image_filenames():
meta["filenames"] = [obj["path"] for obj in jsons]
else:
wandb.termwarn(
"Unable to log image array filenames. In some cases, this can prevent images from being"
"viewed in the UI. Please upgrade your wandb server",
repeat=False,
)
captions = Image.all_captions(seq)
if captions:
meta["captions"] = captions
all_masks = Image.all_masks(seq, run, key, step)
if all_masks:
meta["all_masks"] = all_masks
all_boxes = Image.all_boxes(seq, run, key, step)
if all_boxes:
meta["all_boxes"] = all_boxes
return meta
@classmethod
def all_masks(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_mask_groups: List[Optional[dict]] = []
for image in images:
if image._masks:
mask_group = {}
for k in image._masks:
mask = image._masks[k]
mask_group[k] = mask.to_json(run)
all_mask_groups.append(mask_group)
else:
all_mask_groups.append(None)
if all_mask_groups and not all(x is None for x in all_mask_groups):
return all_mask_groups
else:
return False
@classmethod
def all_boxes(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_box_groups: List[Optional[dict]] = []
for image in images:
if image._boxes:
box_group = {}
for k in image._boxes:
box = image._boxes[k]
box_group[k] = box.to_json(run)
all_box_groups.append(box_group)
else:
all_box_groups.append(None)
if all_box_groups and not all(x is None for x in all_box_groups):
return all_box_groups
else:
return False
@classmethod
def all_captions(
cls: Type["Image"], images: Sequence["Media"]
) -> Union[bool, Sequence[Optional[str]]]:
return cls.captions(images)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Image):
return False
else:
self_image = self.image
other_image = other.image
if self_image is not None:
self_image = list(self_image.getdata())
if other_image is not None:
other_image = list(other_image.getdata())
return (
self._grouping == other._grouping
and self._caption == other._caption
and self._width == other._width
and self._height == other._height
and self_image == other_image
and self._classes == other._classes
)
def to_data_array(self) -> List[Any]:
res = []
if self.image is not None:
data = list(self.image.getdata())
for i in range(self.image.height):
res.append(data[i * self.image.width : (i + 1) * self.image.width])
self._free_ram()
return res
def _free_ram(self) -> None:
if self._path is not None:
self._image = None
@property
def image(self) -> Optional["PIL.Image"]:
if self._image is None:
if self._path is not None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._image = pil_image.open(self._path)
self._image.load()
return self._image
| en | 0.702842 | # pragma: no cover # type: ignore # type: ignore # type: ignore # type: ignore # Newer versions of wandb accept large image filenames arrays # but older versions would have issues with this. Format images for logging to W&B. Arguments: data_or_path: (numpy array, string, io) Accepts numpy array of image data, or a PIL image. The class attempts to infer the data format and converts it. mode: (string) The PIL mode for an image. Most common are "L", "RGB", "RGBA". Full explanation at https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes. caption: (string) Label for display of image. Examples: ### Create a wandb.Image from a numpy array <!--yeadoc-test:log-image-numpy-> ```python import numpy as np import wandb wandb.init() examples = [] for i in range(3): pixels = np.random.randint(low=0, high=256, size=(100, 100, 3)) image = wandb.Image(pixels, caption=f"random field {i}") examples.append(image) wandb.log({"examples": examples}) ``` ### Create a wandb.Image from a PILImage <!--yeadoc-test:log-image-pil-> ```python import numpy as np from PIL import Image as PILImage import wandb wandb.init() examples = [] for i in range(3): pixels = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8) pil_image = PILImage.fromarray(pixels, mode="RGB") image = wandb.Image(pil_image, caption=f"random field {i}") examples.append(image) wandb.log({"examples": examples}) ``` # PIL limit # TODO: We should remove grouping, it's a terrible name and I don't # think anyone uses it. # Allows the user to pass an Image object as the first parameter and have a perfect copy, # only overriding additional metdata passed in. If this pattern is compelling, we can generalize. # TODO: Consider injecting top-level classes if user-provided is empty # TODO: Consider injecting top-level classes if user-provided is empty # type: ignore # We do not want to implicitly copy boxes or masks, just the image-related data. # self._boxes = wbimage._boxes # self._masks = wbimage._masks # TF data eager tensors # get rid of trivial dimensions as a convenience Guess what type of image the np.array is representing # TODO: do we want to support dimensions being at the beginning of the array? Converts floating point image on the range [0,1] and integer images on the range [0,255] to uint8, clipping if necessary. # I think it's better to check the image range vs the data type, since many # image libraries will return floats between 0 and 255 # some images have range -1...1 or 0-1 # assert issubclass(data.dtype.type, np.integer), 'Illegal image format.' Combines a list of images into a meta dictionary object describing the child images. # type: ignore # type: ignore # type: ignore | 2.075902 | 2 |
src/ACC_Backend_Utils.py | skostic14/isda-racing-backend | 1 | 833 | <reponame>skostic14/isda-racing-backend
import datetime
# Gets time from milliseconds
# Returns string formatted as HH:MM:SS.mmm, MM:SS.mmm or S.mmm, depending on the time.
def get_time_from_milliseconds(milli):
milliseconds = milli % 1000
    seconds = (milli // 1000) % 60
    minutes = (milli // (1000 * 60)) % 60
    hours = (milli // (1000 * 60 * 60)) % 24
if hours == 0:
if minutes == 0:
return '%d.%03d' % (seconds, milliseconds)
return '%02d:%02d.%03d' % (minutes, seconds, milliseconds)
return '%02d:%02d:%02d.%03d' % (hours, minutes, seconds, milliseconds)
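# Illustrative behaviour of the helper above (assumed inputs, not part of the original file):
#   get_time_from_milliseconds(5023)    -> '5.023'
#   get_time_from_milliseconds(61023)   -> '01:01.023'
#   get_time_from_milliseconds(3601023) -> '01:00:01.023'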
# Returns a string formatted as YYYY-MM-DD
def get_date_today():
return datetime.date.today().strftime("%Y-%m-%d") | import datetime
# Gets time from milliseconds
# Returns string formatted as HH:MM:SS.mmm, MM:SS.mmm or S.mmm, depending on the time.
def get_time_from_milliseconds(milli):
milliseconds = milli % 1000
    seconds = (milli // 1000) % 60
    minutes = (milli // (1000 * 60)) % 60
    hours = (milli // (1000 * 60 * 60)) % 24
if hours == 0:
if minutes == 0:
return '%d.%03d' % (seconds, milliseconds)
return '%02d:%02d.%03d' % (minutes, seconds, milliseconds)
return '%02d:%02d:%02d.%03d' % (hours, minutes, seconds, milliseconds)
# Returns a string formatted as YYYY-MM-DD
def get_date_today():
return datetime.date.today().strftime("%Y-%m-%d") | en | 0.644782 | # Gets time from milliseconds # Returns string formatted as HH:MM:SS:mmm, MM:SS:mmm or S:mmm, depending on the time. # Returns a string formatted as YYYY-MM-DD | 3.425753 | 3 |
examples/advanced/pidigits.py | ovolve/sympy | 3 | 834 | #!/usr/bin/env python
"""Pi digits example
Example shows arbitrary precision using mpmath with the
computation of the digits of pi.
"""
from mpmath import libmp, pi
from mpmath import functions as mpf_funs
import math
from time import perf_counter as clock
import sys
def display_fraction(digits, skip=0, colwidth=10, columns=5):
"""Pretty printer for first n digits of a fraction"""
perline = colwidth * columns
printed = 0
for linecount in range((len(digits) - skip) // (colwidth * columns)):
line = digits[skip + linecount*perline:skip + (linecount + 1)*perline]
for i in range(columns):
            print(line[i*colwidth: (i + 1)*colwidth], end=' ')
print(":", (linecount + 1)*perline)
if (linecount + 1) % 10 == 0:
            print()
printed += colwidth*columns
rem = (len(digits) - skip) % (colwidth * columns)
if rem:
buf = digits[-rem:]
s = ""
for i in range(columns):
s += buf[:colwidth].ljust(colwidth + 1, " ")
buf = buf[colwidth:]
print(s + ":", printed + colwidth*columns)
def calculateit(func, base, n, tofile):
"""Writes first n base-digits of a mpmath function to file"""
prec = 100
intpart = libmp.numeral(3, base)
if intpart == 0:
skip = 0
else:
skip = len(intpart)
print("Step 1 of 2: calculating binary value...")
prec = int(n*math.log(base, 2)) + 10
t = clock()
a = func(prec)
step1_time = clock() - t
print("Step 2 of 2: converting to specified base...")
t = clock()
d = libmp.bin_to_radix(a.man, -a.exp, base, n)
d = libmp.numeral(d, base, n)
step2_time = clock() - t
print("\nWriting output...\n")
if tofile:
out_ = sys.stdout
sys.stdout = tofile
print("%i base-%i digits of pi:\n" % (n, base))
print(intpart, ".\n")
display_fraction(d, skip, colwidth=10, columns=5)
if tofile:
sys.stdout = out_
print("\nFinished in %f seconds (%f calc, %f convert)" % \
((step1_time + step2_time), step1_time, step2_time))
def interactive():
"""Simple function to interact with user"""
print("Compute digits of pi with SymPy\n")
base = input("Which base? (2-36, 10 for decimal) \n> ")
digits = input("How many digits? (enter a big number, say, 10000)\n> ")
tofile = raw_input("Output to file? (enter a filename, or just press enter\nto print directly to the screen) \n> ")
if tofile:
tofile = open(tofile, "w")
calculateit(pi, base, digits, tofile)
def main():
"""A non-interactive runner"""
base = 16
digits = 500
tofile = None
calculateit(pi, base, digits, tofile)
if __name__ == "__main__":
interactive()
| #!/usr/bin/env python
"""Pi digits example
Example shows arbitrary precision using mpmath with the
computation of the digits of pi.
"""
from mpmath import libmp, pi
from mpmath import functions as mpf_funs
import math
from time import perf_counter as clock
import sys
def display_fraction(digits, skip=0, colwidth=10, columns=5):
"""Pretty printer for first n digits of a fraction"""
perline = colwidth * columns
printed = 0
for linecount in range((len(digits) - skip) // (colwidth * columns)):
line = digits[skip + linecount*perline:skip + (linecount + 1)*perline]
for i in range(columns):
            print(line[i*colwidth: (i + 1)*colwidth], end=' ')
print(":", (linecount + 1)*perline)
if (linecount + 1) % 10 == 0:
            print()
printed += colwidth*columns
rem = (len(digits) - skip) % (colwidth * columns)
if rem:
buf = digits[-rem:]
s = ""
for i in range(columns):
s += buf[:colwidth].ljust(colwidth + 1, " ")
buf = buf[colwidth:]
print(s + ":", printed + colwidth*columns)
def calculateit(func, base, n, tofile):
"""Writes first n base-digits of a mpmath function to file"""
prec = 100
intpart = libmp.numeral(3, base)
if intpart == 0:
skip = 0
else:
skip = len(intpart)
print("Step 1 of 2: calculating binary value...")
prec = int(n*math.log(base, 2)) + 10
t = clock()
a = func(prec)
step1_time = clock() - t
print("Step 2 of 2: converting to specified base...")
t = clock()
d = libmp.bin_to_radix(a.man, -a.exp, base, n)
d = libmp.numeral(d, base, n)
step2_time = clock() - t
print("\nWriting output...\n")
if tofile:
out_ = sys.stdout
sys.stdout = tofile
print("%i base-%i digits of pi:\n" % (n, base))
print(intpart, ".\n")
display_fraction(d, skip, colwidth=10, columns=5)
if tofile:
sys.stdout = out_
print("\nFinished in %f seconds (%f calc, %f convert)" % \
((step1_time + step2_time), step1_time, step2_time))
def interactive():
"""Simple function to interact with user"""
print("Compute digits of pi with SymPy\n")
base = input("Which base? (2-36, 10 for decimal) \n> ")
digits = input("How many digits? (enter a big number, say, 10000)\n> ")
tofile = raw_input("Output to file? (enter a filename, or just press enter\nto print directly to the screen) \n> ")
if tofile:
tofile = open(tofile, "w")
calculateit(pi, base, digits, tofile)
def main():
"""A non-interactive runner"""
base = 16
digits = 500
tofile = None
calculateit(pi, base, digits, tofile)
if __name__ == "__main__":
interactive()
| en | 0.55283 | #!/usr/bin/env python Pi digits example Example shows arbitrary precision using mpmath with the computation of the digits of pi. Pretty printer for first n digits of a fraction Writes first n base-digits of a mpmath function to file Simple function to interact with user A non-interactive runner | 3.797986 | 4 |
authserver/mailauth/migrations/0011_mnserviceuser.py | yopiti/authserver | 8 | 835 | <filename>authserver/mailauth/migrations/0011_mnserviceuser.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-13 00:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mailauth.models
import uuid
class Migration(migrations.Migration):
dependencies = [
('mailauth', '0010_domain_redirect_to'),
]
operations = [
migrations.CreateModel(
name='MNServiceUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(default=uuid.uuid4, max_length=64, verbose_name='Username')),
('password', <PASSWORD>.models.<PASSWORD>(max_length=128, verbose_name='Password')),
('description', models.CharField(blank=True, default='', max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Service User',
'verbose_name_plural': 'Service Users',
},
bases=(mailauth.models.PasswordMaskMixin, models.Model),
),
]
| <filename>authserver/mailauth/migrations/0011_mnserviceuser.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-13 00:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mailauth.models
import uuid
class Migration(migrations.Migration):
dependencies = [
('mailauth', '0010_domain_redirect_to'),
]
operations = [
migrations.CreateModel(
name='MNServiceUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(default=uuid.uuid4, max_length=64, verbose_name='Username')),
('password', <PASSWORD>.models.<PASSWORD>(max_length=128, verbose_name='Password')),
('description', models.CharField(blank=True, default='', max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Service User',
'verbose_name_plural': 'Service Users',
},
bases=(mailauth.models.PasswordMaskMixin, models.Model),
),
]
| en | 0.688379 | # -*- coding: utf-8 -*- # Generated by Django 1.11.11 on 2018-03-13 00:16 | 1.704397 | 2 |
tempest/hacking/checks.py | rishabh20111990/tempest | 2 | 836 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from hacking import core
import pycodestyle
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
'ironic', 'heat', 'sahara']
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
METHOD = re.compile(r"^ def .+")
METHOD_GET_RESOURCE = re.compile(r"^\s*def (list|show)\_.+")
METHOD_DELETE_RESOURCE = re.compile(r"^\s*def delete_.+")
CLASS = re.compile(r"^class .+")
EX_ATTRIBUTE = re.compile(r'(\s+|\()(e|ex|exc|exception).message(\s+|\))')
NEGATIVE_TEST_DECORATOR = re.compile(
r'\s*@decorators\.attr\(type=.*negative.*\)')
_HAVE_NEGATIVE_DECORATOR = False
@core.flake8ext
def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
"""Check for client imports from tempest/api & tempest/scenario tests
T102: Cannot import OpenStack python clients
"""
if "tempest/api" in filename or "tempest/scenario" in filename:
res = PYTHON_CLIENT_RE.match(physical_line)
if res:
return (physical_line.find(res.group(1)),
("T102: python clients import not allowed"
" in tempest/api/* or tempest/scenario/* tests"))
@core.flake8ext
def scenario_tests_need_service_tags(physical_line, filename,
previous_logical):
"""Check that scenario tests have service tags
T104: Scenario tests require a services decorator
"""
if 'tempest/scenario/' in filename and '/test_' in filename:
if TEST_DEFINITION.match(physical_line):
if not SCENARIO_DECORATOR.match(previous_logical):
return (physical_line.find('def'),
"T104: Scenario tests require a service decorator")
@core.flake8ext
def no_setup_teardown_class_for_tests(physical_line, filename):
if pycodestyle.noqa(physical_line):
return
if 'tempest/test.py' in filename or 'tempest/lib/' in filename:
return
if SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
"T105: (setUp|tearDown)Class can not be used in tests")
@core.flake8ext
def service_tags_not_in_module_path(physical_line, filename):
"""Check that a service tag isn't in the module path
A service tag should only be added if the service name isn't already in
the module path.
T107
"""
# NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
# created for services like heat which would cause false negatives for
# those tests, so just exclude the scenario tests.
if 'tempest/scenario' not in filename:
matches = SCENARIO_DECORATOR.match(physical_line)
if matches:
services = matches.group(1).split(',')
for service in services:
service_name = service.strip().strip("'")
modulepath = os.path.split(filename)[0]
if service_name in modulepath:
return (physical_line.find(service_name),
"T107: service tag should not be in path")
@core.flake8ext
def no_hyphen_at_end_of_rand_name(logical_line, filename):
"""Check no hyphen at the end of rand_name() argument
T108
"""
msg = "T108: hyphen should not be specified at the end of rand_name()"
if RAND_NAME_HYPHEN_RE.match(logical_line):
return 0, msg
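# Illustrative examples for the T108 check above (hypothetical call sites, not part of the original module):
#   data_utils.rand_name('router-') -> flagged; rand_name() already joins the random suffix with '-'
#   data_utils.rand_name('router')  -> passes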
@core.flake8ext
def no_mutable_default_args(logical_line):
"""Check that mutable object isn't used as default argument
N322: Method's default argument shouldn't be mutable
"""
msg = "N322: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
@core.flake8ext
def no_testtools_skip_decorator(logical_line):
"""Check that methods do not have the testtools.skip decorator
T109
"""
if TESTTOOLS_SKIP_DECORATOR.match(logical_line):
yield (0, "T109: Cannot use testtools.skip decorator; instead use "
"decorators.skip_because from tempest.lib")
def _common_service_clients_check(logical_line, physical_line, filename,
ignored_list_file=None):
if not re.match('tempest/(lib/)?services/.*', filename):
return False
if ignored_list_file is not None:
ignored_list = []
with open('tempest/hacking/' + ignored_list_file) as f:
for line in f:
ignored_list.append(line.strip())
if filename in ignored_list:
return False
if not METHOD.match(physical_line):
return False
if pycodestyle.noqa(physical_line):
return False
return True
@core.flake8ext
def get_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of GET should be consistent
T110
"""
if not _common_service_clients_check(logical_line, physical_line,
filename, 'ignored_list_T110.txt'):
return
for line in lines[line_number:]:
if METHOD.match(line) or CLASS.match(line):
# the end of a method
return
if 'self.get(' not in line and ('self.show_resource(' not in line and
'self.list_resources(' not in line):
continue
if METHOD_GET_RESOURCE.match(logical_line):
return
msg = ("T110: [GET /resources] methods should be list_<resource name>s"
" or show_<resource name>")
yield (0, msg)
@core.flake8ext
def delete_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of DELETE should be consistent
T111
"""
if not _common_service_clients_check(logical_line, physical_line,
filename, 'ignored_list_T111.txt'):
return
for line in lines[line_number:]:
if METHOD.match(line) or CLASS.match(line):
# the end of a method
return
if 'self.delete(' not in line and 'self.delete_resource(' not in line:
continue
if METHOD_DELETE_RESOURCE.match(logical_line):
return
msg = ("T111: [DELETE /resources/<id>] methods should be "
"delete_<resource name>")
yield (0, msg)
@core.flake8ext
def dont_import_local_tempest_into_lib(logical_line, filename):
"""Check that tempest.lib should not import local tempest code
T112
"""
if 'tempest/lib/' not in filename:
return
if not ('from tempest' in logical_line or
'import tempest' in logical_line):
return
if ('from tempest.lib' in logical_line or
'import tempest.lib' in logical_line):
return
msg = ("T112: tempest.lib should not import local tempest code to avoid "
"circular dependency")
yield (0, msg)
@core.flake8ext
def use_rand_uuid_instead_of_uuid4(logical_line, filename):
"""Check that tests use data_utils.rand_uuid() instead of uuid.uuid4()
T113
"""
if 'tempest/lib/' in filename:
return
if 'uuid.uuid4()' not in logical_line:
return
msg = ("T113: Tests should use data_utils.rand_uuid()/rand_uuid_hex() "
"instead of uuid.uuid4()/uuid.uuid4().hex")
yield (0, msg)
@core.flake8ext
def dont_use_config_in_tempest_lib(logical_line, filename):
"""Check that tempest.lib doesn't use tempest config
T114
"""
if 'tempest/lib/' not in filename:
return
if ('tempest.config' in logical_line or
'from tempest import config' in logical_line or
'oslo_config' in logical_line):
msg = ('T114: tempest.lib can not have any dependency on tempest '
'config.')
yield(0, msg)
@core.flake8ext
def dont_put_admin_tests_on_nonadmin_path(logical_line,
filename):
"""Check admin tests should exist under admin path
T115
"""
if 'tempest/api/' not in filename:
return
if not re.match(r'class .*Test.*\(.*Admin.*\):', logical_line):
return
if not re.match(r'.\/tempest\/api\/.*\/admin\/.*', filename):
msg = 'T115: All admin tests should exist under admin path.'
yield(0, msg)
@core.flake8ext
def unsupported_exception_attribute_PY3(logical_line):
"""Check Unsupported 'message' exception attribute in PY3
T116
"""
result = EX_ATTRIBUTE.search(logical_line)
msg = ("[T116] Unsupported 'message' Exception attribute in PY3")
if result:
yield(0, msg)
@core.flake8ext
def negative_test_attribute_always_applied_to_negative_tests(physical_line,
filename):
"""Check ``@decorators.attr(type=['negative'])`` applied to negative tests.
T117
"""
global _HAVE_NEGATIVE_DECORATOR
if re.match(r'.\/tempest\/api\/.*_negative.*', filename):
if NEGATIVE_TEST_DECORATOR.match(physical_line):
_HAVE_NEGATIVE_DECORATOR = True
return
if TEST_DEFINITION.match(physical_line):
if not _HAVE_NEGATIVE_DECORATOR:
return (
0, "T117: Must apply `@decorators.attr(type=['negative'])`"
" to all negative API tests"
)
_HAVE_NEGATIVE_DECORATOR = False
| # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from hacking import core
import pycodestyle
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
'ironic', 'heat', 'sahara']
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
METHOD = re.compile(r"^ def .+")
METHOD_GET_RESOURCE = re.compile(r"^\s*def (list|show)\_.+")
METHOD_DELETE_RESOURCE = re.compile(r"^\s*def delete_.+")
CLASS = re.compile(r"^class .+")
EX_ATTRIBUTE = re.compile(r'(\s+|\()(e|ex|exc|exception).message(\s+|\))')
NEGATIVE_TEST_DECORATOR = re.compile(
r'\s*@decorators\.attr\(type=.*negative.*\)')
_HAVE_NEGATIVE_DECORATOR = False
@core.flake8ext
def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
"""Check for client imports from tempest/api & tempest/scenario tests
T102: Cannot import OpenStack python clients
"""
if "tempest/api" in filename or "tempest/scenario" in filename:
res = PYTHON_CLIENT_RE.match(physical_line)
if res:
return (physical_line.find(res.group(1)),
("T102: python clients import not allowed"
" in tempest/api/* or tempest/scenario/* tests"))
@core.flake8ext
def scenario_tests_need_service_tags(physical_line, filename,
previous_logical):
"""Check that scenario tests have service tags
T104: Scenario tests require a services decorator
"""
if 'tempest/scenario/' in filename and '/test_' in filename:
if TEST_DEFINITION.match(physical_line):
if not SCENARIO_DECORATOR.match(previous_logical):
return (physical_line.find('def'),
"T104: Scenario tests require a service decorator")
@core.flake8ext
def no_setup_teardown_class_for_tests(physical_line, filename):
if pycodestyle.noqa(physical_line):
return
if 'tempest/test.py' in filename or 'tempest/lib/' in filename:
return
if SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
"T105: (setUp|tearDown)Class can not be used in tests")
@core.flake8ext
def service_tags_not_in_module_path(physical_line, filename):
"""Check that a service tag isn't in the module path
A service tag should only be added if the service name isn't already in
the module path.
T107
"""
# NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
# created for services like heat which would cause false negatives for
# those tests, so just exclude the scenario tests.
if 'tempest/scenario' not in filename:
matches = SCENARIO_DECORATOR.match(physical_line)
if matches:
services = matches.group(1).split(',')
for service in services:
service_name = service.strip().strip("'")
modulepath = os.path.split(filename)[0]
if service_name in modulepath:
return (physical_line.find(service_name),
"T107: service tag should not be in path")
@core.flake8ext
def no_hyphen_at_end_of_rand_name(logical_line, filename):
"""Check no hyphen at the end of rand_name() argument
T108
"""
msg = "T108: hyphen should not be specified at the end of rand_name()"
if RAND_NAME_HYPHEN_RE.match(logical_line):
return 0, msg
@core.flake8ext
def no_mutable_default_args(logical_line):
"""Check that mutable object isn't used as default argument
N322: Method's default argument shouldn't be mutable
"""
msg = "N322: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
@core.flake8ext
def no_testtools_skip_decorator(logical_line):
"""Check that methods do not have the testtools.skip decorator
T109
"""
if TESTTOOLS_SKIP_DECORATOR.match(logical_line):
yield (0, "T109: Cannot use testtools.skip decorator; instead use "
"decorators.skip_because from tempest.lib")
def _common_service_clients_check(logical_line, physical_line, filename,
ignored_list_file=None):
if not re.match('tempest/(lib/)?services/.*', filename):
return False
if ignored_list_file is not None:
ignored_list = []
with open('tempest/hacking/' + ignored_list_file) as f:
for line in f:
ignored_list.append(line.strip())
if filename in ignored_list:
return False
if not METHOD.match(physical_line):
return False
if pycodestyle.noqa(physical_line):
return False
return True
@core.flake8ext
def get_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of GET should be consistent
T110
"""
if not _common_service_clients_check(logical_line, physical_line,
filename, 'ignored_list_T110.txt'):
return
for line in lines[line_number:]:
if METHOD.match(line) or CLASS.match(line):
# the end of a method
return
if 'self.get(' not in line and ('self.show_resource(' not in line and
'self.list_resources(' not in line):
continue
if METHOD_GET_RESOURCE.match(logical_line):
return
msg = ("T110: [GET /resources] methods should be list_<resource name>s"
" or show_<resource name>")
yield (0, msg)
@core.flake8ext
def delete_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of DELETE should be consistent
T111
"""
if not _common_service_clients_check(logical_line, physical_line,
filename, 'ignored_list_T111.txt'):
return
for line in lines[line_number:]:
if METHOD.match(line) or CLASS.match(line):
# the end of a method
return
if 'self.delete(' not in line and 'self.delete_resource(' not in line:
continue
if METHOD_DELETE_RESOURCE.match(logical_line):
return
msg = ("T111: [DELETE /resources/<id>] methods should be "
"delete_<resource name>")
yield (0, msg)
@core.flake8ext
def dont_import_local_tempest_into_lib(logical_line, filename):
"""Check that tempest.lib should not import local tempest code
T112
"""
if 'tempest/lib/' not in filename:
return
if not ('from tempest' in logical_line or
'import tempest' in logical_line):
return
if ('from tempest.lib' in logical_line or
'import tempest.lib' in logical_line):
return
msg = ("T112: tempest.lib should not import local tempest code to avoid "
"circular dependency")
yield (0, msg)
@core.flake8ext
def use_rand_uuid_instead_of_uuid4(logical_line, filename):
"""Check that tests use data_utils.rand_uuid() instead of uuid.uuid4()
T113
"""
if 'tempest/lib/' in filename:
return
if 'uuid.uuid4()' not in logical_line:
return
msg = ("T113: Tests should use data_utils.rand_uuid()/rand_uuid_hex() "
"instead of uuid.uuid4()/uuid.uuid4().hex")
yield (0, msg)
@core.flake8ext
def dont_use_config_in_tempest_lib(logical_line, filename):
"""Check that tempest.lib doesn't use tempest config
T114
"""
if 'tempest/lib/' not in filename:
return
if ('tempest.config' in logical_line or
'from tempest import config' in logical_line or
'oslo_config' in logical_line):
msg = ('T114: tempest.lib can not have any dependency on tempest '
'config.')
yield(0, msg)
@core.flake8ext
def dont_put_admin_tests_on_nonadmin_path(logical_line,
filename):
"""Check admin tests should exist under admin path
T115
"""
if 'tempest/api/' not in filename:
return
if not re.match(r'class .*Test.*\(.*Admin.*\):', logical_line):
return
if not re.match(r'.\/tempest\/api\/.*\/admin\/.*', filename):
msg = 'T115: All admin tests should exist under admin path.'
yield(0, msg)
@core.flake8ext
def unsupported_exception_attribute_PY3(logical_line):
"""Check Unsupported 'message' exception attribute in PY3
T116
"""
result = EX_ATTRIBUTE.search(logical_line)
msg = ("[T116] Unsupported 'message' Exception attribute in PY3")
if result:
yield(0, msg)
@core.flake8ext
def negative_test_attribute_always_applied_to_negative_tests(physical_line,
filename):
"""Check ``@decorators.attr(type=['negative'])`` applied to negative tests.
T117
"""
global _HAVE_NEGATIVE_DECORATOR
if re.match(r'.\/tempest\/api\/.*_negative.*', filename):
if NEGATIVE_TEST_DECORATOR.match(physical_line):
_HAVE_NEGATIVE_DECORATOR = True
return
if TEST_DEFINITION.match(physical_line):
if not _HAVE_NEGATIVE_DECORATOR:
return (
0, "T117: Must apply `@decorators.attr(type=['negative'])`"
" to all negative API tests"
)
_HAVE_NEGATIVE_DECORATOR = False
| en | 0.802697 | # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Check for client imports from tempest/api & tempest/scenario tests T102: Cannot import OpenStack python clients Check that scenario tests have service tags T104: Scenario tests require a services decorator Check that a service tag isn't in the module path A service tag should only be added if the service name isn't already in the module path. T107 # NOTE(mtreinish) Scenario tests always need service tags, but subdirs are # created for services like heat which would cause false negatives for # those tests, so just exclude the scenario tests. Check no hyphen at the end of rand_name() argument T108 Check that mutable object isn't used as default argument N322: Method's default argument shouldn't be mutable Check that methods do not have the testtools.skip decorator T109 Check that service client names of GET should be consistent T110 # the end of a method Check that service client names of DELETE should be consistent T111 # the end of a method Check that tempest.lib should not import local tempest code T112 Check that tests use data_utils.rand_uuid() instead of uuid.uuid4() T113 Check that tempest.lib doesn't use tempest config T114 Check admin tests should exist under admin path T115 Check Unsupported 'message' exception attribute in PY3 T116 Check ``@decorators.attr(type=['negative'])`` applied to negative tests. T117 | 2.216594 | 2 |
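The functions in the entry above are flake8 plugin checks: each one receives a physical or logical line (plus optional filename context) and reports a violation as an (offset, message) tuple, either returned or yielded. A minimal sketch of exercising one check directly, assuming the file is importable as tempest.hacking.checks (an import path inferred from the T1xx codes and tempest references, not stated in this excerpt):

from tempest.hacking import checks

# Generator-style checks yield (offset, message) tuples that flake8 reports as errors.
print(list(checks.no_mutable_default_args("def create(self, body={}):")))
# expected: [(0, "N322: Method's default argument shouldn't be mutable!")]

# A clean line yields nothing.
print(list(checks.no_mutable_default_args("def create(self, body=None):")))
# expected: []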
data/train/python/6a98547230e4cc83fa248137ca0fde09ebb67dcfController.py | harshp8l/deep-learning-lang-detection | 84 | 837 | import SimpleXMLRPCServer
import sys
import logging
from K8055Controller import K8055Controller
logging.basicConfig()
controller_log = logging.getLogger("Controller")
class Controller:
def __init__(self):
self.k8055 = K8055Controller()
controller_log.debug("initialized")
def reset(self):
self.k8055.reset()
controller_log.debug("reset")
return 0
def turn_on(self, i):
self.k8055.turn_on(i)
controller_log.debug('turned on %i' % (i))
return 0
def turn_off(self, i):
self.k8055.turn_off(i)
controller_log.debug('turned off %i' % (i))
return 0
def set_analog(self, i, level):
if (i == 1):
self.k8055.set_analog1(level)
else:
self.k8055.set_analog2(level)
return 0
controller = Controller()
server = SimpleXMLRPCServer.SimpleXMLRPCServer(("d6349.mysql.zone.ee", 7000))
server.register_instance(controller)
server.serve_forever() | import SimpleXMLRPCServer
import sys
import logging
from K8055Controller import K8055Controller
logging.basicConfig()
controller_log = logging.getLogger("Controller")
class Controller:
def __init__(self):
self.k8055 = K8055Controller()
controller_log.debug("initialized")
def reset(self):
self.k8055.reset()
controller_log.debug("reset")
return 0
def turn_on(self, i):
self.k8055.turn_on(i)
controller_log.debug('turned on %i' % (i))
return 0
def turn_off(self, i):
self.k8055.turn_off(i)
controller_log.debug('turned off %i' % (i))
return 0
def set_analog(self, i, level):
if (i == 1):
self.k8055.set_analog1(level)
else:
self.k8055.set_analog2(level)
return 0
controller = Controller()
server = SimpleXMLRPCServer.SimpleXMLRPCServer(("d6349.mysql.zone.ee", 7000))
server.register_instance(controller)
server.serve_forever() | none | 1 | 2.605427 | 3 |
|
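A possible client for the XML-RPC service in the entry above, as a hedged sketch rather than code from the repository: the hostname and port are the ones hard-coded in the script, the exposed methods are those of the registered Controller instance, and a K8055 board is assumed to be attached on the server side. The server script is Python 2, so a Python 2 client would use xmlrpclib instead of xmlrpc.client.

import xmlrpc.client  # xmlrpclib under Python 2

proxy = xmlrpc.client.ServerProxy("http://d6349.mysql.zone.ee:7000")
proxy.reset()             # clear all outputs
proxy.turn_on(1)          # switch digital channel 1 on
proxy.set_analog(1, 128)  # drive analog output 1 to mid-scale
proxy.turn_off(1)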
model/__init__.py | sun1638650145/CRNN | 11 | 838 | <gh_stars>10-100
from .crnn import CRNN
from .crnn import CRNN_Attention | from .crnn import CRNN
from .crnn import CRNN_Attention | none | 1 | 1.003661 | 1 |
|
models2.py | Lydia-Tan/MindLife | 1 | 839 | <filename>models2.py
import nltk
import re
import sys
from sys import argv
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def ajay(ans):
ajay = SentimentIntensityAnalyzer()
completeScore = 0
questionWeights = [0.05, 0.20, 0.05, 0.05, 0.05, 0.20, 0.05, 0.05, 0.20, 0.10]
print ans
ansList = ans.split("$")
for j in range(10):
print ansList[j]
    for i in range(10):
        results = []
        score = 0
        count = 0
        # Each answer is scored independently and weighted by its question weight.
        paragraph = ansList[i]
        #Split Paragraph on basis of '.' or ? or !.
        for l in re.split(r"\.|\?|\!", paragraph):
            ss = ajay.polarity_scores(l)
            results.append(ss)
            score += ss['compound']
            count += 1
        completeScore += (score / count) * questionWeights[i]
#print(completeScore)
if (completeScore >= 0.1):
return "False Alarm! You don't have Depression."
elif (completeScore >= -0.1):
return ("Seasonal affective disorder (SAD). This type of depression " +
"emerges as days get shorter in the fall and winter. The mood "
+ "change may result from alterations in the body's natural daily "
+ "rhythms, in the eyes' sensitivity to light, or in how chemical "
+ "messengers like serotonin and melatonin function. The leading "
+ "treatment is light therapy, which involves daily sessions sitting "
+ "close to an especially intense light source. The usual treatments "
+ "for depression, such as psychotherapy and medication, may also be "
+ "effective.");
elif (completeScore >= -0.4):
return ("Persistent depressive disorder. Formerly called dysthymia, this "
+ "type of depression refers to low mood that has lasted for at least "
+ "two years but may not reach the intensity of major depression. Many "
+ "people with this type of depression type are able to function day to "
+ "but feel low or joyless much of the time. Some depressive symptoms, "
+ "such as appetite and sleep changes, low energy, low self-esteem, or "
+ "hopelessness, are usually part of the picture.")
else:
return ("The classic depression type, major depression is a state where a dark "
+ "mood is all-consuming and one loses interest in activities, even ones "
+ "that are usually pleasurable. Symptoms of this type of depression "
+ "include trouble sleeping, changes in appetite or weight, loss of energy, "
+ "and feeling worthless. Thoughts of death or suicide may occur. It is "
+ "usually treated with psychotherapy and medication. For some people with "
+ "severe depression that isn't alleviated with psychotherapy or antidepressant "
+ "medications, electroconvulsive therapy may be effective.") | <filename>models2.py
import nltk
import re
import sys
from sys import argv
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def ajay(ans):
ajay = SentimentIntensityAnalyzer()
completeScore = 0
questionWeights = [0.05, 0.20, 0.05, 0.05, 0.05, 0.20, 0.05, 0.05, 0.20, 0.10]
print ans
ansList = ans.split("$")
for j in range(10):
print ansList[j]
    for i in range(10):
        results = []
        score = 0
        count = 0
        # Each answer is scored independently and weighted by its question weight.
        paragraph = ansList[i]
        #Split Paragraph on basis of '.' or ? or !.
        for l in re.split(r"\.|\?|\!", paragraph):
            ss = ajay.polarity_scores(l)
            results.append(ss)
            score += ss['compound']
            count += 1
        completeScore += (score / count) * questionWeights[i]
#print(completeScore)
if (completeScore >= 0.1):
return "False Alarm! You don't have Depression."
elif (completeScore >= -0.1):
return ("Seasonal affective disorder (SAD). This type of depression " +
"emerges as days get shorter in the fall and winter. The mood "
+ "change may result from alterations in the body's natural daily "
+ "rhythms, in the eyes' sensitivity to light, or in how chemical "
+ "messengers like serotonin and melatonin function. The leading "
+ "treatment is light therapy, which involves daily sessions sitting "
+ "close to an especially intense light source. The usual treatments "
+ "for depression, such as psychotherapy and medication, may also be "
+ "effective.");
elif (completeScore >= -0.4):
return ("Persistent depressive disorder. Formerly called dysthymia, this "
+ "type of depression refers to low mood that has lasted for at least "
+ "two years but may not reach the intensity of major depression. Many "
+ "people with this type of depression type are able to function day to "
+ "but feel low or joyless much of the time. Some depressive symptoms, "
+ "such as appetite and sleep changes, low energy, low self-esteem, or "
+ "hopelessness, are usually part of the picture.")
else:
return ("The classic depression type, major depression is a state where a dark "
+ "mood is all-consuming and one loses interest in activities, even ones "
+ "that are usually pleasurable. Symptoms of this type of depression "
+ "include trouble sleeping, changes in appetite or weight, loss of energy, "
+ "and feeling worthless. Thoughts of death or suicide may occur. It is "
+ "usually treated with psychotherapy and medication. For some people with "
+ "severe depression that isn't alleviated with psychotherapy or antidepressant "
+ "medications, electroconvulsive therapy may be effective.") | en | 0.501959 | # print (count) #Split Paragraph on basis of '.' or ? or !. # print(l) # print(ss['compound']) #print(completeScore) | 3.225963 | 3 |
tokendito/tool.py | pcmxgti/tokendito | 40 | 840 | # vim: set filetype=python ts=4 sw=4
# -*- coding: utf-8 -*-
"""This module retrieves AWS credentials after authenticating with Okta."""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from future import standard_library
from tokendito import aws_helpers
from tokendito import helpers
from tokendito import okta_helpers
from tokendito import settings
standard_library.install_aliases()
def cli(args):
"""Tokendito retrieves AWS credentials after authenticating with Okta."""
# Set some required initial values
args = helpers.setup(args)
logging.debug("tokendito retrieves AWS credentials after authenticating with Okta.")
# Collect and organize user specific information
helpers.process_options(args)
# Authenticate okta and AWS also use assumerole to assign the role
logging.debug("Authenticate user with Okta and AWS.")
secret_session_token = okta_helpers.authenticate_user(
settings.okta_org, settings.okta_username, settings.okta_password
)
saml_response_string, saml_xml = aws_helpers.authenticate_to_roles(
secret_session_token, settings.okta_aws_app_url
)
assume_role_response, role_name = aws_helpers.select_assumeable_role(
saml_response_string, saml_xml
)
aws_helpers.ensure_keys_work(assume_role_response)
helpers.set_local_credentials(
assume_role_response, role_name, settings.aws_region, settings.aws_output
)
| # vim: set filetype=python ts=4 sw=4
# -*- coding: utf-8 -*-
"""This module retrieves AWS credentials after authenticating with Okta."""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from future import standard_library
from tokendito import aws_helpers
from tokendito import helpers
from tokendito import okta_helpers
from tokendito import settings
standard_library.install_aliases()
def cli(args):
"""Tokendito retrieves AWS credentials after authenticating with Okta."""
# Set some required initial values
args = helpers.setup(args)
logging.debug("tokendito retrieves AWS credentials after authenticating with Okta.")
# Collect and organize user specific information
helpers.process_options(args)
# Authenticate okta and AWS also use assumerole to assign the role
logging.debug("Authenticate user with Okta and AWS.")
secret_session_token = okta_helpers.authenticate_user(
settings.okta_org, settings.okta_username, settings.okta_password
)
saml_response_string, saml_xml = aws_helpers.authenticate_to_roles(
secret_session_token, settings.okta_aws_app_url
)
assume_role_response, role_name = aws_helpers.select_assumeable_role(
saml_response_string, saml_xml
)
aws_helpers.ensure_keys_work(assume_role_response)
helpers.set_local_credentials(
assume_role_response, role_name, settings.aws_region, settings.aws_output
)
| en | 0.626398 | # vim: set filetype=python ts=4 sw=4 # -*- coding: utf-8 -*- This module retrieves AWS credentials after authenticating with Okta. Tokendito retrieves AWS credentials after authenticating with Okta. # Set some required initial values # Collect and organize user specific information # Authenticate okta and AWS also use assumerole to assign the role | 2.0596 | 2 |
resources/__init__.py | Boryslavq/UHMI_Chalenge | 0 | 841 | <reponame>Boryslavq/UHMI_Chalenge<filename>resources/__init__.py
from . import rest
from . import helpers
| from . import rest
from . import helpers | none | 1 | 1.099239 | 1 |
|
ir_datasets/formats/trec.py | cakiki/ir_datasets | 0 | 842 | <filename>ir_datasets/formats/trec.py
import io
import codecs
import tarfile
import re
import gzip
import xml.etree.ElementTree as ET
from fnmatch import fnmatch
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.indices import PickleLz4FullStore
from .base import GenericDoc, GenericQuery, GenericScoredDoc, BaseDocs, BaseQueries, BaseScoredDocs, BaseQrels
class TrecDoc(NamedTuple):
doc_id: str
text: str
marked_up_doc: str
class TitleUrlTextDoc(NamedTuple):
doc_id: str
title: str
url: str
text: str
class TrecQuery(NamedTuple):
query_id: str
title: str
description: str
narrative: str
class TrecSubtopic(NamedTuple):
number: str
text: str
type: str
class TrecQrel(NamedTuple):
query_id: str
doc_id: str
relevance: int
iteration: str
class TrecPrel(NamedTuple):
query_id: str
doc_id: str
relevance: int
method: int
iprob: float
# Default content tags from Anserini's TrecCollection
CONTENT_TAGS = 'TEXT HEADLINE TITLE HL HEAD TTL DD DATE LP LEADPARA'.split()
class TrecDocs(BaseDocs):
def __init__(self, docs_dlc, encoding=None, path_globs=None, content_tags=CONTENT_TAGS, parser='BS4', namespace=None, lang=None, expected_file_count=None, docstore_size_hint=None, count_hint=None):
self._docs_dlc = docs_dlc
self._encoding = encoding
self._path_globs = path_globs
self._content_tags = content_tags
self._parser = {
'BS4': self._parser_bs,
'text': self._parser_text,
'tut': self._parser_tut,
}[parser]
self._doc = {
'BS4': TrecDoc,
'text': GenericDoc,
'tut': TitleUrlTextDoc,
}[parser]
self._docs_namespace = namespace
self._docs_lang = lang
self._expected_file_count = expected_file_count
self._docstore_size_hint = docstore_size_hint
self._count_hint = count_hint
if expected_file_count is not None:
assert self._path_globs is not None, "expected_file_count only supported with path_globs"
def docs_path(self, force=True):
return self._docs_dlc.path(force)
@ir_datasets.util.use_docstore
def docs_iter(self):
if Path(self._docs_dlc.path()).is_dir():
if self._path_globs:
file_count = 0
for glob in sorted(self._path_globs):
for path in sorted(Path(self._docs_dlc.path()).glob(glob)):
file_count += 1
yield from self._docs_iter(path)
if self._expected_file_count is not None:
if file_count != self._expected_file_count:
raise RuntimeError(f'found {file_count} files of the expected {self._expected_file_count} matching the following: {self._path_globs} under {self._docs_dlc.path()}. Make sure that directories are linked such that these globs match the correct number of files.')
else:
yield from self._docs_iter(self._docs_dlc.path())
else:
if self._path_globs:
file_count = 0
# tarfile, find globs, open in streaming mode (r|)
with self._docs_dlc.stream() as stream:
with tarfile.open(fileobj=stream, mode='r|gz') as tarf:
for block in tarf:
if any(fnmatch(block.name, g) for g in self._path_globs):
file = tarf.extractfile(block)
if block.name.endswith('.gz'):
file = gzip.GzipFile(fileobj=file)
yield from self._parser(file)
file_count += 1
if self._expected_file_count is not None:
if file_count != self._expected_file_count:
raise RuntimeError(f'found {file_count} files of the expected {self._expected_file_count} matching the following: {self._path_globs} under {self._docs_dlc.path()}. Make sure that directories are linked such that these globs match the correct number of files.')
else:
with self._docs_dlc.stream() as f:
yield from self._parser(f)
def _docs_iter(self, path):
if Path(path).is_file():
if str(path).endswith('.gz'):
with gzip.open(path, 'rb') as f:
yield from self._parser(f)
else:
with path.open('rb') as f:
yield from self._parser(f)
elif Path(path).is_dir():
for child in path.iterdir():
yield from self._docs_iter(child)
def _parser_bs(self, stream):
BeautifulSoup = ir_datasets.lazy_libs.bs4().BeautifulSoup
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_markup = None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
elif line == '</DOC>\n':
soup = BeautifulSoup(f'<OUTER>\n{doc_markup}\n</OUTER>', 'lxml')
text = soup.get_text()
yield TrecDoc(doc_id, text, doc_markup)
doc_id, doc_markup = None, ''
else:
if in_tag:
doc_markup += line
if line.startswith('</'):
if any(line.startswith(f'</{tag}>') for tag in self._content_tags):
in_tag -= 1
if line.startswith('<'):
if any(line.startswith(f'<{tag}>') for tag in self._content_tags):
in_tag += 1
if in_tag == 1:
doc_markup += line
def _parser_text(self, stream):
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_text = None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
elif line == '</DOC>\n':
yield GenericDoc(doc_id, doc_text)
doc_id, doc_text = None, ''
else:
if line.startswith('</'):
if any(line.startswith(f'</{tag}>') for tag in self._content_tags):
in_tag = False
if in_tag:
doc_text += line
if line.startswith('<'):
if any(line.startswith(f'<{tag}>') for tag in self._content_tags):
in_tag = True
def _parser_tut(self, stream):
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_title, doc_url, doc_text = None, None, None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
if line.startswith('<TITLE>'):
doc_title = line.replace('<TITLE>', '').replace('</TITLE>\n', '').strip()
if line.startswith('<URL>'):
doc_url = line.replace('<URL>', '').replace('</URL>\n', '').strip()
elif line == '</DOC>\n':
yield TitleUrlTextDoc(doc_id, doc_title, doc_url, doc_text)
doc_id, doc_title, doc_url, doc_text = None, None, None, ''
else:
if line.startswith('</TEXT>'):
in_tag = False
if in_tag:
doc_text += line
if line.startswith('<TEXT>'):
in_tag = True
def docs_cls(self):
return self._doc
def docs_store(self, field='doc_id'):
return PickleLz4FullStore(
path=f'{self.docs_path(force=False)}.pklz4',
init_iter_fn=self.docs_iter,
data_cls=self.docs_cls(),
lookup_field=field,
index_fields=['doc_id'],
size_hint=self._docstore_size_hint,
count_hint=self._count_hint,
)
def docs_count(self):
if self.docs_store().built():
return self.docs_store().count()
def docs_namespace(self):
return self._docs_namespace
def docs_lang(self):
return self._docs_lang
DEFAULT_QTYPE_MAP = {
'<num> *(Number:)?': 'query_id',
'<title> *(Topic:)?': 'title',
'<desc> *(Description:)?': 'description',
'<narr> *(Narrative:)?': 'narrative'
}
class TrecQueries(BaseQueries):
def __init__(self, queries_dlc, qtype=TrecQuery, qtype_map=None, encoding=None, namespace=None, lang=None, remove_tags=('</title>',)):
self._queries_dlc = queries_dlc
self._qtype = qtype
self._qtype_map = qtype_map or DEFAULT_QTYPE_MAP
self._encoding = encoding
self._queries_namespace = namespace
self._queries_lang = lang
self._remove_tags = remove_tags
def queries_path(self):
return self._queries_dlc.path()
def queries_iter(self):
fields, reading = {}, None
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for line in f:
if line.startswith('</top>'):
assert len(fields) == len(self._qtype._fields), fields
for tag in self._remove_tags:
fields = {k: v.replace(tag, '') for k, v in fields.items()}
yield self._qtype(*(fields[f].strip() for f in self._qtype._fields))
fields, reading = {}, None
match_any = False
for tag, target in self._qtype_map.items():
match = re.match(tag, line)
if match:
fields[target] = line[match.end():]
reading = target
match_any = True
break
if not match_any and reading and not line.startswith('<'):
fields[reading] += line
def queries_cls(self):
return self._qtype
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecXmlQueries(BaseQueries):
def __init__(self, queries_dlc, qtype=TrecQuery, qtype_map=None, encoding=None, subtopics_key='subtopics', namespace=None, lang=None):
self._queries_dlc = queries_dlc
self._qtype = qtype
self._qtype_map = qtype_map or {f: f for f in qtype._fields}
self._encoding = encoding
self._subtopics_key = subtopics_key
self._queries_namespace = namespace
self._queries_lang = lang
def queries_path(self):
return self._queries_dlc.path()
def queries_iter(self):
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for topic_el in ET.fromstring(f.read()):
item = [None for _ in self._qtype._fields]
if 'number' in topic_el.attrib:
item[self._qtype._fields.index('query_id')] = topic_el.attrib['number']
subtopics = []
for attr in topic_el.attrib:
if attr in self._qtype_map:
text = topic_el.attrib[attr]
field = self._qtype_map[attr]
item[self._qtype._fields.index(field)] = text
if topic_el.tag in self._qtype_map:
text = ''.join(topic_el.itertext())
field = self._qtype_map[topic_el.tag]
item[self._qtype._fields.index(field)] = text
for field_el in topic_el:
if field_el.tag in self._qtype_map:
text = ''.join(field_el.itertext())
field = self._qtype_map[field_el.tag]
item[self._qtype._fields.index(field)] = text
if field_el.tag == 'subtopic':
text = ''.join(field_el.itertext())
subtopics.append(TrecSubtopic(field_el.attrib['number'], text, field_el.attrib['type']))
if self._subtopics_key in self._qtype._fields:
item[self._qtype._fields.index('subtopics')] = tuple(subtopics)
qid_field = self._qtype._fields.index('query_id')
item[qid_field] = item[qid_field].strip() # remove whitespace from query_ids
yield self._qtype(*item)
def queries_cls(self):
return self._qtype
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecColonQueries(BaseQueries):
def __init__(self, queries_dlc, encoding=None, namespace=None, lang=None):
self._queries_dlc = queries_dlc
self._encoding = encoding
self._queries_namespace = namespace
self._queries_lang = lang
def queries_iter(self):
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for line in f:
query_id, text = line.split(':', 1)
text = text.rstrip('\n')
yield GenericQuery(query_id, text)
def queries_path(self):
return self._queries_dlc.path()
def queries_cls(self):
return GenericQuery
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecQrels(BaseQrels):
def __init__(self, qrels_dlc, qrels_defs):
self._qrels_dlc = qrels_dlc
self._qrels_defs = qrels_defs
def qrels_path(self):
return self._qrels_dlc.path()
def qrels_iter(self):
with self._qrels_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
if line == '\n':
continue # ignore blank lines
cols = line.rstrip().split()
if len(cols) != 4:
raise RuntimeError(f'expected 4 columns, got {len(cols)}')
qid, it, did, score = cols
yield TrecQrel(qid, did, int(score), it)
def qrels_cls(self):
return TrecQrel
def qrels_defs(self):
return self._qrels_defs
class TrecPrels(TrecQrels):
def qrels_iter(self):
with self._qrels_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
if line == '\n':
continue # ignore blank lines
cols = line.rstrip().split()
if len(cols) != 5:
raise RuntimeError(f'expected 5 columns, got {len(cols)}')
qid, did, rel, method, iprob = cols
yield TrecPrel(qid, did, int(rel), int(method), float(iprob))
def qrels_cls(self):
return TrecPrel
class TrecScoredDocs(BaseScoredDocs):
def __init__(self, scoreddocs_dlc):
self._scoreddocs_dlc = scoreddocs_dlc
def scoreddocs_path(self):
return self._scoreddocs_dlc.path()
def scoreddocs_iter(self):
with self._scoreddocs_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
cols = line.rstrip().split()
if len(cols) == 6:
qid, _, did, _, score, _ = cols
elif len(cols) == 2:
qid, did, score = *cols, '0'
yield GenericScoredDoc(qid, did, float(score))
| <filename>ir_datasets/formats/trec.py
import io
import codecs
import tarfile
import re
import gzip
import xml.etree.ElementTree as ET
from fnmatch import fnmatch
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.indices import PickleLz4FullStore
from .base import GenericDoc, GenericQuery, GenericScoredDoc, BaseDocs, BaseQueries, BaseScoredDocs, BaseQrels
class TrecDoc(NamedTuple):
doc_id: str
text: str
marked_up_doc: str
class TitleUrlTextDoc(NamedTuple):
doc_id: str
title: str
url: str
text: str
class TrecQuery(NamedTuple):
query_id: str
title: str
description: str
narrative: str
class TrecSubtopic(NamedTuple):
number: str
text: str
type: str
class TrecQrel(NamedTuple):
query_id: str
doc_id: str
relevance: int
iteration: str
class TrecPrel(NamedTuple):
query_id: str
doc_id: str
relevance: int
method: int
iprob: float
# Default content tags from Anserini's TrecCollection
CONTENT_TAGS = 'TEXT HEADLINE TITLE HL HEAD TTL DD DATE LP LEADPARA'.split()
class TrecDocs(BaseDocs):
def __init__(self, docs_dlc, encoding=None, path_globs=None, content_tags=CONTENT_TAGS, parser='BS4', namespace=None, lang=None, expected_file_count=None, docstore_size_hint=None, count_hint=None):
self._docs_dlc = docs_dlc
self._encoding = encoding
self._path_globs = path_globs
self._content_tags = content_tags
self._parser = {
'BS4': self._parser_bs,
'text': self._parser_text,
'tut': self._parser_tut,
}[parser]
self._doc = {
'BS4': TrecDoc,
'text': GenericDoc,
'tut': TitleUrlTextDoc,
}[parser]
self._docs_namespace = namespace
self._docs_lang = lang
self._expected_file_count = expected_file_count
self._docstore_size_hint = docstore_size_hint
self._count_hint = count_hint
if expected_file_count is not None:
assert self._path_globs is not None, "expected_file_count only supported with path_globs"
def docs_path(self, force=True):
return self._docs_dlc.path(force)
@ir_datasets.util.use_docstore
def docs_iter(self):
if Path(self._docs_dlc.path()).is_dir():
if self._path_globs:
file_count = 0
for glob in sorted(self._path_globs):
for path in sorted(Path(self._docs_dlc.path()).glob(glob)):
file_count += 1
yield from self._docs_iter(path)
if self._expected_file_count is not None:
if file_count != self._expected_file_count:
raise RuntimeError(f'found {file_count} files of the expected {self._expected_file_count} matching the following: {self._path_globs} under {self._docs_dlc.path()}. Make sure that directories are linked such that these globs match the correct number of files.')
else:
yield from self._docs_iter(self._docs_dlc.path())
else:
if self._path_globs:
file_count = 0
# tarfile, find globs, open in streaming mode (r|)
with self._docs_dlc.stream() as stream:
with tarfile.open(fileobj=stream, mode='r|gz') as tarf:
for block in tarf:
if any(fnmatch(block.name, g) for g in self._path_globs):
file = tarf.extractfile(block)
if block.name.endswith('.gz'):
file = gzip.GzipFile(fileobj=file)
yield from self._parser(file)
file_count += 1
if self._expected_file_count is not None:
if file_count != self._expected_file_count:
raise RuntimeError(f'found {file_count} files of the expected {self._expected_file_count} matching the following: {self._path_globs} under {self._docs_dlc.path()}. Make sure that directories are linked such that these globs match the correct number of files.')
else:
with self._docs_dlc.stream() as f:
yield from self._parser(f)
def _docs_iter(self, path):
if Path(path).is_file():
if str(path).endswith('.gz'):
with gzip.open(path, 'rb') as f:
yield from self._parser(f)
else:
with path.open('rb') as f:
yield from self._parser(f)
elif Path(path).is_dir():
for child in path.iterdir():
yield from self._docs_iter(child)
def _parser_bs(self, stream):
BeautifulSoup = ir_datasets.lazy_libs.bs4().BeautifulSoup
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_markup = None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
elif line == '</DOC>\n':
soup = BeautifulSoup(f'<OUTER>\n{doc_markup}\n</OUTER>', 'lxml')
text = soup.get_text()
yield TrecDoc(doc_id, text, doc_markup)
doc_id, doc_markup = None, ''
else:
if in_tag:
doc_markup += line
if line.startswith('</'):
if any(line.startswith(f'</{tag}>') for tag in self._content_tags):
in_tag -= 1
if line.startswith('<'):
if any(line.startswith(f'<{tag}>') for tag in self._content_tags):
in_tag += 1
if in_tag == 1:
doc_markup += line
def _parser_text(self, stream):
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_text = None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
elif line == '</DOC>\n':
yield GenericDoc(doc_id, doc_text)
doc_id, doc_text = None, ''
else:
if line.startswith('</'):
if any(line.startswith(f'</{tag}>') for tag in self._content_tags):
in_tag = False
if in_tag:
doc_text += line
if line.startswith('<'):
if any(line.startswith(f'<{tag}>') for tag in self._content_tags):
in_tag = True
def _parser_tut(self, stream):
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_title, doc_url, doc_text = None, None, None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
if line.startswith('<TITLE>'):
doc_title = line.replace('<TITLE>', '').replace('</TITLE>\n', '').strip()
if line.startswith('<URL>'):
doc_url = line.replace('<URL>', '').replace('</URL>\n', '').strip()
elif line == '</DOC>\n':
yield TitleUrlTextDoc(doc_id, doc_title, doc_url, doc_text)
doc_id, doc_title, doc_url, doc_text = None, None, None, ''
else:
if line.startswith('</TEXT>'):
in_tag = False
if in_tag:
doc_text += line
if line.startswith('<TEXT>'):
in_tag = True
def docs_cls(self):
return self._doc
def docs_store(self, field='doc_id'):
return PickleLz4FullStore(
path=f'{self.docs_path(force=False)}.pklz4',
init_iter_fn=self.docs_iter,
data_cls=self.docs_cls(),
lookup_field=field,
index_fields=['doc_id'],
size_hint=self._docstore_size_hint,
count_hint=self._count_hint,
)
def docs_count(self):
if self.docs_store().built():
return self.docs_store().count()
def docs_namespace(self):
return self._docs_namespace
def docs_lang(self):
return self._docs_lang
DEFAULT_QTYPE_MAP = {
'<num> *(Number:)?': 'query_id',
'<title> *(Topic:)?': 'title',
'<desc> *(Description:)?': 'description',
'<narr> *(Narrative:)?': 'narrative'
}
class TrecQueries(BaseQueries):
def __init__(self, queries_dlc, qtype=TrecQuery, qtype_map=None, encoding=None, namespace=None, lang=None, remove_tags=('</title>',)):
self._queries_dlc = queries_dlc
self._qtype = qtype
self._qtype_map = qtype_map or DEFAULT_QTYPE_MAP
self._encoding = encoding
self._queries_namespace = namespace
self._queries_lang = lang
self._remove_tags = remove_tags
def queries_path(self):
return self._queries_dlc.path()
def queries_iter(self):
fields, reading = {}, None
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for line in f:
if line.startswith('</top>'):
assert len(fields) == len(self._qtype._fields), fields
for tag in self._remove_tags:
fields = {k: v.replace(tag, '') for k, v in fields.items()}
yield self._qtype(*(fields[f].strip() for f in self._qtype._fields))
fields, reading = {}, None
match_any = False
for tag, target in self._qtype_map.items():
match = re.match(tag, line)
if match:
fields[target] = line[match.end():]
reading = target
match_any = True
break
if not match_any and reading and not line.startswith('<'):
fields[reading] += line
def queries_cls(self):
return self._qtype
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecXmlQueries(BaseQueries):
def __init__(self, queries_dlc, qtype=TrecQuery, qtype_map=None, encoding=None, subtopics_key='subtopics', namespace=None, lang=None):
self._queries_dlc = queries_dlc
self._qtype = qtype
self._qtype_map = qtype_map or {f: f for f in qtype._fields}
self._encoding = encoding
self._subtopics_key = subtopics_key
self._queries_namespace = namespace
self._queries_lang = lang
def queries_path(self):
return self._queries_dlc.path()
def queries_iter(self):
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for topic_el in ET.fromstring(f.read()):
item = [None for _ in self._qtype._fields]
if 'number' in topic_el.attrib:
item[self._qtype._fields.index('query_id')] = topic_el.attrib['number']
subtopics = []
for attr in topic_el.attrib:
if attr in self._qtype_map:
text = topic_el.attrib[attr]
field = self._qtype_map[attr]
item[self._qtype._fields.index(field)] = text
if topic_el.tag in self._qtype_map:
text = ''.join(topic_el.itertext())
field = self._qtype_map[topic_el.tag]
item[self._qtype._fields.index(field)] = text
for field_el in topic_el:
if field_el.tag in self._qtype_map:
text = ''.join(field_el.itertext())
field = self._qtype_map[field_el.tag]
item[self._qtype._fields.index(field)] = text
if field_el.tag == 'subtopic':
text = ''.join(field_el.itertext())
subtopics.append(TrecSubtopic(field_el.attrib['number'], text, field_el.attrib['type']))
if self._subtopics_key in self._qtype._fields:
item[self._qtype._fields.index('subtopics')] = tuple(subtopics)
qid_field = self._qtype._fields.index('query_id')
item[qid_field] = item[qid_field].strip() # remove whitespace from query_ids
yield self._qtype(*item)
def queries_cls(self):
return self._qtype
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecColonQueries(BaseQueries):
def __init__(self, queries_dlc, encoding=None, namespace=None, lang=None):
self._queries_dlc = queries_dlc
self._encoding = encoding
self._queries_namespace = namespace
self._queries_lang = lang
def queries_iter(self):
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for line in f:
query_id, text = line.split(':', 1)
text = text.rstrip('\n')
yield GenericQuery(query_id, text)
def queries_path(self):
return self._queries_dlc.path()
def queries_cls(self):
return GenericQuery
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecQrels(BaseQrels):
def __init__(self, qrels_dlc, qrels_defs):
self._qrels_dlc = qrels_dlc
self._qrels_defs = qrels_defs
def qrels_path(self):
return self._qrels_dlc.path()
def qrels_iter(self):
with self._qrels_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
if line == '\n':
continue # ignore blank lines
cols = line.rstrip().split()
if len(cols) != 4:
raise RuntimeError(f'expected 4 columns, got {len(cols)}')
qid, it, did, score = cols
yield TrecQrel(qid, did, int(score), it)
def qrels_cls(self):
return TrecQrel
def qrels_defs(self):
return self._qrels_defs
class TrecPrels(TrecQrels):
def qrels_iter(self):
with self._qrels_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
if line == '\n':
continue # ignore blank lines
cols = line.rstrip().split()
if len(cols) != 5:
raise RuntimeError(f'expected 5 columns, got {len(cols)}')
qid, did, rel, method, iprob = cols
yield TrecPrel(qid, did, int(rel), int(method), float(iprob))
def qrels_cls(self):
return TrecPrel
class TrecScoredDocs(BaseScoredDocs):
def __init__(self, scoreddocs_dlc):
self._scoreddocs_dlc = scoreddocs_dlc
def scoreddocs_path(self):
return self._scoreddocs_dlc.path()
def scoreddocs_iter(self):
with self._scoreddocs_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
cols = line.rstrip().split()
if len(cols) == 6:
qid, _, did, _, score, _ = cols
elif len(cols) == 2:
qid, did, score = *cols, '0'
yield GenericScoredDoc(qid, did, float(score))
| en | 0.42398 | # Default content tags from Anserini's TrecCollection # tarfile, find globs, open in streaming mode (r|) # remove whitespace from query_ids # ignore blank lines # ignore blank lines | 2.046735 | 2 |
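The *_dlc arguments used throughout the module above are ir_datasets "downloadable content" objects that expose path() and stream(); a minimal local-file stand-in is enough to drive one of the classes over a file on disk. A hedged sketch, assuming ir_datasets is installed and a standard four-column TREC qrels file sits at my.qrels:

import contextlib
from ir_datasets.formats.trec import TrecQrels

class LocalFile:
    """Minimal stand-in for an ir_datasets download-content object."""
    def __init__(self, path):
        self._path = path
    def path(self, force=True):
        return self._path
    @contextlib.contextmanager
    def stream(self):
        with open(self._path, 'rb') as f:
            yield f

qrels = TrecQrels(LocalFile('my.qrels'), qrels_defs={0: 'not relevant', 1: 'relevant'})
for qrel in qrels.qrels_iter():  # each line: query_id iteration doc_id relevance
    print(qrel.query_id, qrel.doc_id, qrel.relevance)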
textpand/download.py | caufieldjh/textpand-for-kgs | 3 | 843 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .utils import download_from_yaml
def download(output_dir: str, snippet_only: bool, ignore_cache: bool = False) -> None:
"""Downloads data files from list of URLs (default: download.yaml) into data directory (default: data/).
Args:
output_dir: A string pointing to the location to download data to.
snippet_only: Downloads only the first 5 kB of the source, for testing and file checks.
ignore_cache: Ignore cache and download files even if they exist [false]
Returns:
None.
"""
download_from_yaml(yaml_file="download.yaml",
output_dir=output_dir,
snippet_only=snippet_only,
ignore_cache=ignore_cache,
verbose=True)
return None
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .utils import download_from_yaml
def download(output_dir: str, snippet_only: bool, ignore_cache: bool = False) -> None:
"""Downloads data files from list of URLs (default: download.yaml) into data directory (default: data/).
Args:
output_dir: A string pointing to the location to download data to.
snippet_only: Downloads only the first 5 kB of the source, for testing and file checks.
ignore_cache: Ignore cache and download files even if they exist [false]
Returns:
None.
"""
download_from_yaml(yaml_file="download.yaml",
output_dir=output_dir,
snippet_only=snippet_only,
ignore_cache=ignore_cache,
verbose=True)
return None
| en | 0.612788 | #!/usr/bin/env python # -*- coding: utf-8 -*- Downloads data files from list of URLs (default: download.yaml) into data directory (default: data/). Args: output_dir: A string pointing to the location to download data to. snippet_only: Downloads only the first 5 kB of the source, for testing and file checks. ignore_cache: Ignore cache and download files even if they exist [false] Returns: None. | 2.799298 | 3 |
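A usage sketch for the helper above: it reads the URL list from download.yaml in the working directory and writes the files into the chosen output directory; snippet_only keeps only the first 5 kB of each source, which is handy for smoke tests. The import path assumes the file is installed as textpand/download.py, as the entry's path suggests.

from textpand.download import download

download(output_dir="data", snippet_only=True)                      # quick 5 kB smoke test
download(output_dir="data", snippet_only=False, ignore_cache=True)  # full download, bypassing the cache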
venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py | usegalaxy-no/usegalaxy | 1 | 844 | #!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_ontap_autosupport_invoke
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'
}
DOCUMENTATION = '''
module: na_ontap_autosupport_invoke
author: NetApp Ansible Team (@carchi8py) <<EMAIL>>
short_description: NetApp ONTAP send AutoSupport message
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: '20.4.0'
description:
- Send an AutoSupport message from a node
options:
name:
description:
- The name of the node to send the message to.
- Not specifying this option invokes AutoSupport on all nodes in the cluster.
type: str
autosupport_message:
description:
- Text sent in the subject line of the AutoSupport message.
type: str
aliases:
- message
version_added: 20.8.0
type:
description:
- Type of AutoSupport Collection to Issue.
choices: ['test', 'performance', 'all']
default: 'all'
type: str
uri:
description:
- send the AutoSupport message to the destination you specify instead of the configured destination.
type: str
'''
EXAMPLES = '''
- name: Send message
na_ontap_autosupport_invoke:
name: node1
message: invoked test autosupport rest
uri: http://1.2.3.4/delivery_uri
type: test
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPasupInvoke(object):
''' send ASUP message '''
def __init__(self):
self.use_rest = False
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
name=dict(required=False, type='str'),
autosupport_message=dict(required=False, type='str', aliases=["message"]),
type=dict(required=False, choices=[
'test', 'performance', 'all'], default='all'),
uri=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
# REST API should be used for ONTAP 9.6 or higher.
self.rest_api = OntapRestAPI(self.module)
if self.rest_api.is_rest():
self.use_rest = True
else:
if not HAS_NETAPP_LIB:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_nodes(self):
nodes = list()
node_obj = netapp_utils.zapi.NaElement('system-node-get-iter')
desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
node_details_info = netapp_utils.zapi.NaElement('node-details-info')
node_details_info.add_new_child('node', '')
desired_attributes.add_child_elem(node_details_info)
node_obj.add_child_elem(desired_attributes)
try:
result = self.server.invoke_successfully(node_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) > 0:
node_info = result.get_child_by_name('attributes-list')
if node_info is not None:
nodes = [node_details.get_child_content('node') for node_details in node_info.get_children()]
return nodes
def send_zapi_message(self, params, node_name):
params['node-name'] = node_name
send_message = netapp_utils.zapi.NaElement.create_node_with_children('autosupport-invoke', **params)
try:
self.server.invoke_successfully(send_message, enable_tunneling=False)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
% (node_name, to_native(error)),
exception=traceback.format_exc())
def send_message(self):
params = dict()
if self.parameters.get('autosupport_message'):
params['message'] = self.parameters['autosupport_message']
if self.parameters.get('type'):
params['type'] = self.parameters['type']
if self.parameters.get('uri'):
params['uri'] = self.parameters['uri']
if self.use_rest:
if self.parameters.get('name'):
params['node.name'] = self.parameters['name']
node_name = params['node.name']
else:
node_name = '*'
api = 'support/autosupport/messages'
dummy, error = self.rest_api.post(api, params)
if error is not None:
self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
% (node_name, error))
else:
if self.parameters.get('name'):
node_names = [self.parameters['name']]
else:
# simulate REST behavior by sending to all nodes in the cluster
node_names = self.get_nodes()
for name in node_names:
self.send_zapi_message(params, name)
def ems_log_event(self):
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
return netapp_utils.ems_log_event("na_ontap_autosupport_invoke", cserver)
def apply(self):
if not self.use_rest:
self.ems_log_event()
if self.module.check_mode:
pass
else:
self.send_message()
self.module.exit_json(changed=True)
def main():
message = NetAppONTAPasupInvoke()
message.apply()
if __name__ == '__main__':
main()
| #!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_ontap_autosupport_invoke
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'
}
DOCUMENTATION = '''
module: na_ontap_autosupport_invoke
author: NetApp Ansible Team (@carchi8py) <<EMAIL>>
short_description: NetApp ONTAP send AutoSupport message
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: '20.4.0'
description:
- Send an AutoSupport message from a node
options:
name:
description:
- The name of the node to send the message to.
- Not specifying this option invokes AutoSupport on all nodes in the cluster.
type: str
autosupport_message:
description:
- Text sent in the subject line of the AutoSupport message.
type: str
aliases:
- message
version_added: 20.8.0
type:
description:
- Type of AutoSupport Collection to Issue.
choices: ['test', 'performance', 'all']
default: 'all'
type: str
uri:
description:
- send the AutoSupport message to the destination you specify instead of the configured destination.
type: str
'''
EXAMPLES = '''
- name: Send message
na_ontap_autosupport_invoke:
name: node1
message: invoked test autosupport rest
uri: http://1.2.3.4/delivery_uri
type: test
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPasupInvoke(object):
''' send ASUP message '''
def __init__(self):
self.use_rest = False
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
name=dict(required=False, type='str'),
autosupport_message=dict(required=False, type='str', aliases=["message"]),
type=dict(required=False, choices=[
'test', 'performance', 'all'], default='all'),
uri=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
# REST API should be used for ONTAP 9.6 or higher.
self.rest_api = OntapRestAPI(self.module)
if self.rest_api.is_rest():
self.use_rest = True
else:
if not HAS_NETAPP_LIB:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_nodes(self):
nodes = list()
node_obj = netapp_utils.zapi.NaElement('system-node-get-iter')
desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
node_details_info = netapp_utils.zapi.NaElement('node-details-info')
node_details_info.add_new_child('node', '')
desired_attributes.add_child_elem(node_details_info)
node_obj.add_child_elem(desired_attributes)
try:
result = self.server.invoke_successfully(node_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) > 0:
node_info = result.get_child_by_name('attributes-list')
if node_info is not None:
nodes = [node_details.get_child_content('node') for node_details in node_info.get_children()]
return nodes
def send_zapi_message(self, params, node_name):
params['node-name'] = node_name
send_message = netapp_utils.zapi.NaElement.create_node_with_children('autosupport-invoke', **params)
try:
self.server.invoke_successfully(send_message, enable_tunneling=False)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
% (node_name, to_native(error)),
exception=traceback.format_exc())
def send_message(self):
params = dict()
if self.parameters.get('autosupport_message'):
params['message'] = self.parameters['autosupport_message']
if self.parameters.get('type'):
params['type'] = self.parameters['type']
if self.parameters.get('uri'):
params['uri'] = self.parameters['uri']
if self.use_rest:
if self.parameters.get('name'):
params['node.name'] = self.parameters['name']
node_name = params['node.name']
else:
node_name = '*'
api = 'support/autosupport/messages'
dummy, error = self.rest_api.post(api, params)
if error is not None:
self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
% (node_name, error))
else:
if self.parameters.get('name'):
node_names = [self.parameters['name']]
else:
# simulate REST behavior by sending to all nodes in the cluster
node_names = self.get_nodes()
for name in node_names:
self.send_zapi_message(params, name)
def ems_log_event(self):
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
return netapp_utils.ems_log_event("na_ontap_autosupport_invoke", cserver)
def apply(self):
if not self.use_rest:
self.ems_log_event()
if self.module.check_mode:
pass
else:
self.send_message()
self.module.exit_json(changed=True)
def main():
message = NetAppONTAPasupInvoke()
message.apply()
if __name__ == '__main__':
main()
| en | 0.460761 | #!/usr/bin/python # (c) 2020, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) na_ontap_autosupport_invoke module: na_ontap_autosupport_invoke author: NetApp Ansible Team (@carchi8py) <<EMAIL>> short_description: NetApp ONTAP send AutoSupport message extends_documentation_fragment: - netapp.ontap.netapp.na_ontap version_added: '20.4.0' description: - Send an AutoSupport message from a node options: name: description: - The name of the node to send the message to. - Not specifying this option invokes AutoSupport on all nodes in the cluster. type: str autosupport_message: description: - Text sent in the subject line of the AutoSupport message. type: str aliases: - message version_added: 20.8.0 type: description: - Type of AutoSupport Collection to Issue. choices: ['test', 'performance', 'all'] default: 'all' type: str uri: description: - send the AutoSupport message to the destination you specify instead of the configured destination. type: str - name: Send message na_ontap_autosupport_invoke: name: node1 message: invoked test autosupport rest uri: http://1.2.3.4/delivery_uri type: test hostname: "{{ hostname }}" username: "{{ username }}" password: "{{ password }}" send ASUP message # REST API should be used for ONTAP 9.6 or higher. # simulate REST behavior by sending to all nodes in the cluster | 1.979601 | 2 |
tests/api/serializer/test_user.py | armandomeeuwenoord/freight | 562 | 845 | from freight.api.serializer import serialize
from freight.testutils import TestCase
class UserSerializerTest(TestCase):
def test_simple(self):
user = self.create_user()
result = serialize(user)
assert result["id"] == str(user.id)
assert result["name"] == user.name
| from freight.api.serializer import serialize
from freight.testutils import TestCase
class UserSerializerTest(TestCase):
def test_simple(self):
user = self.create_user()
result = serialize(user)
assert result["id"] == str(user.id)
assert result["name"] == user.name
| none | 1 | 2.648345 | 3 |
|
binning/pozo_5m_class_dem.py | UP-RS-ESP/GEW-DAP04-WS201819 | 2 | 846 | import sys
import numpy as np
from matplotlib import pyplot as pl
from rw import WriteGTiff
fn = '../pozo-steep-vegetated-pcl.npy'
pts = np.load(fn)
x, y, z, c = pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 5]
ix = (0.2 * (x - x.min())).astype('int')
iy = (0.2 * (y - y.min())).astype('int')
shape = (100, 100)
xb = np.arange(shape[1]+1)
yb = np.arange(shape[0]+1)
fg, ax = pl.subplots(ncols = 2, nrows = 2,
figsize = (10.24, 10.24),
sharex = True, sharey = True)
uc = (2, 5)
for j in range(len(uc)):
print('Class %i' % uc[j])
b = c == uc[j]
cx, cy, cz = ix[b], iy[b], z[b]
mean = np.zeros(shape)
stdr = np.zeros(shape)
for i in range(shape[0]):
print('% 3d%%' % i)
for k in range(shape[1]):
b = (cy == i) * (cx == k)
mean[i, k] = cz[b].mean()
stdr[i, k] = cz[b].std()
fname = 'pozo_5m_dem_mean_cl%i.tif' % uc[j]
WriteGTiff(fname, mean, x.min(), y.min()+500, step = 5)
np.save('pozo_5m_dem_mean_cl%i.npy' % uc[j], mean)
np.save('pozo_5m_dem_stdr_cl%i.npy' % uc[j], stdr)
ax[0, j].set_title('Class %i' % uc[j])
im = ax[0, j].pcolormesh(xb, yb,
np.ma.masked_invalid(mean),
cmap = pl.cm.viridis_r)
cb = fg.colorbar(im, ax = ax[0, j])
cb.set_label('Mean elevation [m]')
im = ax[1, j].pcolormesh(xb, yb,
np.ma.masked_invalid(stdr),
cmap = pl.cm.magma_r)
cb = fg.colorbar(im, ax = ax[1, j])
cb.set_label('Elevation STD')
ax[0, j].set_aspect('equal')
ax[1, j].set_aspect('equal')
pl.savefig('%s.png' % sys.argv[0][:-3])
| import sys
import numpy as np
from matplotlib import pyplot as pl
from rw import WriteGTiff
fn = '../pozo-steep-vegetated-pcl.npy'
pts = np.load(fn)
x, y, z, c = pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 5]
ix = (0.2 * (x - x.min())).astype('int')
iy = (0.2 * (y - y.min())).astype('int')
shape = (100, 100)
xb = np.arange(shape[1]+1)
yb = np.arange(shape[0]+1)
fg, ax = pl.subplots(ncols = 2, nrows = 2,
figsize = (10.24, 10.24),
sharex = True, sharey = True)
uc = (2, 5)
for j in range(len(uc)):
print('Class %i' % uc[j])
b = c == uc[j]
cx, cy, cz = ix[b], iy[b], z[b]
mean = np.zeros(shape)
stdr = np.zeros(shape)
for i in range(shape[0]):
print('% 3d%%' % i)
for k in range(shape[1]):
b = (cy == i) * (cx == k)
mean[i, k] = cz[b].mean()
stdr[i, k] = cz[b].std()
fname = 'pozo_5m_dem_mean_cl%i.tif' % uc[j]
WriteGTiff(fname, mean, x.min(), y.min()+500, step = 5)
np.save('pozo_5m_dem_mean_cl%i.npy' % uc[j], mean)
np.save('pozo_5m_dem_stdr_cl%i.npy' % uc[j], stdr)
ax[0, j].set_title('Class %i' % uc[j])
im = ax[0, j].pcolormesh(xb, yb,
np.ma.masked_invalid(mean),
cmap = pl.cm.viridis_r)
cb = fg.colorbar(im, ax = ax[0, j])
cb.set_label('Mean elevation [m]')
im = ax[1, j].pcolormesh(xb, yb,
np.ma.masked_invalid(stdr),
cmap = pl.cm.magma_r)
cb = fg.colorbar(im, ax = ax[1, j])
cb.set_label('Elevation STD')
ax[0, j].set_aspect('equal')
ax[1, j].set_aspect('equal')
pl.savefig('%s.png' % sys.argv[0][:-3])
| none | 1 | 2.119353 | 2 |
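The nested loops above visit all points of the selected class once per grid cell, which is O(cells x points) and dominates the runtime for large point clouds. If only the per-cell mean and standard deviation are needed, the same 100x100 grids can be accumulated in a single pass; the sketch below uses synthetic points and the script's 5 m / 0.2 scaling, so the data itself is an assumption.
# Single-pass alternative to the per-cell loops; synthetic data, same layout.
import numpy as np

rng = np.random.default_rng(0)
n = 10000
x = rng.uniform(0, 500, n)          # metres
y = rng.uniform(0, 500, n)
z = rng.normal(100, 5, n)           # elevation

shape = (100, 100)                  # 5 m cells
ix = np.clip((0.2 * (x - x.min())).astype(int), 0, shape[1] - 1)
iy = np.clip((0.2 * (y - y.min())).astype(int), 0, shape[0] - 1)

flat = iy * shape[1] + ix           # one flat bin index per point
ncell = shape[0] * shape[1]
count = np.bincount(flat, minlength=ncell)
sum_z = np.bincount(flat, weights=z, minlength=ncell)
sum_z2 = np.bincount(flat, weights=z * z, minlength=ncell)

with np.errstate(invalid='ignore', divide='ignore'):
    mean = (sum_z / count).reshape(shape)
    # population std (ddof=0), matching cz[b].std() in the loop version
    stdr = np.sqrt(sum_z2 / count - (sum_z / count) ** 2).reshape(shape)

# Empty cells are nan and can be masked exactly as in the plotting code:
mean_masked = np.ma.masked_invalid(mean)
stdr_masked = np.ma.masked_invalid(stdr)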
|
comtypes/_meta.py | phuslu/pyMSAA | 23 | 847 | <reponame>phuslu/pyMSAA
# comtypes._meta helper module
from ctypes import POINTER, c_void_p, cast
import comtypes
################################################################
# metaclass for CoClass (in comtypes/__init__.py)
def _wrap_coclass(self):
# We are an IUnknown pointer, represented as a c_void_p instance,
# but we really want this interface:
itf = self._com_interfaces_[0]
punk = cast(self, POINTER(itf))
result = punk.QueryInterface(itf)
result.__dict__["__clsid"] = str(self._reg_clsid_)
return result
def _coclass_from_param(cls, obj):
if isinstance(obj, (cls._com_interfaces_[0], cls)):
return obj
raise TypeError(obj)
#
# The mro() of a POINTER(App) type, where class App is a subclass of CoClass:
#
# POINTER(App)
# App
# CoClass
# c_void_p
# _SimpleCData
# _CData
# object
class _coclass_meta(type):
# metaclass for CoClass
#
# When a CoClass subclass is created, create a POINTER(...) type
# for that class, with bases <coclass> and c_void_p. Also, the
# POINTER(...) type gets a __ctypes_from_outparam__ method which
# will QueryInterface for the default interface: the first one on
# the coclass' _com_interfaces_ list.
def __new__(cls, name, bases, namespace):
klass = type.__new__(cls, name, bases, namespace)
if bases == (object,):
return klass
# XXX We should insist that a _reg_clsid_ is present.
if "_reg_clsid_" in namespace:
clsid = namespace["_reg_clsid_"]
comtypes.com_coclass_registry[str(clsid)] = klass
PTR = _coclass_pointer_meta("POINTER(%s)" % klass.__name__,
(klass, c_void_p),
{"__ctypes_from_outparam__": _wrap_coclass,
"from_param": classmethod(_coclass_from_param),
})
from ctypes import _pointer_type_cache
_pointer_type_cache[klass] = PTR
return klass
# will not work if we change the order of the two base classes!
class _coclass_pointer_meta(type(c_void_p), _coclass_meta):
pass
| # comtypes._meta helper module
from ctypes import POINTER, c_void_p, cast
import comtypes
################################################################
# metaclass for CoClass (in comtypes/__init__.py)
def _wrap_coclass(self):
# We are an IUnknown pointer, represented as a c_void_p instance,
# but we really want this interface:
itf = self._com_interfaces_[0]
punk = cast(self, POINTER(itf))
result = punk.QueryInterface(itf)
result.__dict__["__clsid"] = str(self._reg_clsid_)
return result
def _coclass_from_param(cls, obj):
if isinstance(obj, (cls._com_interfaces_[0], cls)):
return obj
raise TypeError(obj)
#
# The mro() of a POINTER(App) type, where class App is a subclass of CoClass:
#
# POINTER(App)
# App
# CoClass
# c_void_p
# _SimpleCData
# _CData
# object
class _coclass_meta(type):
# metaclass for CoClass
#
# When a CoClass subclass is created, create a POINTER(...) type
# for that class, with bases <coclass> and c_void_p. Also, the
# POINTER(...) type gets a __ctypes_from_outparam__ method which
# will QueryInterface for the default interface: the first one on
# the coclass' _com_interfaces_ list.
def __new__(cls, name, bases, namespace):
klass = type.__new__(cls, name, bases, namespace)
if bases == (object,):
return klass
# XXX We should insist that a _reg_clsid_ is present.
if "_reg_clsid_" in namespace:
clsid = namespace["_reg_clsid_"]
comtypes.com_coclass_registry[str(clsid)] = klass
PTR = _coclass_pointer_meta("POINTER(%s)" % klass.__name__,
(klass, c_void_p),
{"__ctypes_from_outparam__": _wrap_coclass,
"from_param": classmethod(_coclass_from_param),
})
from ctypes import _pointer_type_cache
_pointer_type_cache[klass] = PTR
return klass
# will not work if we change the order of the two base classes!
class _coclass_pointer_meta(type(c_void_p), _coclass_meta):
pass | en | 0.738228 | # comtypes._meta helper module ################################################################ # metaclass for CoClass (in comtypes/__init__.py) # We are an IUnknown pointer, represented as a c_void_p instance, # but we really want this interface: # # The mro() of a POINTER(App) type, where class App is a subclass of CoClass: # # POINTER(App) # App # CoClass # c_void_p # _SimpleCData # _CData # object # metaclass for CoClass # # When a CoClass subclass is created, create a POINTER(...) type # for that class, with bases <coclass> and c_void_p. Also, the # POINTER(...) type gets a __ctypes_from_outparam__ method which # will QueryInterface for the default interface: the first one on # the coclass' _com_interfaces_ list. # XXX We should insist that a _reg_clsid_ is present. # will not work if we change the order of the two base classes! | 2.372825 | 2 |
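The metaclass in the record above does two things for every concrete CoClass: it registers the class by its _reg_clsid_ and it manufactures a companion POINTER(...) type whose __ctypes_from_outparam__ resolves the default interface. Stripped of ctypes and COM, that register-and-build-companion pattern looks roughly like the sketch below; all names are illustrative rather than comtypes' API, and the companion type deliberately wraps instead of subclassing the registered class, sidestepping the metaclass re-entry that the real code avoids through its MRO ordering.
# Plain-Python sketch of "register each subclass and cache a companion type";
# illustrative names only, no ctypes/COM involved.
com_coclass_registry = {}
pointer_type_cache = {}

class CoClassMeta(type):
    def __new__(mcls, name, bases, namespace):
        klass = super(CoClassMeta, mcls).__new__(mcls, name, bases, namespace)
        if bases == (object,):
            return klass                      # abstract base: nothing to do
        clsid = namespace.get("_reg_clsid_")
        if clsid is not None:
            com_coclass_registry[str(clsid)] = klass
        # Companion type standing in for POINTER(klass); it wraps rather than
        # subclasses klass, so creating it does not re-enter this metaclass.
        companion = type("POINTER(%s)" % name, (object,),
                         {"target": klass,
                          "__ctypes_from_outparam__": lambda self: self.target})
        pointer_type_cache[klass] = companion
        return klass

# Works on both Python 2 and 3 by calling the metaclass directly.
CoClass = CoClassMeta("CoClass", (object,), {})

class App(CoClass):
    _reg_clsid_ = "{12345678-0000-0000-0000-000000000000}"

assert com_coclass_registry["{12345678-0000-0000-0000-000000000000}"] is App
assert pointer_type_cache[App].target is App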
bin/ADFRsuite/CCSBpckgs/mglutil/gui/BasicWidgets/Tk/Dial.py | AngelRuizMoreno/Jupyter_Dock_devel | 0 | 848 | ################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. <NAME> and TSRI 2016
##
################################################################################
#########################################################################
#
# Date: Mai 2001 Authors: <NAME>, <NAME>
#
# <EMAIL>
# <EMAIL>
#
# Copyright: <NAME>, <NAME> and TSRI
#
#########################################################################
import Tkinter
import math
import types
import sys
import os
from mglutil.util.callback import CallbackManager
from mglutil.util.misc import ensureFontCase
from optionsPanel import OptionsPanel
from KeyboardEntry import KeyboardEntry
class Dial(Tkinter.Frame, KeyboardEntry):
"""This class implements a Dial widget.
The widget has a pointer that can be moved around a circle.
The range corresponding to one full turn can be specified as well as the min
and max values that are allowed. By default these are set to None, meaning that
there is no min and no max. One turn corresponds to 360 units by default.
A dial can also operate in discrete mode (if self.increment is set to x). In
this mode the values will be restrained to be multiples of self.increment.
The widget has a Callback manager. Callback functions get called at every value
change if self.continuous is set to 1, else they get called when the mouse
button is released. They always get called with the current value as an
argument.
An optional label can be displayed at the center of the Dial widget.
The size of the dial has to be specified at instantiation. Other parameters
can be set after the widget has been created.
The widget tries to automatically adjust the size of the arrow according to
the size of the dial.
The widget has a configure() method: type, min, max, increment, precision,
showLabel, value, continuous, oneTurn can be set this way.
master, labCfg and size can be passed only to the constructor.
a lock() method is used to disable the various gui components of the
options panel. Usage: <instance>.lock(<component>=<value>)
components see configure(). value is 0 or 1. 1 disables,
0 enables.
Setting values with increment enabled:
if using the method set(), the actual value will 'snap' to the next increment.
i.e., if the value is set to 3, and the increment is set to 2, setting the
value to 6 will actually result in 7 (3,5,7,9,.....)
To still be able to set the value, disregarding the current active increment,
the set method understands the optional keyword force=True, i.e.
dial.set(<value>, force=True)), which will set the value to <value>. The
increment will now be added to this new <value>
"""
def __init__(self, master=None, type='float',
labCfg={'fg':'black','side':'left', 'text':None},
min=None, max=None, increment=.0, precision=2,
showLabel=1, value=0.0, continuous=1, oneTurn=360.,
size=50, callback=None,
lockMin=0, lockBMin=0, lockMax=0, lockBMax=0,
lockIncrement=0, lockBIncrement=0,
lockPrecision=0, lockShowLabel=0, lockValue=0,
lockType=0, lockContinuous=0, lockOneTurn=0, **kw):
Tkinter.Frame.__init__(self, master)
Tkinter.Pack.config(self)
self.callbacks = CallbackManager() # object to manage callback
# functions. They get called with the
# current value as an argument
# initialize various attributes with default values
self.precision = 2 # decimal places
self.min = None # minimum value
self.max = None # maximum value
self.increment = increment # value increment
self.minOld = 0. # used to store old values
self.maxOld = 0.
self.incrementOld = increment
self.size = 50 # defines widget size
self.offsetValue = 0. # used to set increment correctly
self.lab = None # label
self.callback = None # user specified callback
self.opPanel = None # option panel widget
self.oneTurn = 360. # value increment for 1 full turn
self.value = 0.0 # current value of widget
self.oldValue = 0.0 # old value of widget
self.showLabel = 1 # turn on to display label on
self.continuous = 1 # set to 1 to call callbacks at
# each value change, else gets called
# on button release event
self.angle = 0. # angle corresponding to value
self.labCfg = labCfg # Tkinter Label options
self.labelFont = (
ensureFontCase('helvetica'), 14, 'bold') # label font
self.labelColor = 'yellow' # label color
self.canvas = None # the canvas to create the widget in
self.usedArcColor = '#aaaaaa' # filled arc color of used portion
self.unusedArcColor = '#cccccc' # filled arc color of unused portion
self.pyOver180 = math.pi/180.0 # constants used in various places
self.threeSixtyOver1turn = 1
self.piOver1turn = math.pi/360.
self.lockMin = lockMin # lock<X> vars are used in self.lock()
self.lockMax = lockMax # to lock/unlock entries in optionpanel
self.lockIncrement = lockIncrement
self.lockBMin = lockBMin
self.lockBMax = lockBMax
self.lockBIncrement = lockBIncrement
self.lockPrecision = lockPrecision
self.lockShowLabel = lockShowLabel
self.lockValue = lockValue
self.lockType = lockType
self.lockContinuous = lockContinuous
self.lockOneTurn = lockOneTurn
self.setArrow()
# configure with user-defined values
self.setSize(size)
self.setCallback(callback)
self.setContinuous(continuous)
self.setType(type)
self.setPrecision(precision)
self.setOneTurn(oneTurn)
self.setMin(min)
self.setMax(max)
self.setIncrement(increment)
self.setShowLabel(showLabel)
self.setValue(value)
self.setLabel(self.labCfg)
self.createCanvas(master)
canvas = self.canvas
canvas.bind("<ButtonPress-1>", self.mouseDown)
canvas.bind("<ButtonRelease-1>", self.mouseUp)
canvas.bind("<B1-Motion>", self.mouseMove)
canvas.bind("<Button-3>", self.toggleOptPanel)
if os.name == 'nt': #sys.platform == 'win32':
canvas.bind("<MouseWheel>", self.mouseWheel)
else:
canvas.bind("<Button-4>", self.mouseWheel)
canvas.bind("<Button-5>", self.mouseWheel)
KeyboardEntry.__init__(self, (canvas,), self.setFromEntry)
self.opPanel = OptionsPanel(master = self, title="Dial Options")
## if self.callback:
## self.callbacks.AddCallback(self.callback)
def setFromEntry(self, valueString):
try:
self.set(self.type(valueString))
except ValueError:
# fixme we would like to pop this up in a window maybe
import traceback
traceback.print_stack()
traceback.print_exc()
def handleKeyStroke(self, event):
# handle key strokes for numbers only in widget keyboard entry label
key = event.keysym
if key.isdigit() or key=='period' or key=='minus' or key=='plus':
if key == 'period':
key = '.'
elif key == 'minus':
key = '-'
elif key == 'plus':
key = '+'
self.typedValue += key
self.typedValueTK.configure(text=self.typedValue)
else:
KeyboardEntry.handleKeyStroke(self, event)
def setSize(self, size):
"""Set widget size. Size must be of type int and greater than 0"""
assert isinstance(size, types.IntType),\
"Illegal size: expected type %s, got %s"%(type(1), type(size) )
assert size > 0, "Illegal size: must be > 0, got %s"%size
self.size = size
def setCallback(self, cb):
"""Set widget callback. Must be callable function. Callback is called
every time the widget value is set/modified"""
assert cb is None or callable(cb) or type(cb) is types.ListType,\
"Illegal callback: must be either None or callable, or list. Got %s"%cb
if cb is None: return
elif type(cb) is types.ListType:
for func in cb:
assert callable(func), "Illegal callback must be callable. Got %s"%func
self.callbacks.AddCallback(func)
else:
self.callbacks.AddCallback(cb)
self.callback = cb
def toggleOptPanel(self, event=None):
if self.opPanel.flag:
self.opPanel.Dismiss_cb()
else:
if not hasattr(self.opPanel, 'optionsForm'):
self.opPanel.displayPanel(create=1)
else:
self.opPanel.displayPanel(create=0)
def setArrow(self, size=None):
if size is not None:
self.setSize(size)
aS = self.size/40
self.arrowLength = max(3, 3*aS) # arrow head length
self.arrowWidth = max(2, aS) # half the arrow body width
self.arrowBorderwidth = max(1, self.arrowWidth/2) # width of arrow
# shadow lines
self.arrowHeadWidth = 2*self.arrowWidth # width of arrow head base
def mouseDown(self, event):
# remember where the mouse went down
self.lastx = event.x
self.lasty = event.y
def mouseUp(self, event):
# call callbacks if not in continuous mode
if not self.continuous:
self.callbacks.CallCallbacks(self.opPanel.valInput.get())
if self.showLabel == 2:
# no widget labels on mouse release
self.canvas.itemconfigure(self.labelId2, text='')
self.canvas.itemconfigure(self.labelId, text='')
def mouseMove(self, event):
dx = event.x-self.xm
dy = self.ym-event.y
n = math.sqrt(dx*dx+dy*dy)
if n == 0.0: v = [0.0, 0.0]
else: v = [dx/n, dy/n]
# find the cosine of the angle between new hand position and previous
# hand position
ma = v[0]*self.vector[0] + v[1]*self.vector[1]
# assure no rounding errors
if ma > 1.0: ma = 1.0
elif ma < -1.0: ma = -1.0
# compute angle increment compared to current vector
ang = math.acos(ma)
# find the sign of the rotation, sign of z component of vector prod.
oldv = self.vector
normz = oldv[0]*v[1] - oldv[1]*v[0]
if normz>0: ang = -1. * ang
# compute the new value
val = self.value + ang*self.oneTurnOver2pi
self.set(val)
self.lastx = event.x
self.lasty = event.y
def mouseWheel(self, event):
#print "mouseWheel", event, event.num
if os.name == 'nt': #sys.platform == 'win32':
if event.delta > 0:
lEventNum = 4
else:
lEventNum = 5
else:
lEventNum = event.num
if lEventNum == 4:
self.set(self.value+self.oneTurn)
else:
self.set(self.value-self.oneTurn)
def get(self):
return self.type(self.value)
def printLabel(self):
if self.canvas is None:
return
self.canvas.itemconfigure(self.labelId2,
text=self.labelFormat%self.value)#newVal)
self.canvas.itemconfigure(self.labelId,
text=self.labelFormat%self.value)#newVal)
def set(self, val, update=1, force=0):
# if force is set to 1, we call this method regardless of the
# widget configuration. This is for example the case if the dial
# is set to continuous=0, but the value is set in the options panel
# snap to closest increment
if self.increment is not None and self.increment != 0. and not force:
offset = self.offsetValue%self.increment
dval = round(val/self.increment) * self.increment
if val < dval:
dval = dval + offset - self.increment
else:
dval = dval + offset
if self.min is not None and dval < self.min:
dval = self.min
elif self.max is not None and dval > self.max:
dval = self.max
# recompute vector and angle corresponding to val
self.angle = (dval%self.oneTurn)*self.threeSixtyOver1turn
if dval <0.0:
self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = dval
self.offsetValue = dval
else:
# 'regular' mode, i.e. no step-wise increment
if self.min is not None and val < self.min: val = self.min
elif self.max is not None and val > self.max: val = self.max
# recompute vector and angle corresponding to val
self.angle = (val%self.oneTurn)*self.threeSixtyOver1turn
if val <0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = val
self.offsetValue = val
#update arrow in display
self.drawArrow()
newVal = self.get()
if self.continuous or force:
if update and self.oldValue != newVal or force:
self.oldValue = newVal
self.callbacks.CallCallbacks(newVal)
if self.showLabel==2:
self.printLabel()
else:
if self.showLabel==2:
self.printLabel()
if self.showLabel==1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%newVal)
def drawArrow(self):
if self.canvas is None:
return
# end point
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym - self.vector[1]*self.rad
# point at arrow head base
xb = self.xm + self.vector[0]*self.radNoArrow
        yb = self.ym - self.vector[1]*self.radNoArrow
# vector orthogonal to arrow
n = [-self.vector[1], -self.vector[0]]
pts1 = [ self.xm+n[0]*self.arrowWidth, self.ym+n[1]*self.arrowWidth,
xb+n[0]*self.arrowWidth, yb+n[1]*self.arrowWidth,
xb+n[0]*self.arrowHeadWidth, yb+n[1]*self.arrowHeadWidth,
x1, y1 ]
pts2 = [ x1, y1,
xb-n[0]*self.arrowHeadWidth, yb-n[1]*self.arrowHeadWidth,
xb-n[0]*self.arrowWidth, yb-n[1]*self.arrowWidth,
self.xm-n[0]*self.arrowWidth, self.ym-n[1]*self.arrowWidth ]
canvas = self.canvas
if self.vector[0] > 0.0:
col1 = '#DDDDDD'
col2 = 'black'
else:
col1 = 'black'
col2 = '#DDDDDD'
apply( canvas.coords, (self.arrowPolId,) + tuple(pts1+pts2) )
apply( canvas.coords, (self.arrowPolborder1,) + tuple(pts1) )
canvas.itemconfigure( self.arrowPolborder1, fill=col1 )
apply( canvas.coords, (self.arrowPolborder2,) + tuple(pts2) )
canvas.itemconfigure( self.arrowPolborder2, fill=col2 )
canvas.itemconfigure(self.arcId, extent = 0.0-self.angle)
def createCanvas(self, master):
size = self.size
self.frame = Tkinter.Frame(self, borderwidth=3, relief='sunken')
self.canvas = Tkinter.Canvas(self.frame, width=size+2, height=size+2)
self.xm = self.ym = size/2+2
self.rad = size/2
self.radNoArrow = self.rad-self.arrowLength
self.vector = [0, 1]
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym + self.vector[1]*self.rad
canvas = self.canvas
self.circleId = canvas.create_oval(2,2,size,size, width=1,
fill=self.unusedArcColor)
self.arcId = canvas.create_arc(2,2,size,size, start=90.,
extent=0, fill=self.usedArcColor)
canvas.create_line(2, self.ym, size+2, self.ym)
canvas.create_line(self.xm, 2, self.ym, size+2)
self.arrowPolId = canvas.create_polygon( 0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
fill='gray75' )
self.arrowPolborder1 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='black',
width = self.arrowBorderwidth)
self.arrowPolborder2 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='white',
width = self.arrowBorderwidth )
r = size/20
off = self.arrowBorderwidth
canvas.create_oval(self.xm-r,self.ym-r-off/2,self.xm+r,self.ym+r-off/2,
fill='#DDDDDD', outline='white')
canvas.create_oval(self.xm-r,self.ym-r+off,self.xm+r,self.ym+r+off,
fill='black', outline='black')
canvas.create_oval(self.xm-r,self.ym-r,self.xm+r,self.ym+r,
fill='gray70', outline='#DDDDDD')
self.labelId2 = canvas.create_text(self.xm+2, self.ym+2,
fill='black',
justify='center', text='',
font = self.labelFont)
self.labelId = canvas.create_text(self.xm, self.ym,
fill=self.labelColor,
justify='center', text='',
font = self.labelFont)
self.drawArrow()
self.opPanel = OptionsPanel(master = self, title="Dial Options")
# pack em up
self.canvas.pack(side=Tkinter.TOP)
self.frame.pack(expand=1, fill='x')
self.toggleWidgetLabel(self.showLabel)
def toggleWidgetLabel(self, val):
if val == 0:
# no widget labels
self.showLabel=0
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
if val == 1:
# show always widget labels
self.showLabel=1
self.printLabel()
if val == 2:
# show widget labels only when mouse moves
self.showLabel=2
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
def setValue(self, val):
if type(val) == types.StringType:
val = float(val)
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for value: expected %s or %s, got %s"%(
type(1), type(1.0), type(val) )
# setValue does NOT call a callback!
if self.min is not None and val < self.min: val = self.min
if self.max is not None and val > self.max: val = self.max
self.value = self.type(val)
self.offsetValue=self.value
self.oldValue = self.value
#update arrow in display
self.angle = (self.value%self.oneTurn)*self.threeSixtyOver1turn
if self.value <0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.drawArrow()
if self.showLabel == 1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%self.value)
def setLabel(self, labCfg):
self.labCfg = labCfg
text = labCfg.get('text', None)
if text is None or text=='':
return
d={}
for k, w in self.labCfg.items():
if k == 'side': continue
else: d[k] = w
if not 'side' in self.labCfg.keys():
self.labCfg['side'] = 'left'
if not self.lab:
self.lab = Tkinter.Label(self, d)
self.lab.pack(side=self.labCfg['side'])
self.lab.bind("<Button-3>", self.toggleOptPanel)
else:
self.lab.configure(text)
#####################################################################
# the 'configure' methods:
#####################################################################
def configure(self, **kw):
for key,value in kw.items():
# the 'set' parameter callbacks
if key=='labCfg': self.setLabel(value)
elif key=='type': self.setType(value)
elif key=='min': self.setMin(value)
elif key=='max': self.setMax(value)
elif key=='increment': self.setIncrement(value)
elif key=='precision': self.setPrecision(value)
elif key=='showLabel': self.setShowLabel(value)
elif key=='continuous': self.setContinuous(value)
elif key=='oneTurn': self.setOneTurn(value)
# the 'lock' entries callbacks
elif key=='lockType': self.lockTypeCB(value)
elif key=='lockMin': self.lockMinCB(value)
elif key=='lockBMin': self.lockBMinCB(value)
elif key=='lockMax': self.lockMaxCB(value)
elif key=='lockBMax': self.lockBMaxCB(value)
elif key=='lockIncrement': self.lockIncrementCB(value)
elif key=='lockBIncrement': self.lockBIncrementCB(value)
elif key=='lockPrecision': self.lockPrecisionCB(value)
elif key=='lockShowLabel': self.lockShowLabelCB(value)
elif key=='lockValue': self.lockValueCB(value)
elif key=='lockContinuous': self.lockContinuousCB(value)
elif key=='lockOneTurn': self.lockOneTurnCB(value)
def setType(self, Type):
assert type(Type) in [types.StringType, types.TypeType],\
"Illegal type for datatype. Expected %s or %s, got %s"%(
type('a'), type(type), type(Type) )
if type(Type) == type(""): # type str
assert Type in ('int','float'),\
"Illegal type descriptor. Expected 'int' or 'float', got '%s'"%Type
self.type = eval(Type)
else:
self.type = Type
if self.type == int:
self.labelFormat = "%d"
self.int_value = self.value
else:
self.labelFormat = "%."+str(self.precision)+"f"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togIntFloat']['widget']
if self.type == int:
w.setvalue('int')
            elif self.type == float:
w.setvalue('float')
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setMin(self, min):
if min is not None:
assert type(min) in [types.IntType, types.FloatType],\
"Illegal type for minimum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(min) )
if self.max and min > self.max:
min = self.max
self.min = self.type(min)
if self.showLabel == 1:
self.printLabel()
if self.value < self.min:
self.set(self.min)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.minInput.set(self.labelFormat%self.min)
self.opPanel.toggleMin.set(1)
self.opPanel.min_entry.configure(state='normal', fg='gray0')
self.minOld = self.min
else:
self.min = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMin.set(0)
self.opPanel.min_entry.configure(state='disabled',
fg='gray40')
def setMax(self, max):
if max is not None:
assert type(max) in [types.IntType, types.FloatType],\
"Illegal type for maximum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(max) )
if self.min and max < self.min:
max = self.min
self.max = self.type(max)
if self.showLabel == 1:
self.printLabel()
if self.value > self.max:
self.set(self.max)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.maxInput.set(self.labelFormat%self.max)
self.opPanel.toggleMax.set(1)
self.opPanel.max_entry.configure(state='normal', fg='gray0')
self.maxOld = self.max
else:
self.max = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMax.set(0)
self.opPanel.max_entry.configure(state='disabled', fg='gray40')
def setIncrement(self, incr):
if incr is not None:
assert type(incr) in [types.IntType, types.FloatType],\
"Illegal type for increment. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(incr) )
self.increment = self.type(incr)
self.offsetValue = self.value
self.incrementOld = self.increment
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.incrInput.set(self.labelFormat%self.increment)
self.opPanel.toggleIncr.set(1)
self.opPanel.incr_entry.configure(state='normal', fg='gray0')
else:
self.increment = self.type(0)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleIncr.set(0)
self.opPanel.incrInput.set(self.labelFormat%0)
self.opPanel.incr_entry.configure(state='disabled',
fg='gray40')
def setPrecision(self, val):
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for precision. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(val) )
val = int(val)
if val > 10:
val = 10
if val < 1:
val = 1
self.precision = val
if self.type == float:
self.labelFormat = "%."+str(self.precision)+"f"
else:
self.labelFormat = "%d"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['selPrec']['widget']
w.setvalue(val)
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setContinuous(self, cont):
""" cont can be None, 0 or 1 """
assert cont in [None, 0, 1],\
"Illegal value for continuous: expected None, 0 or 1, got %s"%cont
if cont != 1:
cont = None
self.continuous = cont
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togCont']['widget']
if cont:
w.setvalue('on')#i=1
else:
w.setvalue('off')#i=0
if self.opPanel:
self.opPanel.updateDisplay()
def setShowLabel(self, val):
"""Show label can be 0, 1 or 2
0: no label
1: label is always shown
2: show label only when value changes"""
assert val in [0,1,2],\
"Illegal value for showLabel. Expected 0, 1 or 2, got %s"%val
if val != 0 and val != 1 and val != 2:
print "Illegal value. Must be 0, 1 or 2"
return
self.showLabel = val
self.toggleWidgetLabel(val)
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togLabel']['widget']
if self.showLabel == 0:
label = 'never'
elif self.showLabel == 1:
label = 'always'
elif self.showLabel == 2:
label = 'move'
w.setvalue(label)
if self.opPanel:
self.opPanel.updateDisplay()
def setOneTurn(self, oneTurn):
assert type(oneTurn) in [types.IntType, types.FloatType],\
"Illegal type for oneTurn. Expected %s or %s, got %s"%(
type(0), type(0.0), type(oneTurn) )
self.oneTurn = oneTurn
self.threeSixtyOver1turn = 360./oneTurn
self.piOver1turn = math.pi/oneTurn
self.oneTurnOver2pi = oneTurn / (2*math.pi)
if self.opPanel:
self.opPanel.updateDisplay()
#####################################################################
# the 'lock' methods:
#####################################################################
def lockTypeCB(self, mode):
if mode != 0: mode = 1
self.lockType = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMinCB(self, mode): #min entry field
if mode != 0: mode = 1
self.lockMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMinCB(self, mode): # min checkbutton
if mode != 0: mode = 1
self.lockBMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMaxCB(self, mode): # max entry field
if mode != 0: mode = 1
self.lockMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMaxCB(self, mode): # max checkbutton
if mode != 0: mode = 1
self.lockBMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockIncrementCB(self, mode): # increment entry field
if mode != 0: mode = 1
self.lockIncrement = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBIncrementCB(self, mode): # increment checkbutton
if mode != 0: mode = 1
self.lockBIncrement = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockPrecisionCB(self, mode):
if mode != 0: mode = 1
self.lockPrecision = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockShowLabelCB(self, mode):
if mode != 0: mode = 1
self.lockShowLabel = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockValueCB(self, mode):
if mode != 0: mode = 1
self.lockValue = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockContinuousCB(self, mode):
if mode != 0: mode = 1
self.lockContinuous = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockOneTurnCB(self, mode):
if mode != 0: mode = 1
self.lockOneTurn = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
if __name__ == '__main__':
def foo(val):
print val
d = Dial(size=50)
d.configure(showLabel=1)
d.callbacks.AddCallback(foo)
| ################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. <NAME> and TSRI 2016
##
################################################################################
#########################################################################
#
# Date: Mai 2001 Authors: <NAME>, <NAME>
#
# <EMAIL>
# <EMAIL>
#
# Copyright: <NAME>, <NAME> and TSRI
#
#########################################################################
import Tkinter
import math
import types
import sys
import os
from mglutil.util.callback import CallbackManager
from mglutil.util.misc import ensureFontCase
from optionsPanel import OptionsPanel
from KeyboardEntry import KeyboardEntry
class Dial(Tkinter.Frame, KeyboardEntry):
"""This class implements a Dial widget.
The widget has a pointer that can be moved around a circle.
The range corresponding to one full turn can be specified as well as the min
and max values that are allowed. By default these are set to None, meaning that
there is no min and no max. One turn corresponds to 360 units by default.
A dial can also operate in discrete mode (if self.increment is set to x). In
this mode the values will be restrained to be multiples of self.increment.
The widget has a Callback manager. Callback functions get called at every value
change if self.continuous is set to 1, else they get called when the mouse
button is released. They always get called with the current value as an
argument.
An optional label can be displayed at the center of the Dial widget.
The size of the dial has to be specified at instantiation. Other parameters
can be set after the widget has been created.
The widget tries to automatically adjust the size of the arrow according to
the size of the dial.
The widget has a configure() method: type, min, max, increment, precision,
showLabel, value, continuous, oneTurn can be set this way.
master, labCfg and size can be passed only to the constructor.
a lock() method is used to disable the various gui components of the
options panel. Usage: <instance>.lock(<component>=<value>)
components see configure(). value is 0 or 1. 1 disables,
0 enables.
Setting values with increment enabled:
if using the method set(), the actual value will 'snap' to the next increment.
i.e., if the value is set to 3, and the increment is set to 2, setting the
value to 6 will actually result in 7 (3,5,7,9,.....)
To still be able to set the value, disregarding the current active increment,
the set method understands the optional keyword force=True, i.e.
dial.set(<value>, force=True)), which will set the value to <value>. The
increment will now be added to this new <value>
"""
def __init__(self, master=None, type='float',
labCfg={'fg':'black','side':'left', 'text':None},
min=None, max=None, increment=.0, precision=2,
showLabel=1, value=0.0, continuous=1, oneTurn=360.,
size=50, callback=None,
lockMin=0, lockBMin=0, lockMax=0, lockBMax=0,
lockIncrement=0, lockBIncrement=0,
lockPrecision=0, lockShowLabel=0, lockValue=0,
lockType=0, lockContinuous=0, lockOneTurn=0, **kw):
Tkinter.Frame.__init__(self, master)
Tkinter.Pack.config(self)
self.callbacks = CallbackManager() # object to manage callback
# functions. They get called with the
# current value as an argument
# initialize various attributes with default values
self.precision = 2 # decimal places
self.min = None # minimum value
self.max = None # maximum value
self.increment = increment # value increment
self.minOld = 0. # used to store old values
self.maxOld = 0.
self.incrementOld = increment
self.size = 50 # defines widget size
self.offsetValue = 0. # used to set increment correctly
self.lab = None # label
self.callback = None # user specified callback
self.opPanel = None # option panel widget
self.oneTurn = 360. # value increment for 1 full turn
self.value = 0.0 # current value of widget
self.oldValue = 0.0 # old value of widget
self.showLabel = 1 # turn on to display label on
self.continuous = 1 # set to 1 to call callbacks at
# each value change, else gets called
# on button release event
self.angle = 0. # angle corresponding to value
self.labCfg = labCfg # Tkinter Label options
self.labelFont = (
ensureFontCase('helvetica'), 14, 'bold') # label font
self.labelColor = 'yellow' # label color
self.canvas = None # the canvas to create the widget in
self.usedArcColor = '#aaaaaa' # filled arc color of used portion
self.unusedArcColor = '#cccccc' # filled arc color of unused portion
self.pyOver180 = math.pi/180.0 # constants used in various places
self.threeSixtyOver1turn = 1
self.piOver1turn = math.pi/360.
self.lockMin = lockMin # lock<X> vars are used in self.lock()
self.lockMax = lockMax # to lock/unlock entries in optionpanel
self.lockIncrement = lockIncrement
self.lockBMin = lockBMin
self.lockBMax = lockBMax
self.lockBIncrement = lockBIncrement
self.lockPrecision = lockPrecision
self.lockShowLabel = lockShowLabel
self.lockValue = lockValue
self.lockType = lockType
self.lockContinuous = lockContinuous
self.lockOneTurn = lockOneTurn
self.setArrow()
# configure with user-defined values
self.setSize(size)
self.setCallback(callback)
self.setContinuous(continuous)
self.setType(type)
self.setPrecision(precision)
self.setOneTurn(oneTurn)
self.setMin(min)
self.setMax(max)
self.setIncrement(increment)
self.setShowLabel(showLabel)
self.setValue(value)
self.setLabel(self.labCfg)
self.createCanvas(master)
canvas = self.canvas
canvas.bind("<ButtonPress-1>", self.mouseDown)
canvas.bind("<ButtonRelease-1>", self.mouseUp)
canvas.bind("<B1-Motion>", self.mouseMove)
canvas.bind("<Button-3>", self.toggleOptPanel)
if os.name == 'nt': #sys.platform == 'win32':
canvas.bind("<MouseWheel>", self.mouseWheel)
else:
canvas.bind("<Button-4>", self.mouseWheel)
canvas.bind("<Button-5>", self.mouseWheel)
KeyboardEntry.__init__(self, (canvas,), self.setFromEntry)
self.opPanel = OptionsPanel(master = self, title="Dial Options")
## if self.callback:
## self.callbacks.AddCallback(self.callback)
def setFromEntry(self, valueString):
try:
self.set(self.type(valueString))
except ValueError:
# fixme we would like to pop this up in a window maybe
import traceback
traceback.print_stack()
traceback.print_exc()
def handleKeyStroke(self, event):
# handle key strokes for numbers only in widget keyboard entry label
key = event.keysym
if key.isdigit() or key=='period' or key=='minus' or key=='plus':
if key == 'period':
key = '.'
elif key == 'minus':
key = '-'
elif key == 'plus':
key = '+'
self.typedValue += key
self.typedValueTK.configure(text=self.typedValue)
else:
KeyboardEntry.handleKeyStroke(self, event)
def setSize(self, size):
"""Set widget size. Size must be of type int and greater than 0"""
assert isinstance(size, types.IntType),\
"Illegal size: expected type %s, got %s"%(type(1), type(size) )
assert size > 0, "Illegal size: must be > 0, got %s"%size
self.size = size
def setCallback(self, cb):
"""Set widget callback. Must be callable function. Callback is called
every time the widget value is set/modified"""
assert cb is None or callable(cb) or type(cb) is types.ListType,\
"Illegal callback: must be either None or callable, or list. Got %s"%cb
if cb is None: return
elif type(cb) is types.ListType:
for func in cb:
assert callable(func), "Illegal callback must be callable. Got %s"%func
self.callbacks.AddCallback(func)
else:
self.callbacks.AddCallback(cb)
self.callback = cb
def toggleOptPanel(self, event=None):
if self.opPanel.flag:
self.opPanel.Dismiss_cb()
else:
if not hasattr(self.opPanel, 'optionsForm'):
self.opPanel.displayPanel(create=1)
else:
self.opPanel.displayPanel(create=0)
def setArrow(self, size=None):
if size is not None:
self.setSize(size)
aS = self.size/40
self.arrowLength = max(3, 3*aS) # arrow head length
self.arrowWidth = max(2, aS) # half the arrow body width
self.arrowBorderwidth = max(1, self.arrowWidth/2) # width of arrow
# shadow lines
self.arrowHeadWidth = 2*self.arrowWidth # width of arrow head base
def mouseDown(self, event):
# remember where the mouse went down
self.lastx = event.x
self.lasty = event.y
def mouseUp(self, event):
# call callbacks if not in continuous mode
if not self.continuous:
self.callbacks.CallCallbacks(self.opPanel.valInput.get())
if self.showLabel == 2:
# no widget labels on mouse release
self.canvas.itemconfigure(self.labelId2, text='')
self.canvas.itemconfigure(self.labelId, text='')
def mouseMove(self, event):
dx = event.x-self.xm
dy = self.ym-event.y
n = math.sqrt(dx*dx+dy*dy)
if n == 0.0: v = [0.0, 0.0]
else: v = [dx/n, dy/n]
# find the cosine of the angle between new hand position and previous
# hand position
ma = v[0]*self.vector[0] + v[1]*self.vector[1]
# assure no rounding errors
if ma > 1.0: ma = 1.0
elif ma < -1.0: ma = -1.0
# compute angle increment compared to current vector
ang = math.acos(ma)
# find the sign of the rotation, sign of z component of vector prod.
oldv = self.vector
normz = oldv[0]*v[1] - oldv[1]*v[0]
if normz>0: ang = -1. * ang
# compute the new value
val = self.value + ang*self.oneTurnOver2pi
self.set(val)
self.lastx = event.x
self.lasty = event.y
def mouseWheel(self, event):
#print "mouseWheel", event, event.num
if os.name == 'nt': #sys.platform == 'win32':
if event.delta > 0:
lEventNum = 4
else:
lEventNum = 5
else:
lEventNum = event.num
if lEventNum == 4:
self.set(self.value+self.oneTurn)
else:
self.set(self.value-self.oneTurn)
def get(self):
return self.type(self.value)
def printLabel(self):
if self.canvas is None:
return
self.canvas.itemconfigure(self.labelId2,
text=self.labelFormat%self.value)#newVal)
self.canvas.itemconfigure(self.labelId,
text=self.labelFormat%self.value)#newVal)
def set(self, val, update=1, force=0):
# if force is set to 1, we call this method regardless of the
# widget configuration. This is for example the case if the dial
# is set to continuous=0, but the value is set in the options panel
# snap to closest increment
if self.increment is not None and self.increment != 0. and not force:
offset = self.offsetValue%self.increment
dval = round(val/self.increment) * self.increment
if val < dval:
dval = dval + offset - self.increment
else:
dval = dval + offset
if self.min is not None and dval < self.min:
dval = self.min
elif self.max is not None and dval > self.max:
dval = self.max
# recompute vector and angle corresponding to val
self.angle = (dval%self.oneTurn)*self.threeSixtyOver1turn
if dval <0.0:
self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = dval
self.offsetValue = dval
else:
# 'regular' mode, i.e. no step-wise increment
if self.min is not None and val < self.min: val = self.min
elif self.max is not None and val > self.max: val = self.max
# recompute vector and angle corresponding to val
self.angle = (val%self.oneTurn)*self.threeSixtyOver1turn
if val <0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = val
self.offsetValue = val
#update arrow in display
self.drawArrow()
newVal = self.get()
if self.continuous or force:
if update and self.oldValue != newVal or force:
self.oldValue = newVal
self.callbacks.CallCallbacks(newVal)
if self.showLabel==2:
self.printLabel()
else:
if self.showLabel==2:
self.printLabel()
if self.showLabel==1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%newVal)
def drawArrow(self):
if self.canvas is None:
return
# end point
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym - self.vector[1]*self.rad
# point at arrow head base
xb = self.xm + self.vector[0]*self.radNoArrow
        yb = self.ym - self.vector[1]*self.radNoArrow
# vector orthogonal to arrow
n = [-self.vector[1], -self.vector[0]]
pts1 = [ self.xm+n[0]*self.arrowWidth, self.ym+n[1]*self.arrowWidth,
xb+n[0]*self.arrowWidth, yb+n[1]*self.arrowWidth,
xb+n[0]*self.arrowHeadWidth, yb+n[1]*self.arrowHeadWidth,
x1, y1 ]
pts2 = [ x1, y1,
xb-n[0]*self.arrowHeadWidth, yb-n[1]*self.arrowHeadWidth,
xb-n[0]*self.arrowWidth, yb-n[1]*self.arrowWidth,
self.xm-n[0]*self.arrowWidth, self.ym-n[1]*self.arrowWidth ]
canvas = self.canvas
if self.vector[0] > 0.0:
col1 = '#DDDDDD'
col2 = 'black'
else:
col1 = 'black'
col2 = '#DDDDDD'
apply( canvas.coords, (self.arrowPolId,) + tuple(pts1+pts2) )
apply( canvas.coords, (self.arrowPolborder1,) + tuple(pts1) )
canvas.itemconfigure( self.arrowPolborder1, fill=col1 )
apply( canvas.coords, (self.arrowPolborder2,) + tuple(pts2) )
canvas.itemconfigure( self.arrowPolborder2, fill=col2 )
canvas.itemconfigure(self.arcId, extent = 0.0-self.angle)
def createCanvas(self, master):
size = self.size
self.frame = Tkinter.Frame(self, borderwidth=3, relief='sunken')
self.canvas = Tkinter.Canvas(self.frame, width=size+2, height=size+2)
self.xm = self.ym = size/2+2
self.rad = size/2
self.radNoArrow = self.rad-self.arrowLength
self.vector = [0, 1]
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym + self.vector[1]*self.rad
canvas = self.canvas
self.circleId = canvas.create_oval(2,2,size,size, width=1,
fill=self.unusedArcColor)
self.arcId = canvas.create_arc(2,2,size,size, start=90.,
extent=0, fill=self.usedArcColor)
canvas.create_line(2, self.ym, size+2, self.ym)
canvas.create_line(self.xm, 2, self.ym, size+2)
self.arrowPolId = canvas.create_polygon( 0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
fill='gray75' )
self.arrowPolborder1 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='black',
width = self.arrowBorderwidth)
self.arrowPolborder2 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='white',
width = self.arrowBorderwidth )
r = size/20
off = self.arrowBorderwidth
canvas.create_oval(self.xm-r,self.ym-r-off/2,self.xm+r,self.ym+r-off/2,
fill='#DDDDDD', outline='white')
canvas.create_oval(self.xm-r,self.ym-r+off,self.xm+r,self.ym+r+off,
fill='black', outline='black')
canvas.create_oval(self.xm-r,self.ym-r,self.xm+r,self.ym+r,
fill='gray70', outline='#DDDDDD')
self.labelId2 = canvas.create_text(self.xm+2, self.ym+2,
fill='black',
justify='center', text='',
font = self.labelFont)
self.labelId = canvas.create_text(self.xm, self.ym,
fill=self.labelColor,
justify='center', text='',
font = self.labelFont)
self.drawArrow()
self.opPanel = OptionsPanel(master = self, title="Dial Options")
# pack em up
self.canvas.pack(side=Tkinter.TOP)
self.frame.pack(expand=1, fill='x')
self.toggleWidgetLabel(self.showLabel)
def toggleWidgetLabel(self, val):
if val == 0:
# no widget labels
self.showLabel=0
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
if val == 1:
# show always widget labels
self.showLabel=1
self.printLabel()
if val == 2:
# show widget labels only when mouse moves
self.showLabel=2
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
def setValue(self, val):
if type(val) == types.StringType:
val = float(val)
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for value: expected %s or %s, got %s"%(
type(1), type(1.0), type(val) )
# setValue does NOT call a callback!
if self.min is not None and val < self.min: val = self.min
if self.max is not None and val > self.max: val = self.max
self.value = self.type(val)
self.offsetValue=self.value
self.oldValue = self.value
#update arrow in display
self.angle = (self.value%self.oneTurn)*self.threeSixtyOver1turn
if self.value <0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.drawArrow()
if self.showLabel == 1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%self.value)
def setLabel(self, labCfg):
self.labCfg = labCfg
text = labCfg.get('text', None)
if text is None or text=='':
return
d={}
for k, w in self.labCfg.items():
if k == 'side': continue
else: d[k] = w
if not 'side' in self.labCfg.keys():
self.labCfg['side'] = 'left'
if not self.lab:
self.lab = Tkinter.Label(self, d)
self.lab.pack(side=self.labCfg['side'])
self.lab.bind("<Button-3>", self.toggleOptPanel)
else:
self.lab.configure(text)
#####################################################################
# the 'configure' methods:
#####################################################################
def configure(self, **kw):
for key,value in kw.items():
# the 'set' parameter callbacks
if key=='labCfg': self.setLabel(value)
elif key=='type': self.setType(value)
elif key=='min': self.setMin(value)
elif key=='max': self.setMax(value)
elif key=='increment': self.setIncrement(value)
elif key=='precision': self.setPrecision(value)
elif key=='showLabel': self.setShowLabel(value)
elif key=='continuous': self.setContinuous(value)
elif key=='oneTurn': self.setOneTurn(value)
# the 'lock' entries callbacks
elif key=='lockType': self.lockTypeCB(value)
elif key=='lockMin': self.lockMinCB(value)
elif key=='lockBMin': self.lockBMinCB(value)
elif key=='lockMax': self.lockMaxCB(value)
elif key=='lockBMax': self.lockBMaxCB(value)
elif key=='lockIncrement': self.lockIncrementCB(value)
elif key=='lockBIncrement': self.lockBIncrementCB(value)
elif key=='lockPrecision': self.lockPrecisionCB(value)
elif key=='lockShowLabel': self.lockShowLabelCB(value)
elif key=='lockValue': self.lockValueCB(value)
elif key=='lockContinuous': self.lockContinuousCB(value)
elif key=='lockOneTurn': self.lockOneTurnCB(value)
def setType(self, Type):
assert type(Type) in [types.StringType, types.TypeType],\
"Illegal type for datatype. Expected %s or %s, got %s"%(
type('a'), type(type), type(Type) )
if type(Type) == type(""): # type str
assert Type in ('int','float'),\
"Illegal type descriptor. Expected 'int' or 'float', got '%s'"%Type
self.type = eval(Type)
else:
self.type = Type
if self.type == int:
self.labelFormat = "%d"
self.int_value = self.value
else:
self.labelFormat = "%."+str(self.precision)+"f"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togIntFloat']['widget']
if self.type == int:
w.setvalue('int')
            elif self.type == float:
w.setvalue('float')
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setMin(self, min):
if min is not None:
assert type(min) in [types.IntType, types.FloatType],\
"Illegal type for minimum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(min) )
if self.max and min > self.max:
min = self.max
self.min = self.type(min)
if self.showLabel == 1:
self.printLabel()
if self.value < self.min:
self.set(self.min)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.minInput.set(self.labelFormat%self.min)
self.opPanel.toggleMin.set(1)
self.opPanel.min_entry.configure(state='normal', fg='gray0')
self.minOld = self.min
else:
self.min = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMin.set(0)
self.opPanel.min_entry.configure(state='disabled',
fg='gray40')
def setMax(self, max):
if max is not None:
assert type(max) in [types.IntType, types.FloatType],\
"Illegal type for maximum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(max) )
if self.min and max < self.min:
max = self.min
self.max = self.type(max)
if self.showLabel == 1:
self.printLabel()
if self.value > self.max:
self.set(self.max)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.maxInput.set(self.labelFormat%self.max)
self.opPanel.toggleMax.set(1)
self.opPanel.max_entry.configure(state='normal', fg='gray0')
self.maxOld = self.max
else:
self.max = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMax.set(0)
self.opPanel.max_entry.configure(state='disabled', fg='gray40')
def setIncrement(self, incr):
if incr is not None:
assert type(incr) in [types.IntType, types.FloatType],\
"Illegal type for increment. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(incr) )
self.increment = self.type(incr)
self.offsetValue = self.value
self.incrementOld = self.increment
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.incrInput.set(self.labelFormat%self.increment)
self.opPanel.toggleIncr.set(1)
self.opPanel.incr_entry.configure(state='normal', fg='gray0')
else:
self.increment = self.type(0)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleIncr.set(0)
self.opPanel.incrInput.set(self.labelFormat%0)
self.opPanel.incr_entry.configure(state='disabled',
fg='gray40')
def setPrecision(self, val):
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for precision. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(val) )
val = int(val)
if val > 10:
val = 10
if val < 1:
val = 1
self.precision = val
if self.type == float:
self.labelFormat = "%."+str(self.precision)+"f"
else:
self.labelFormat = "%d"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['selPrec']['widget']
w.setvalue(val)
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setContinuous(self, cont):
""" cont can be None, 0 or 1 """
assert cont in [None, 0, 1],\
"Illegal value for continuous: expected None, 0 or 1, got %s"%cont
if cont != 1:
cont = None
self.continuous = cont
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togCont']['widget']
if cont:
w.setvalue('on')#i=1
else:
w.setvalue('off')#i=0
if self.opPanel:
self.opPanel.updateDisplay()
def setShowLabel(self, val):
"""Show label can be 0, 1 or 2
0: no label
1: label is always shown
2: show label only when value changes"""
assert val in [0,1,2],\
"Illegal value for showLabel. Expected 0, 1 or 2, got %s"%val
if val != 0 and val != 1 and val != 2:
print "Illegal value. Must be 0, 1 or 2"
return
self.showLabel = val
self.toggleWidgetLabel(val)
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togLabel']['widget']
if self.showLabel == 0:
label = 'never'
elif self.showLabel == 1:
label = 'always'
elif self.showLabel == 2:
label = 'move'
w.setvalue(label)
if self.opPanel:
self.opPanel.updateDisplay()
def setOneTurn(self, oneTurn):
assert type(oneTurn) in [types.IntType, types.FloatType],\
"Illegal type for oneTurn. Expected %s or %s, got %s"%(
type(0), type(0.0), type(oneTurn) )
self.oneTurn = oneTurn
self.threeSixtyOver1turn = 360./oneTurn
self.piOver1turn = math.pi/oneTurn
self.oneTurnOver2pi = oneTurn / (2*math.pi)
if self.opPanel:
self.opPanel.updateDisplay()
#####################################################################
# the 'lock' methods:
#####################################################################
def lockTypeCB(self, mode):
if mode != 0: mode = 1
self.lockType = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMinCB(self, mode): #min entry field
if mode != 0: mode = 1
self.lockMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMinCB(self, mode): # min checkbutton
if mode != 0: mode = 1
self.lockBMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMaxCB(self, mode): # max entry field
if mode != 0: mode = 1
self.lockMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMaxCB(self, mode): # max checkbutton
if mode != 0: mode = 1
self.lockBMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockIncrementCB(self, mode): # increment entry field
if mode != 0: mode = 1
self.lockIncrement = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBIncrementCB(self, mode): # increment checkbutton
if mode != 0: mode = 1
self.lockBIncrement = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockPrecisionCB(self, mode):
if mode != 0: mode = 1
self.lockPrecision = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockShowLabelCB(self, mode):
if mode != 0: mode = 1
self.lockShowLabel = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockValueCB(self, mode):
if mode != 0: mode = 1
self.lockValue = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockContinuousCB(self, mode):
if mode != 0: mode = 1
self.lockContinuous = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockOneTurnCB(self, mode):
if mode != 0: mode = 1
self.lockOneTurn = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
if __name__ == '__main__':
def foo(val):
print val
d = Dial(size=50)
d.configure(showLabel=1)
d.callbacks.AddCallback(foo)
| en | 0.675834 | ################################################################################ ## ## This library is free software; you can redistribute it and/or ## modify it under the terms of the GNU Lesser General Public ## License as published by the Free Software Foundation; either ## version 2.1 of the License, or (at your option) any later version. ## ## This library is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public ## License along with this library; if not, write to the Free Software ## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ## ## (C) Copyrights Dr. <NAME> and TSRI 2016 ## ################################################################################ ######################################################################### # # Date: Mai 2001 Authors: <NAME>, <NAME> # # <EMAIL> # <EMAIL> # # Copyright: <NAME>, <NAME> and TSRI # ######################################################################### This class implements a Dial widget. The widget has a pointer that can be moved around a circle. The range corresponding to one full turn can be specified as well as the min and max values that are allowed. By defaults these are set to None meaning that there is no min and no max. One turn corresponds to 360 units by default. A dial can also operate in discrete mode (if self.increment is set to x). In this mode the values will be restrained to be multiples of self.increment. The Widget has a Callback manager. Callback functions get called at every value change if self.contiguous is set to 1, else they get called when the mouse button is released. They always get called with the current value as an argument. An optional label can be displayed at the center of the Dial widget. The size of the dial has to be specified at instanciation. Other parameters can be set after the widget has been created. The widget tried to adjust automatically the size of the arrow according to the size of the dial. The widget has a configure() method: type, min, max, increment, precision, showLabel, value, continuous, oneTurn can be set this way. master, labCfg and size can be passed only to the constructor. a lock() method is used to disable the various gui components of the options panel. Usage: <instance>.lock(<component>=<value>) components see configure(). value is 0 or 1. 1 disables, 0 enables. Setting values with increment enabled: if using the method set(), the actual value will 'snap' to the next increment. i.e., if the value is set to 3, and the increment is set to 2, setting the value to 6 will actually result in 7 (3,5,7,9,.....) To still be able to set the value, disregarding the current active increment, the set method understands the optional keyword force=True, i.e. dial.set(<value>, force=True)), which will set the value to <value>. The increment will now be added to this new <value> # object to manage callback # functions. 
They get called with the # current value as an argument # initialize various attributes with default values # decimal places # minimum value # maximum value # value increment # used to store old values # defines widget size # used to set increment correctly # label # user specified callback # option panel widget # value increment for 1 full turn # current value of widget # old value of widget # turn on to display label on # set to 1 to call callbacks at # each value change, else gets called # on button release event # angle corresponding to value # Tkinter Label options # label font # label color # the canvas to create the widget in # filled arc color of used portion # filled arc color of unused portion # constants used in various places # lock<X> vars are used in self.lock() # to lock/unlock entries in optionpanel # configure with user-defined values #sys.platform == 'win32': ## if self.callback: ## self.callbacks.AddCallback(self.callback) # fixme we would like to pop this up in a window maybe # handle key strokes for numbers only in widget keyboard entry label Set widget size. Size must be of type int and greater than 0 Set widget callback. Must be callable function. Callback is called every time the widget value is set/modified # arrow head length # half the arrow body width # width of arrow # shadow lines # width of arrow head base # remember where the mouse went down # call callbacks if not in continuous mode # no widget labels on mouse release # find the cosine of the angle between new hand position and previous # hand position # assure no rounding errors # compute angle increment compared to current vector # find the sign of the rotation, sign of z component of vector prod. # compute the new value #print "mouseWheel", event, event.num #sys.platform == 'win32': #newVal) #newVal) # if force is set to 1, we call this method regardless of the # widget configuration. This is for example the case if the dial # is set to continuous=0, but the value is set in the options panel # snap to closest increment # recompute vector and angle corresponding to val # 'regular' mode, i.e. no step-wise increment # recompute vector and angle corresponding to val #update arrow in display # end point # point at arrow head base # vector orthogonal to arrow # pack em up # no widget labels # show always widget labels # show widget labels only when mouse moves # setValue does NOT call a callback! #update arrow in display ##################################################################### # the 'configure' methods: ##################################################################### # the 'set' parameter callbacks # the 'lock' entries callbacks # type str # and update the printed label # and update the printed label cont can be None, 0 or 1 #i=1 #i=0 Show label can be 0, 1 or 2 0: no label 1: label is always shown 2: show label only when value changes ##################################################################### # the 'lock' methods: ##################################################################### #min entry field # min checkbutton # max entry field # max checkbutton # increment entry field # increment checkbutton | 0.95558 | 1 |
qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py | kevinlq/Qt-Creator-Opensource-Study | 5 | 849 | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import os
import sys
import stat
import difflib
import inspect
import getopt
def referenceFile():
if sys.platform.startswith('linux'):
filename = 'makeinstall.linux'
elif sys.platform.startswith('win'):
filename = 'makeinstall.windows'
elif sys.platform == 'darwin':
filename = 'makeinstall.darwin'
else:
print "Unsupported platform: ", sys.platform
sys.exit(-1)
scriptDir = os.path.dirname(inspect.getfile(inspect.currentframe()))
return os.path.join(scriptDir,'..','tests', 'reference', filename)
def readReferenceFile():
# read file with old diff
f = open(referenceFile(), 'r');
filelist = []
for line in f:
filelist.append(line)
f.close()
return filelist
def generateReference(rootdir):
fileDict = {}
for root, subFolders, files in os.walk(rootdir):
for file in (subFolders + files):
f = os.path.join(root,file)
perm = os.stat(f).st_mode & 0777
if os.path.getsize(f) == 0:
print "'%s' is empty!" % f
fileDict[f[len(rootdir)+1:]] = perm
# generate new list
formattedlist = []
for name, perm in sorted(fileDict.iteritems()):
formattedlist.append("%o %s\n"% (perm, name))
return formattedlist;
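# For illustration only: given a hypothetical install tree with one executable and one
# data file, generateReference(rootdir) would produce lines such as
#   755 bin/qtcreator
#   644 share/qtcreator/README
# i.e. "<octal permission bits> <path relative to rootdir>", sorted by path.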
def usage():
print "Usage: %s [-g | --generate] <dir>" % os.path.basename(sys.argv[0])
def main():
generateMode = False
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hg', ['help', 'generate'])
    except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-g', '--generate'):
generateMode = True
if len(args) != 1:
usage()
sys.exit(2)
rootdir = args[0]
if generateMode:
f = open(referenceFile(), 'w')
for item in generateReference(rootdir):
f.write(item)
f.close()
print "Do not forget to commit", referenceFile()
else:
hasDiff = False
for line in difflib.unified_diff(readReferenceFile(), generateReference(rootdir), fromfile=referenceFile(), tofile="generated"):
sys.stdout.write(line)
hasDiff = True
if hasDiff:
sys.exit(1)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import os
import sys
import stat
import difflib
import inspect
import getopt
def referenceFile():
if sys.platform.startswith('linux'):
filename = 'makeinstall.linux'
elif sys.platform.startswith('win'):
filename = 'makeinstall.windows'
elif sys.platform == 'darwin':
filename = 'makeinstall.darwin'
else:
print "Unsupported platform: ", sys.platform
sys.exit(-1)
scriptDir = os.path.dirname(inspect.getfile(inspect.currentframe()))
return os.path.join(scriptDir,'..','tests', 'reference', filename)
def readReferenceFile():
# read file with old diff
f = open(referenceFile(), 'r');
filelist = []
for line in f:
filelist.append(line)
f.close()
return filelist
def generateReference(rootdir):
fileDict = {}
for root, subFolders, files in os.walk(rootdir):
for file in (subFolders + files):
f = os.path.join(root,file)
perm = os.stat(f).st_mode & 0777
if os.path.getsize(f) == 0:
print "'%s' is empty!" % f
fileDict[f[len(rootdir)+1:]] = perm
# generate new list
formattedlist = []
for name, perm in sorted(fileDict.iteritems()):
formattedlist.append("%o %s\n"% (perm, name))
return formattedlist;
def usage():
print "Usage: %s [-g | --generate] <dir>" % os.path.basename(sys.argv[0])
def main():
generateMode = False
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hg', ['help', 'generate'])
    except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-g', '--generate'):
generateMode = True
if len(args) != 1:
usage()
sys.exit(2)
rootdir = args[0]
if generateMode:
f = open(referenceFile(), 'w')
for item in generateReference(rootdir):
f.write(item)
f.close()
print "Do not forget to commit", referenceFile()
else:
hasDiff = False
for line in difflib.unified_diff(readReferenceFile(), generateReference(rootdir), fromfile=referenceFile(), tofile="generated"):
sys.stdout.write(line)
hasDiff = True
if hasDiff:
sys.exit(1)
if __name__ == "__main__":
main()
| en | 0.753215 | #!/usr/bin/env python ############################################################################ # # Copyright (C) 2016 The Qt Company Ltd. # Contact: https://www.qt.io/licensing/ # # This file is part of Qt Creator. # # Commercial License Usage # Licensees holding valid commercial Qt licenses may use this file in # accordance with the commercial license agreement provided with the # Software or, alternatively, in accordance with the terms contained in # a written agreement between you and The Qt Company. For licensing terms # and conditions see https://www.qt.io/terms-conditions. For further # information use the contact form at https://www.qt.io/contact-us. # # GNU General Public License Usage # Alternatively, this file may be used under the terms of the GNU # General Public License version 3 as published by the Free Software # Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT # included in the packaging of this file. Please review the following # information to ensure the GNU General Public License requirements will # be met: https://www.gnu.org/licenses/gpl-3.0.html. # ############################################################################ # read file with old diff # generate new list | 1.378142 | 1 |
deep_sdf/workspace.py | huajian1069/non-convex_optimisation | 2 | 850 | <reponame>huajian1069/non-convex_optimisation
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
import os
import torch
model_params_subdir = "ModelParameters"
optimizer_params_subdir = "OptimizerParameters"
latent_codes_subdir = "LatentCodes"
logs_filename = "Logs.pth"
reconstructions_subdir = "Reconstructions"
reconstruction_meshes_subdir = "Meshes"
reconstruction_codes_subdir = "Codes"
optimizations_subdir = "Optimizations"
optimizations_meshes_subdir = "Meshes"
optimizations_codes_subdir = "Codes"
specifications_filename = "specs.json"
data_source_map_filename = ".datasources.json"
evaluation_subdir = "Evaluation"
sdf_samples_subdir = "SdfSamples"
renders_subdir = "Renders"
surface_samples_subdir = "SurfaceSamples"
normalization_param_subdir = "NormalizationParameters"
training_meshes_subdir = "TrainingMeshes"
def load_experiment_specifications(experiment_directory):
filename = os.path.join(experiment_directory, specifications_filename)
if not os.path.isfile(filename):
raise Exception(
"The experiment directory ({}) does not include specifications file "
+ '"specs.json"'.format(experiment_directory)
)
return json.load(open(filename))
def load_model_parameters(experiment_directory, checkpoint, decoder):
filename = os.path.join(
experiment_directory, model_params_subdir, checkpoint + ".pth"
)
if not os.path.isfile(filename):
raise Exception('model state dict "{}" does not exist'.format(filename))
data = torch.load(filename)
decoder.load_state_dict(data["model_state_dict"])
return data["epoch"]
def build_decoder(experiment_directory, experiment_specs):
arch = __import__(
"networks." + experiment_specs["NetworkArch"], fromlist=["Decoder"]
)
latent_size = experiment_specs["CodeLength"]
decoder = arch.Decoder(latent_size, **experiment_specs["NetworkSpecs"]).cuda()
return decoder
def load_decoder(
experiment_directory, experiment_specs, checkpoint, data_parallel=True
):
decoder = build_decoder(experiment_directory, experiment_specs)
if data_parallel:
decoder = torch.nn.DataParallel(decoder)
epoch = load_model_parameters(experiment_directory, checkpoint, decoder)
return (decoder, epoch)
def load_latent_vectors(experiment_directory, checkpoint):
filename = os.path.join(
experiment_directory, latent_codes_subdir, checkpoint + ".pth"
)
if not os.path.isfile(filename):
raise Exception(
"The experiment directory ({}) does not include a latent code file"
+ " for checkpoint '{}'".format(experiment_directory, checkpoint)
)
data = torch.load(filename)
if isinstance(data["latent_codes"], torch.Tensor):
num_vecs = data["latent_codes"].size()[0]
lat_vecs = []
for i in range(num_vecs):
lat_vecs.append(data["latent_codes"][i].cuda())
return lat_vecs
else:
num_embeddings, embedding_dim = data["latent_codes"]["weight"].shape
lat_vecs = torch.nn.Embedding(num_embeddings, embedding_dim)
lat_vecs.load_state_dict(data["latent_codes"])
return lat_vecs.weight.data.detach()
def get_data_source_map_filename(data_dir):
return os.path.join(data_dir, data_source_map_filename)
def get_reconstructed_mesh_filename(
experiment_dir, epoch, dataset, class_name, instance_name
):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_meshes_subdir,
dataset,
class_name,
instance_name + ".ply",
)
def get_reconstructed_code_filename(
experiment_dir, epoch, dataset, class_name, instance_name
):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_codes_subdir,
dataset,
class_name,
instance_name + ".pth",
)
def get_evaluation_dir(experiment_dir, checkpoint, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, evaluation_subdir, checkpoint)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_model_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, model_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_optimizer_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, optimizer_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_latent_codes_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, latent_codes_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_normalization_params_filename(
data_dir, dataset_name, class_name, instance_name
):
return os.path.join(
data_dir,
normalization_param_subdir,
dataset_name,
class_name,
instance_name + ".npz",
)
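
# A minimal usage sketch (not part of the original module): it assumes an experiment
# directory laid out according to the constants above; the directory path and the
# "latest" checkpoint name below are hypothetical placeholders.
if __name__ == "__main__":
    example_experiment_dir = "examples/sofas"  # hypothetical experiment directory
    specs = load_experiment_specifications(example_experiment_dir)
    decoder, epoch = load_decoder(example_experiment_dir, specs, "latest")
    latent_vectors = load_latent_vectors(example_experiment_dir, "latest")
    print(
        "loaded decoder from epoch {} with {} latent codes".format(
            epoch, len(latent_vectors)
        )
    )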
| #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
import os
import torch
model_params_subdir = "ModelParameters"
optimizer_params_subdir = "OptimizerParameters"
latent_codes_subdir = "LatentCodes"
logs_filename = "Logs.pth"
reconstructions_subdir = "Reconstructions"
reconstruction_meshes_subdir = "Meshes"
reconstruction_codes_subdir = "Codes"
optimizations_subdir = "Optimizations"
optimizations_meshes_subdir = "Meshes"
optimizations_codes_subdir = "Codes"
specifications_filename = "specs.json"
data_source_map_filename = ".datasources.json"
evaluation_subdir = "Evaluation"
sdf_samples_subdir = "SdfSamples"
renders_subdir = "Renders"
surface_samples_subdir = "SurfaceSamples"
normalization_param_subdir = "NormalizationParameters"
training_meshes_subdir = "TrainingMeshes"
def load_experiment_specifications(experiment_directory):
filename = os.path.join(experiment_directory, specifications_filename)
if not os.path.isfile(filename):
raise Exception(
"The experiment directory ({}) does not include specifications file "
+ '"specs.json"'.format(experiment_directory)
)
return json.load(open(filename))
def load_model_parameters(experiment_directory, checkpoint, decoder):
filename = os.path.join(
experiment_directory, model_params_subdir, checkpoint + ".pth"
)
if not os.path.isfile(filename):
raise Exception('model state dict "{}" does not exist'.format(filename))
data = torch.load(filename)
decoder.load_state_dict(data["model_state_dict"])
return data["epoch"]
def build_decoder(experiment_directory, experiment_specs):
arch = __import__(
"networks." + experiment_specs["NetworkArch"], fromlist=["Decoder"]
)
latent_size = experiment_specs["CodeLength"]
decoder = arch.Decoder(latent_size, **experiment_specs["NetworkSpecs"]).cuda()
return decoder
def load_decoder(
experiment_directory, experiment_specs, checkpoint, data_parallel=True
):
decoder = build_decoder(experiment_directory, experiment_specs)
if data_parallel:
decoder = torch.nn.DataParallel(decoder)
epoch = load_model_parameters(experiment_directory, checkpoint, decoder)
return (decoder, epoch)
def load_latent_vectors(experiment_directory, checkpoint):
filename = os.path.join(
experiment_directory, latent_codes_subdir, checkpoint + ".pth"
)
if not os.path.isfile(filename):
raise Exception(
"The experiment directory ({}) does not include a latent code file"
+ " for checkpoint '{}'".format(experiment_directory, checkpoint)
)
data = torch.load(filename)
if isinstance(data["latent_codes"], torch.Tensor):
num_vecs = data["latent_codes"].size()[0]
lat_vecs = []
for i in range(num_vecs):
lat_vecs.append(data["latent_codes"][i].cuda())
return lat_vecs
else:
num_embeddings, embedding_dim = data["latent_codes"]["weight"].shape
lat_vecs = torch.nn.Embedding(num_embeddings, embedding_dim)
lat_vecs.load_state_dict(data["latent_codes"])
return lat_vecs.weight.data.detach()
def get_data_source_map_filename(data_dir):
return os.path.join(data_dir, data_source_map_filename)
def get_reconstructed_mesh_filename(
experiment_dir, epoch, dataset, class_name, instance_name
):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_meshes_subdir,
dataset,
class_name,
instance_name + ".ply",
)
def get_reconstructed_code_filename(
experiment_dir, epoch, dataset, class_name, instance_name
):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_codes_subdir,
dataset,
class_name,
instance_name + ".pth",
)
def get_evaluation_dir(experiment_dir, checkpoint, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, evaluation_subdir, checkpoint)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_model_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, model_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_optimizer_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, optimizer_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_latent_codes_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, latent_codes_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_normalization_params_filename(
data_dir, dataset_name, class_name, instance_name
):
return os.path.join(
data_dir,
normalization_param_subdir,
dataset_name,
class_name,
instance_name + ".npz",
) | en | 0.450713 | #!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. | 2.166815 | 2 |
EmoPy/EmoPy/examples/convolutional_dropout_model.py | Rahmatullina/FinalYearProject | 0 | 851 | <reponame>Rahmatullina/FinalYearProject<filename>EmoPy/EmoPy/examples/convolutional_dropout_model.py
from EmoPy.src.fermodel import FERModel
from EmoPy.src.directory_data_loader import DirectoryDataLoader
from EmoPy.src.csv_data_loader import CSVDataLoader
from EmoPy.src.data_generator import DataGenerator
from EmoPy.src.neuralnets import ConvolutionalNNDropout
from sklearn.model_selection import train_test_split
import numpy as np
from pkg_resources import resource_filename,resource_exists
validation_split = 0.15
target_dimensions = (48, 48)
channels = 1
verbose = True
print('--------------- Convolutional Dropout Model -------------------')
print('Loading data...')
directory_path = resource_filename('EmoPy.examples','image_data/sample_image_directory')
data_loader = DirectoryDataLoader(datapath=directory_path, validation_split=validation_split)
dataset = data_loader.load_data()
if verbose:
dataset.print_data_details()
print('Preparing training/testing data...')
train_images, train_labels = dataset.get_training_data()
train_gen = DataGenerator().fit(train_images, train_labels)
test_images, test_labels = dataset.get_test_data()
test_gen = DataGenerator().fit(test_images, test_labels)
print('Training net...')
model = ConvolutionalNNDropout(target_dimensions, channels, dataset.get_emotion_index_map(), verbose=True)
model.fit_generator(train_gen.generate(target_dimensions, batch_size=5),
test_gen.generate(target_dimensions, batch_size=5),
epochs=15)
# Save model configuration
# model.export_model('output/conv2d_model.json','output/conv2d_weights.h5',"output/conv2d_emotion_map.json", emotion_map)
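# A hedged sketch of the export step above (the output paths are illustrative): the
# emotion_map argument is assumed to come from the dataset, as it already does for the
# model constructed above.
# emotion_map = dataset.get_emotion_index_map()
# model.export_model('output/conv2d_model.json', 'output/conv2d_weights.h5',
#                    'output/conv2d_emotion_map.json', emotion_map)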
| from EmoPy.src.fermodel import FERModel
from EmoPy.src.directory_data_loader import DirectoryDataLoader
from EmoPy.src.csv_data_loader import CSVDataLoader
from EmoPy.src.data_generator import DataGenerator
from EmoPy.src.neuralnets import ConvolutionalNNDropout
from sklearn.model_selection import train_test_split
import numpy as np
from pkg_resources import resource_filename,resource_exists
validation_split = 0.15
target_dimensions = (48, 48)
channels = 1
verbose = True
print('--------------- Convolutional Dropout Model -------------------')
print('Loading data...')
directory_path = resource_filename('EmoPy.examples','image_data/sample_image_directory')
data_loader = DirectoryDataLoader(datapath=directory_path, validation_split=validation_split)
dataset = data_loader.load_data()
if verbose:
dataset.print_data_details()
print('Preparing training/testing data...')
train_images, train_labels = dataset.get_training_data()
train_gen = DataGenerator().fit(train_images, train_labels)
test_images, test_labels = dataset.get_test_data()
test_gen = DataGenerator().fit(test_images, test_labels)
print('Training net...')
model = ConvolutionalNNDropout(target_dimensions, channels, dataset.get_emotion_index_map(), verbose=True)
model.fit_generator(train_gen.generate(target_dimensions, batch_size=5),
test_gen.generate(target_dimensions, batch_size=5),
epochs=15)
# Save model configuration
# model.export_model('output/conv2d_model.json','output/conv2d_weights.h5',"output/conv2d_emotion_map.json", emotion_map) | en | 0.294864 | # Save model configuration # model.export_model('output/conv2d_model.json','output/conv2d_weights.h5',"output/conv2d_emotion_map.json", emotion_map) | 2.398914 | 2 |
ENV/lib/python3.6/site-packages/pyramid_jinja2/tests/extensions.py | captain-c00keys/pyramid-stocks | 0 | 852 | <filename>ENV/lib/python3.6/site-packages/pyramid_jinja2/tests/extensions.py
from jinja2 import nodes
from jinja2.ext import Extension
class TestExtension(Extension):
tags = {'test_ext'}
def parse(self, parser): return nodes.Const("This is test extension")
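# For context, a custom extension such as TestExtension is typically enabled through the
# pyramid_jinja2 settings; the dotted path below is illustrative only:
#
#   config.add_settings({'jinja2.extensions': 'pyramid_jinja2.tests.extensions.TestExtension'})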
| <filename>ENV/lib/python3.6/site-packages/pyramid_jinja2/tests/extensions.py
from jinja2 import nodes
from jinja2.ext import Extension
class TestExtension(Extension):
tags = {'test_ext'}
def parse(self, parser): return nodes.Const("This is test extension")
| none | 1 | 2.153995 | 2 |
|
deepstream_ignition_usb_yolo.py | valdivj/Deepstream-IGN-Maker-YOLO | 18 | 853 | <reponame>valdivj/Deepstream-IGN-Maker-YOLO
#!/usr/bin/env python3
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import sys
sys.path.append('../')
sys.path.insert(0, "../../../client_libraries/python/")
import paho.mqtt.client as mqtt
import sparkplug_b as sparkplug
import time, threading
import random
import string
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from sparkplug_b import *
import pyds
# Application Variables
serverUrl = "localhost"
myGroupId = "Sparkplug B Devices"
myNodeName = "NVIDIA"
myDeviceName = "XavierNX"
publishPeriod = 5000
myUsername = "admin"
myPassword = "<PASSWORD>"
client = mqtt.Client(serverUrl, 1883, 60)
WAIT_SECONDS = 1
frame_numberx = 0
num_rectsx = 0
counter1 = 0
counter2 = 0
Object1 = 0
Object2 = 0
Object3 = 0
Object4 = 0
Object5 = 0
Object6 = 0
Object7 = 0
Object8 = 0
Object9 = 0
Object10 = 0
newValue1 = 0
newValue2 = 0
newValue3 = 0
newValue4 = 0
newValue5 = 0
newValue6 = 0
newValue7 = 0
newValue8 = 0
newValue9 = 0
newValue10 = 0
class AliasMap:
Next_Server = 0
Rebirth = 1
Reboot = 2
Device_frame_numberx = 3
Device_num_rectsx = 4
Device_Metric0 = 5
Device_Metric1 = 6
Device_Metric2 = 7
Device_Metric3 = 8
Device_Metric4 = 9
Device_counter1 = 10
Device_counter2 = 11
Device_Input1 = 12
Device_Input2 = 13
Device_Input3 = 14
Device_Input4 = 15
Device_Input5 = 16
Device_Input6 = 17
Device_Input7 = 18
Device_Input8 = 19
Device_Input9 = 20
Device_Input10 = 21
Device_Output1 = 22
Device_Output2 = 23
Device_Output3 = 24
Device_Output4 = 25
Device_Output5 = 26
Device_Output6 = 27
Device_Output7 = 28
Device_Output8 = 29
Device_Output9 = 30
Device_Output10 = 31
MAX_DISPLAY_LEN=64
PGIE_CLASS_ID_TOOTHBRUSH = 79
PGIE_CLASS_ID_HAIR_DRYER = 78
PGIE_CLASS_ID_TEDDY_BEAR = 77
PGIE_CLASS_ID_SCISSORS = 76
PGIE_CLASS_ID_VASE = 75
PGIE_CLASS_ID_CLOCK = 74
PGIE_CLASS_ID_BOOK = 73
PGIE_CLASS_ID_REFRIGERATOR = 72
PGIE_CLASS_ID_SINK = 71
PGIE_CLASS_ID_TOASTER = 70
PGIE_CLASS_ID_OVEN = 69
PGIE_CLASS_ID_MICROWAVE = 68
PGIE_CLASS_ID_CELL_PHONE = 67
PGIE_CLASS_ID_KEYBOARD = 66
PGIE_CLASS_ID_REMOTE = 65
PGIE_CLASS_ID_MOUSE = 64
PGIE_CLASS_ID_LAPTOP = 63
PGIE_CLASS_ID_TVMONITOR = 62
PGIE_CLASS_ID_TOILET = 61
PGIE_CLASS_ID_DININGTABLE= 60
PGIE_CLASS_ID_BED = 59
PGIE_CLASS_ID_POTTEDPLANT = 58
PGIE_CLASS_ID_SOFA = 57
PGIE_CLASS_ID_CHAIR = 56
PGIE_CLASS_ID_CAKE = 55
PGIE_CLASS_ID_DONUT = 54
PGIE_CLASS_ID_PIZZA = 53
PGIE_CLASS_ID_HOT_DOG = 52
PGIE_CLASS_ID_CARROT = 51
PGIE_CLASS_ID_BROCCOLI = 50
PGIE_CLASS_ID_ORANGE = 49
PGIE_CLASS_ID_SANDWICH = 48
PGIE_CLASS_ID_APPLE = 47
PGIE_CLASS_ID_BANANA = 46
PGIE_CLASS_ID_BOWL = 45
PGIE_CLASS_ID_SPOON = 44
PGIE_CLASS_ID_KNIFE = 43
PGIE_CLASS_ID_FORK = 42
PGIE_CLASS_ID_CUP = 41
PGIE_CLASS_ID_WINE_GLASS = 40
PGIE_CLASS_ID_BOTTLE = 39
PGIE_CLASS_ID_TENNIS_RACKET = 38
PGIE_CLASS_ID_SURFBOARD = 37
PGIE_CLASS_ID_SKATEBOARD = 36
PGIE_CLASS_ID_BASEBALL_GLOVE = 35
PGIE_CLASS_ID_BASEBALL_BAT = 34
PGIE_CLASS_ID_KITE = 33
PGIE_CLASS_ID_SPORTS_BALL = 32
PGIE_CLASS_ID_SNOWBOARD = 31
PGIE_CLASS_ID_SKIS = 30
PGIE_CLASS_ID_FRISBEE = 29
PGIE_CLASS_ID_SUITCASE = 28
PGIE_CLASS_ID_TIE = 27
PGIE_CLASS_ID_HANDBAG = 26
PGIE_CLASS_ID_UMBRELLA = 25
PGIE_CLASS_ID_BACKPACK = 24
PGIE_CLASS_ID_GIRAFFE = 23
PGIE_CLASS_ID_ZEBRA = 22
PGIE_CLASS_ID_BEAR = 21
PGIE_CLASS_ID_ELEPHANT = 20
PGIE_CLASS_ID_COW = 19
PGIE_CLASS_ID_SHEEP = 18
PGIE_CLASS_ID_HORSE = 17
PGIE_CLASS_ID_DOG = 16
PGIE_CLASS_ID_CAT = 15
PGIE_CLASS_ID_BIRD = 14
PGIE_CLASS_ID_BENCH = 13
PGIE_CLASS_ID_PARKING_METER = 12
PGIE_CLASS_ID_STOP_SIGN = 11
PGIE_CLASS_ID_FIRE_HYDRANT = 10
PGIE_CLASS_ID_TRAFFIC_LIGHT = 9
PGIE_CLASS_ID_BOAT = 8
PGIE_CLASS_ID_TRUCK = 7
PGIE_CLASS_ID_TRAIN = 6
PGIE_CLASS_ID_BUS = 5
PGIE_CLASS_ID_AEROPLANE = 4
PGIE_CLASS_ID_MOTORBIKE = 3
PGIE_CLASS_ID_VEHICLE = 2
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 0
pgie_classes_str= ["Toothbrush", "Hair dryer", "Teddy bear","Scissors","Vase", "Clock", "Book","Refrigerator", "Sink", "Toaster","Oven","Microwave", "Cell phone", "Keyboard","Remote", "Mouse", "Laptop","Tvmonitor","Toilet", "Diningtable", "Bed","Pottedplant", "Sofa", "Chair","Cake","Donut", "Pizza", "Hot dog","Carrot", "Broccli", "Orange","Sandwich","Apple", "Banana", "Bowl","Spoon", "Knife", "Fork","Cup","Wine Glass", "Bottle", "Tennis racket","Surfboard", "Skateboard", "Baseball glove","Baseball bat","Kite", "Sports ball", "Snowboard","Skis", "Frisbee", "Suitcase","Tie","Handbag", "Umbrella", "Backpack","Giraffe", "Zebra", "Bear","Elephant","Cow", "Sheep", "Horse","Dog", "Cat", "Bird","Bench","Parking meter", "Stop sign", "Fire hydrant","Traffic light", "Boat", "Truck","Train","Bus", "Areoplane", "Motorbike","Car", "Bicycle", "Person"]
######################################################################
# The callback for when the client receives a CONNACK response from the server.
######################################################################
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected with result code "+str(rc))
else:
print("Failed to connect with result code "+str(rc))
sys.exit()
global myGroupId
global myNodeName
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("spBv1.0/" + myGroupId + "/NCMD/" + myNodeName + "/#")
client.subscribe("spBv1.0/" + myGroupId + "/DCMD/" + myNodeName + "/#")
######################################################################
######################################################################
# The callback for when a PUBLISH message is received from the server.
######################################################################
def on_message(client, userdata, msg):
print("Message arrived: " + msg.topic)
tokens = msg.topic.split("/")
global newValue1
global newValue2
global newValue3
global newValue4
global newValue5
global newValue6
global newValue7
global newValue8
global newValue9
global newValue10
if tokens[0] == "spBv1.0" and tokens[1] == myGroupId and (tokens[2] == "NCMD" or tokens[2] == "DCMD") and tokens[3] == myNodeName:
inboundPayload = sparkplug_b_pb2.Payload()
inboundPayload.ParseFromString(msg.payload)
for metric in inboundPayload.metrics:
if metric.name == "Node Control/Next Server" or metric.alias == AliasMap.Next_Server:
# 'Node Control/Next Server' is an NCMD used to tell the device/client application to
# disconnect from the current MQTT server and connect to the next MQTT server in the
# list of available servers. This is used for clients that have a pool of MQTT servers
# to connect to.
print ("'Node Control/Next Server' is not implemented in this example")
elif metric.name == "Node Control/Rebirth" or metric.alias == AliasMap.Rebirth:
# 'Node Control/Rebirth' is an NCMD used to tell the device/client application to resend
# its full NBIRTH and DBIRTH again. MQTT Engine will send this NCMD to a device/client
# application if it receives an NDATA or DDATA with a metric that was not published in the
# original NBIRTH or DBIRTH. This is why the application must send all known metrics in
# its original NBIRTH and DBIRTH messages.
publishBirth()
elif metric.name == "Node Control/Reboot" or metric.alias == AliasMap.Reboot:
# 'Node Control/Reboot' is an NCMD used to tell a device/client application to reboot
# This can be used for devices that need a full application reset via a soft reboot.
# In this case, we fake a full reboot with a republishing of the NBIRTH and DBIRTH
# messages.
publishBirth()
elif metric.name == "output/Device Metric2" or metric.alias == AliasMap.Device_Metric2:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue = metric.int_value
print ("CMD message for output/Device Metric2 - New Value: {}".format(newValue))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric2, MetricDataType.Int16, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 1
#publishBirth()
elif metric.name == "output/Device Input1" or metric.alias == AliasMap.Device_Input1:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue1 = metric.int_value
print ("CMD message for output/Device Input1 - New Value: {}".format(newValue1))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input1, MetricDataType.Int16, newValue1)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 2
#publishBirth()
elif metric.name == "output/Device Input2" or metric.alias == AliasMap.Device_Input2:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue2 = metric.int_value
print ("CMD message for output/Device Input2 - New Value: {}".format(newValue2))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input2, MetricDataType.Int16, newValue2)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 3
#publishBirth()
elif metric.name == "output/Device Input3" or metric.alias == AliasMap.Device_Input3:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue3 = metric.int_value
print ("CMD message for output/Device Input3 - New Value: {}".format(newValue3))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input3, MetricDataType.Int16, newValue3)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 4
#publishBirth()
elif metric.name == "output/Device Input4" or metric.alias == AliasMap.Device_Input4:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue4 = metric.int_value
print ("CMD message for output/Device Input4 - New Value: {}".format(newValue4))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input4, MetricDataType.Int16, newValue4)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 5
#publishBirth()
elif metric.name == "output/Device Input5" or metric.alias == AliasMap.Device_Input5:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue5 = metric.int_value
print ("CMD message for output/Device Input5 - New Value: {}".format(newValue5))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input5, MetricDataType.Int16, newValue5)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 6
#publishBirth()
elif metric.name == "output/Device Input6" or metric.alias == AliasMap.Device_Input6:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue6 = metric.int_value
print ("CMD message for output/Device Input6 - New Value: {}".format(newValue6))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input6, MetricDataType.Int16, newValue6)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 7
#publishBirth()
elif metric.name == "output/Device Input7" or metric.alias == AliasMap.Device_Input7:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue7 = metric.int_value
print ("CMD message for output/Device Input7 - New Value: {}".format(newValue7))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input7, MetricDataType.Int16, newValue7)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 8
#publishBirth()
elif metric.name == "output/Device Input8" or metric.alias == AliasMap.Device_Input8:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue8 = metric.int_value
print ("CMD message for output/Device Input8 - New Value: {}".format(newValue8))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input8, MetricDataType.Int16, newValue8)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 9
#publishBirth()
elif metric.name == "output/Device Input9" or metric.alias == AliasMap.Device_Input9:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue9 = metric.int_value
print ("CMD message for output/Device Input9 - New Value: {}".format(newValue9))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input9, MetricDataType.Int16, newValue9)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 10
#publishBirth()
elif metric.name == "output/Device Input10" or metric.alias == AliasMap.Device_Input10:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue10 = metric.int_value
print ("CMD message for output/Device Input10 - New Value: {}".format(newValue10))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input10, MetricDataType.Int16, newValue10)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
#global newValue4
#publishBirth()
elif metric.name == "output/Device Metric4" or metric.alias == AliasMap.Device_Metric4:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue = metric.string_value
print ("CMD message for output/Device Metric4 - New Value: {}".format(newValue))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric4, MetricDataType.String, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
#publishBirth()
elif metric.name == "output/Device Metric3" or metric.alias == AliasMap.Device_Metric3:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is a Boolean because of how we declared it in the DBIRTH
newValue = metric.boolean_value
print ("CMD message for output/Device Metric3 - New Value: %r" % newValue)
# Create the DDATA payload - use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric3, MetricDataType.Boolean, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
else:
print ("Unknown command: " + metric.name)
else:
print ("Unknown command...")
print ("Done publishing")
#####################################################################
######################################################################
######################################################################
# Publish the BIRTH certificates
######################################################################
def publishBirth():
publishNodeBirth()
publishDeviceBirth()
######################################################################
######################################################################
# Publish the NBIRTH certificate
######################################################################
def publishNodeBirth():
print ("Publishing Node Birth")
# Create the node birth payload
payload = sparkplug.getNodeBirthPayload()
# Set up the Node Controls
addMetric(payload, "Node Control/Next Server", AliasMap.Next_Server, MetricDataType.Boolean, False)
addMetric(payload, "Node Control/Rebirth", AliasMap.Rebirth, MetricDataType.Boolean, False)
addMetric(payload, "Node Control/Reboot", AliasMap.Reboot, MetricDataType.Boolean, False)
# Publish the node birth certificate
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/NBIRTH/" + myNodeName, byteArray, 0, False)
######################################################################
######################################################################
# Publish the DBIRTH certificate
######################################################################
def publishDeviceBirth():
print ("Publishing Device Birth")
# Get the payload
payload = sparkplug.getDeviceBirthPayload()
# Add some device metrics
addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx )
addMetric(payload, "input/Device Metric0", AliasMap.Device_Metric0, MetricDataType.String, "hello device")
addMetric(payload, "input/Device Metric1", AliasMap.Device_Metric1, MetricDataType.Boolean, True)
addMetric(payload, "input/Number of Objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx )
addMetric(payload, "output/Device Metric2", AliasMap.Device_Metric2, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input1", AliasMap.Device_Input1, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input2", AliasMap.Device_Input2, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input3", AliasMap.Device_Input3, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input4", AliasMap.Device_Input4, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input5", AliasMap.Device_Input5, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input6", AliasMap.Device_Input6, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input7", AliasMap.Device_Input7, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input8", AliasMap.Device_Input8, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input9", AliasMap.Device_Input9, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input10", AliasMap.Device_Input10, MetricDataType.Int16, 0)
addMetric(payload,"input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Metric3", AliasMap.Device_Metric3, MetricDataType.Boolean, True)
addMetric(payload, "output/Device Metric4", AliasMap.Device_Metric4, MetricDataType.String, "start")
# Publish the initial data with the Device BIRTH certificate
totalByteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DBIRTH/" + myNodeName + "/" + myDeviceName, totalByteArray, 0, False)
######################################################################
######################################################################
def osd_sink_pad_buffer_probe(pad,info,u_data):
global frame_numberx
global num_rectsx
global Object1
global Object2
global Object3
global Object4
global Object5
global Object6
global Object7
global Object8
global Object9
global Object10
    # Initializing object counters with 0.
obj_counter = {
PGIE_CLASS_ID_TOOTHBRUSH:0,
PGIE_CLASS_ID_HAIR_DRYER:0,
PGIE_CLASS_ID_TEDDY_BEAR:0,
PGIE_CLASS_ID_SCISSORS:0,
PGIE_CLASS_ID_VASE:0,
PGIE_CLASS_ID_CLOCK:0,
PGIE_CLASS_ID_BOOK:0,
PGIE_CLASS_ID_REFRIGERATOR:0,
PGIE_CLASS_ID_SINK:0,
PGIE_CLASS_ID_TOASTER:0,
PGIE_CLASS_ID_OVEN:0,
PGIE_CLASS_ID_MICROWAVE:0,
PGIE_CLASS_ID_CELL_PHONE:0,
PGIE_CLASS_ID_KEYBOARD:0,
PGIE_CLASS_ID_REMOTE:0,
PGIE_CLASS_ID_MOUSE:0,
PGIE_CLASS_ID_LAPTOP:0,
PGIE_CLASS_ID_TVMONITOR:0,
PGIE_CLASS_ID_TOILET:0,
PGIE_CLASS_ID_DININGTABLE:0,
PGIE_CLASS_ID_BED:0,
PGIE_CLASS_ID_POTTEDPLANT:0,
PGIE_CLASS_ID_SOFA:0,
PGIE_CLASS_ID_CHAIR:0,
PGIE_CLASS_ID_CAKE:0,
PGIE_CLASS_ID_DONUT:0,
PGIE_CLASS_ID_PIZZA:0,
PGIE_CLASS_ID_HOT_DOG:0,
PGIE_CLASS_ID_CARROT:0,
PGIE_CLASS_ID_BROCCOLI:0,
PGIE_CLASS_ID_ORANGE:0,
PGIE_CLASS_ID_SANDWICH:0,
PGIE_CLASS_ID_APPLE:0,
PGIE_CLASS_ID_BANANA:0,
PGIE_CLASS_ID_BOWL:0,
PGIE_CLASS_ID_SPOON:0,
PGIE_CLASS_ID_KNIFE:0,
PGIE_CLASS_ID_FORK:0,
PGIE_CLASS_ID_CUP:0,
PGIE_CLASS_ID_WINE_GLASS:0,
PGIE_CLASS_ID_BOTTLE:0,
PGIE_CLASS_ID_TENNIS_RACKET:0,
PGIE_CLASS_ID_SURFBOARD:0,
PGIE_CLASS_ID_SKATEBOARD:0,
PGIE_CLASS_ID_BASEBALL_GLOVE:0,
PGIE_CLASS_ID_BASEBALL_BAT:0,
PGIE_CLASS_ID_KITE:0,
PGIE_CLASS_ID_SPORTS_BALL:0,
PGIE_CLASS_ID_SNOWBOARD:0,
PGIE_CLASS_ID_SKIS:0,
PGIE_CLASS_ID_FRISBEE:0,
PGIE_CLASS_ID_SUITCASE:0,
PGIE_CLASS_ID_TIE:0,
PGIE_CLASS_ID_HANDBAG:0,
PGIE_CLASS_ID_UMBRELLA:0,
PGIE_CLASS_ID_BACKPACK:0,
PGIE_CLASS_ID_GIRAFFE:0,
PGIE_CLASS_ID_ZEBRA:0,
PGIE_CLASS_ID_BEAR:0,
PGIE_CLASS_ID_ELEPHANT:0,
PGIE_CLASS_ID_COW:0,
PGIE_CLASS_ID_SHEEP:0,
PGIE_CLASS_ID_HORSE:0,
PGIE_CLASS_ID_DOG:0,
PGIE_CLASS_ID_CAT:0,
PGIE_CLASS_ID_BIRD:0,
PGIE_CLASS_ID_BENCH:0,
PGIE_CLASS_ID_PARKING_METER:0,
PGIE_CLASS_ID_STOP_SIGN:0,
PGIE_CLASS_ID_FIRE_HYDRANT:0,
PGIE_CLASS_ID_TRAFFIC_LIGHT:0,
PGIE_CLASS_ID_BOAT:0,
PGIE_CLASS_ID_TRUCK:0,
PGIE_CLASS_ID_TRAIN:0,
PGIE_CLASS_ID_BUS:0,
PGIE_CLASS_ID_AEROPLANE:0,
PGIE_CLASS_ID_MOTORBIKE:0,
PGIE_CLASS_ID_VEHICLE:0,
PGIE_CLASS_ID_BICYCLE:0,
PGIE_CLASS_ID_PERSON:0
}
num_rects=0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.NvDsFrameMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number=frame_meta.frame_num
frame_numberx=frame_meta.frame_num
num_rects = frame_meta.num_obj_meta
num_rectsx = frame_meta.num_obj_meta
l_obj=frame_meta.obj_meta_list
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
try:
l_obj=l_obj.next
except StopIteration:
break
# Acquiring a display meta object. The memory ownership remains in
# the C code so downstream plugins can still access it. Otherwise
# the garbage collector will claim it when this probe function exits.
display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
display_meta.num_labels = 1
py_nvosd_text_params = display_meta.text_params[0]
# Setting display text to be shown on screen
# Note that the pyds module allocates a buffer for the string, and the
# memory will not be claimed by the garbage collector.
# Reading the display_text field here will return the C address of the
# allocated string. Use pyds.get_string() to get the string content.
py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Bird_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_CUP], obj_counter[PGIE_CLASS_ID_BOTTLE])
Object1 = obj_counter[newValue1]
Object2 = obj_counter[newValue2]
Object3 = obj_counter[newValue3]
Object4 = obj_counter[newValue4]
Object5 = obj_counter[newValue5]
Object6 = obj_counter[newValue6]
Object7 = obj_counter[newValue7]
Object8 = obj_counter[newValue8]
Object9 = obj_counter[newValue9]
Object10 = obj_counter[newValue10]
# Now set the offsets where the string should appear
py_nvosd_text_params.x_offset = 10
py_nvosd_text_params.y_offset = 12
# Font , font-color and font-size
py_nvosd_text_params.font_params.font_name = "Serif"
py_nvosd_text_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
py_nvosd_text_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Using pyds.get_string() to get display_text as string
# print(pyds.get_string(py_nvosd_text_params.display_text))
#pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
try:
l_frame=l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
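# For reference, a probe like the one above is attached to the OSD element's sink pad
# once the pipeline has been assembled, typically along these lines (illustrative names):
#
#   osdsinkpad = nvosd.get_static_pad("sink")
#   osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)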
######################################################################
def main(args):
# Check input arguments
if len(args) != 2:
sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
sys.exit(1)
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
# Source element for reading from the file
print("Creating Source \n ")
source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
if not source:
sys.stderr.write(" Unable to create Source \n")
caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
if not caps_v4l2src:
sys.stderr.write(" Unable to create v4l2src capsfilter \n")
print("Creating Video Converter \n")
# Adding videoconvert -> nvvideoconvert as not all
# raw formats are supported by nvvideoconvert;
# Say YUYV is unsupported - which is the common
# raw format for many logi usb cams
# In case we have a camera with raw format supported in
# nvvideoconvert, GStreamer plugins' capability negotiation
# shall be intelligent enough to reduce compute by
# videoconvert doing passthrough (TODO we need to confirm this)
# videoconvert to make sure a superset of raw formats are supported
vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
if not vidconvsrc:
sys.stderr.write(" Unable to create videoconvert \n")
# nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
if not nvvidconvsrc:
sys.stderr.write(" Unable to create Nvvideoconvert \n")
caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
if not caps_vidconvsrc:
sys.stderr.write(" Unable to create capsfilter \n")
# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
# Use nvinfer to run inferencing on camera's output,
# behaviour of inferencing is set through config file
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
# Use convertor to convert from NV12 to RGBA as required by nvosd
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv \n")
# Create OSD to draw on the converted RGBA buffer
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
sys.stderr.write(" Unable to create nvosd \n")
# Finally render the osd output
if is_aarch64():
transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
print("Creating EGLSink \n")
sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
if not sink:
sys.stderr.write(" Unable to create egl sink \n")
print("Playing cam %s " %args[1])
caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
source.set_property('device', args[1])
streammux.set_property('width', 640)
streammux.set_property('height', 480)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
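    # batched-push-timeout is expressed in microseconds, so 4000000 means the
    # muxer waits at most 4 seconds for a batch to fill before pushing it.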
pgie.set_property('config-file-path', "config_infer_primary_yoloV3.txt")
# Set sync = false to avoid late frame drops at the display-sink
sink.set_property('sync', False)
print("Adding elements to Pipeline \n")
pipeline.add(source)
pipeline.add(caps_v4l2src)
pipeline.add(vidconvsrc)
pipeline.add(nvvidconvsrc)
pipeline.add(caps_vidconvsrc)
pipeline.add(streammux)
pipeline.add(pgie)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(sink)
if is_aarch64():
pipeline.add(transform)
# we link the elements together
# v4l2src -> nvvideoconvert -> mux ->
# nvinfer -> nvvideoconvert -> nvosd -> video-renderer
print("Linking elements in the Pipeline \n")
source.link(caps_v4l2src)
caps_v4l2src.link(vidconvsrc)
vidconvsrc.link(nvvidconvsrc)
nvvidconvsrc.link(caps_vidconvsrc)
sinkpad = streammux.get_request_pad("sink_0")
if not sinkpad:
sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = caps_vidconvsrc.get_static_pad("src")
if not srcpad:
sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
srcpad.link(sinkpad)
streammux.link(pgie)
pgie.link(nvvidconv)
nvvidconv.link(nvosd)
if is_aarch64():
nvosd.link(transform)
transform.link(sink)
else:
nvosd.link(sink)
    # Create an event loop and feed GStreamer bus messages to it
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)
    # Add a probe to be notified of the generated metadata. The probe is attached
    # to the sink pad of the OSD element because, by that point, the buffer has
    # accumulated all of the metadata.
osdsinkpad = nvosd.get_static_pad("sink")
if not osdsinkpad:
sys.stderr.write(" Unable to get sink pad of nvosd \n")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
######################################################################
# Create the node death payload
deathPayload = sparkplug.getNodeDeathPayload()
# Start of main program - Set up the MQTT client connection
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(myUsername, myPassword)
deathByteArray = bytearray(deathPayload.SerializeToString())
client.will_set("spBv1.0/" + myGroupId + "/NDEATH/" + myNodeName, deathByteArray, 0, False)
client.connect(serverUrl, 1883, 60)
# Publish the birth certificates
publishBirth()
def foo():
# Periodically publish some new data
payload = sparkplug.getDdataPayload()
        # Add the current frame statistics and per-class object counts to the payload
addMetric(payload, "input/number of objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx )
addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx )
addMetric(payload,"input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, Object1)
addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, Object2)
addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, Object3)
addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, Object4)
addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, Object5)
addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, Object6)
addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, Object7)
addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, Object8)
addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, Object9)
addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, Object10)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Sit and wait for inbound or outbound events
for _ in range(1):
time.sleep(1)
client.loop()
threading.Timer(WAIT_SECONDS, foo).start()
foo()
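    # foo() publishes one DDATA message and then re-schedules itself every
    # WAIT_SECONDS seconds via threading.Timer, so Sparkplug metric updates keep
    # flowing in parallel with the GStreamer main loop (each cycle starts a new
    # timer thread).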
######################################################################
print("Starting pipeline \n")
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
except:
pass
#cleanup
print("Exiting app\n")
pipeline.set_state(Gst.State.NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| #!/usr/bin/env python3
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import sys
sys.path.append('../')
sys.path.insert(0, "../../../client_libraries/python/")
import paho.mqtt.client as mqtt
import sparkplug_b as sparkplug
import time
import time, threading
import random
import string
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from sparkplug_b import *
import pyds
# Application Variables
serverUrl = "localhost"
myGroupId = "Sparkplug B Devices"
myNodeName = "NVIDIA"
myDeviceName = "XavierNX"
publishPeriod = 5000
myUsername = "admin"
myPassword = "<PASSWORD>"
client = mqtt.Client(serverUrl, 1883, 60)
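# NOTE (assumption about paho-mqtt 1.x): Client() takes (client_id,
# clean_session, userdata, ...), so the three values above are interpreted as
# the client id and the next two positional arguments rather than as connection
# parameters; the real host, port and keepalive are supplied later through
# client.connect(serverUrl, 1883, 60).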
WAIT_SECONDS = 1
frame_numberx = 0
num_rectsx = 0
counter1 = 0
counter2 = 0
Object1 = 0
Object2 = 0
Object3 = 0
Object4 = 0
Object5 = 0
Object6 = 0
Object7 = 0
Object8 = 0
Object9 = 0
Object10 = 0
newValue1 = 0
newValue2 = 0
newValue3 = 0
newValue4 = 0
newValue5 = 0
newValue6 = 0
newValue7 = 0
newValue8 = 0
newValue9 = 0
newValue10 = 0
class AliasMap:
Next_Server = 0
Rebirth = 1
Reboot = 2
Device_frame_numberx = 3
Device_num_rectsx = 4
Device_Metric0 = 5
Device_Metric1 = 6
Device_Metric2 = 7
Device_Metric3 = 8
Device_Metric4 = 9
Device_counter1 = 10
Device_counter2 = 11
Device_Input1 = 12
Device_Input2 = 13
Device_Input3 = 14
Device_Input4 = 15
Device_Input5 = 16
Device_Input6 = 17
Device_Input7 = 18
Device_Input8 = 19
Device_Input9 = 20
Device_Input10 = 21
Device_Output1 = 22
Device_Output2 = 23
Device_Output3 = 24
Device_Output4 = 25
Device_Output5 = 26
Device_Output6 = 27
Device_Output7 = 28
Device_Output8 = 29
Device_Output9 = 30
Device_Output10 = 31
MAX_DISPLAY_LEN=64
PGIE_CLASS_ID_TOOTHBRUSH = 79
PGIE_CLASS_ID_HAIR_DRYER = 78
PGIE_CLASS_ID_TEDDY_BEAR = 77
PGIE_CLASS_ID_SCISSORS = 76
PGIE_CLASS_ID_VASE = 75
PGIE_CLASS_ID_CLOCK = 74
PGIE_CLASS_ID_BOOK = 73
PGIE_CLASS_ID_REFRIGERATOR = 72
PGIE_CLASS_ID_SINK = 71
PGIE_CLASS_ID_TOASTER = 70
PGIE_CLASS_ID_OVEN = 69
PGIE_CLASS_ID_MICROWAVE = 68
PGIE_CLASS_ID_CELL_PHONE = 67
PGIE_CLASS_ID_KEYBOARD = 66
PGIE_CLASS_ID_REMOTE = 65
PGIE_CLASS_ID_MOUSE = 64
PGIE_CLASS_ID_LAPTOP = 63
PGIE_CLASS_ID_TVMONITOR = 62
PGIE_CLASS_ID_TOILET = 61
PGIE_CLASS_ID_DININGTABLE= 60
PGIE_CLASS_ID_BED = 59
PGIE_CLASS_ID_POTTEDPLANT = 58
PGIE_CLASS_ID_SOFA = 57
PGIE_CLASS_ID_CHAIR = 56
PGIE_CLASS_ID_CAKE = 55
PGIE_CLASS_ID_DONUT = 54
PGIE_CLASS_ID_PIZZA = 53
PGIE_CLASS_ID_HOT_DOG = 52
PGIE_CLASS_ID_CARROT = 51
PGIE_CLASS_ID_BROCCOLI = 50
PGIE_CLASS_ID_ORANGE = 49
PGIE_CLASS_ID_SANDWICH = 48
PGIE_CLASS_ID_APPLE = 47
PGIE_CLASS_ID_BANANA = 46
PGIE_CLASS_ID_BOWL = 45
PGIE_CLASS_ID_SPOON = 44
PGIE_CLASS_ID_KNIFE = 43
PGIE_CLASS_ID_FORK = 42
PGIE_CLASS_ID_CUP = 41
PGIE_CLASS_ID_WINE_GLASS = 40
PGIE_CLASS_ID_BOTTLE = 39
PGIE_CLASS_ID_TENNIS_RACKET = 38
PGIE_CLASS_ID_SURFBOARD = 37
PGIE_CLASS_ID_SKATEBOARD = 36
PGIE_CLASS_ID_BASEBALL_GLOVE = 35
PGIE_CLASS_ID_BASEBALL_BAT = 34
PGIE_CLASS_ID_KITE = 33
PGIE_CLASS_ID_SPORTS_BALL = 32
PGIE_CLASS_ID_SNOWBOARD = 31
PGIE_CLASS_ID_SKIS = 30
PGIE_CLASS_ID_FRISBEE = 29
PGIE_CLASS_ID_SUITCASE = 28
PGIE_CLASS_ID_TIE = 27
PGIE_CLASS_ID_HANDBAG = 26
PGIE_CLASS_ID_UMBRELLA = 25
PGIE_CLASS_ID_BACKPACK = 24
PGIE_CLASS_ID_GIRAFFE = 23
PGIE_CLASS_ID_ZEBRA = 22
PGIE_CLASS_ID_BEAR = 21
PGIE_CLASS_ID_ELEPHANT = 20
PGIE_CLASS_ID_COW = 19
PGIE_CLASS_ID_SHEEP = 18
PGIE_CLASS_ID_HORSE = 17
PGIE_CLASS_ID_DOG = 16
PGIE_CLASS_ID_CAT = 15
PGIE_CLASS_ID_BIRD = 14
PGIE_CLASS_ID_BENCH = 13
PGIE_CLASS_ID_PARKING_METER = 12
PGIE_CLASS_ID_STOP_SIGN = 11
PGIE_CLASS_ID_FIRE_HYDRANT = 10
PGIE_CLASS_ID_TRAFFIC_LIGHT = 9
PGIE_CLASS_ID_BOAT = 8
PGIE_CLASS_ID_TRUCK = 7
PGIE_CLASS_ID_TRAIN = 6
PGIE_CLASS_ID_BUS = 5
PGIE_CLASS_ID_AEROPLANE = 4
PGIE_CLASS_ID_MOTORBIKE = 3
PGIE_CLASS_ID_VEHICLE = 2
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 0
pgie_classes_str= ["Toothbrush", "Hair dryer", "Teddy bear","Scissors","Vase", "Clock", "Book","Refrigerator", "Sink", "Toaster","Oven","Microwave", "Cell phone", "Keyboard","Remote", "Mouse", "Laptop","Tvmonitor","Toilet", "Diningtable", "Bed","Pottedplant", "Sofa", "Chair","Cake","Donut", "Pizza", "Hot dog","Carrot", "Broccli", "Orange","Sandwich","Apple", "Banana", "Bowl","Spoon", "Knife", "Fork","Cup","Wine Glass", "Bottle", "Tennis racket","Surfboard", "Skateboard", "Baseball glove","Baseball bat","Kite", "Sports ball", "Snowboard","Skis", "Frisbee", "Suitcase","Tie","Handbag", "Umbrella", "Backpack","Giraffe", "Zebra", "Bear","Elephant","Cow", "Sheep", "Horse","Dog", "Cat", "Bird","Bench","Parking meter", "Stop sign", "Fire hydrant","Traffic light", "Boat", "Truck","Train","Bus", "Areoplane", "Motorbike","Car", "Bicycle", "Person"]
######################################################################
# The callback for when the client receives a CONNACK response from the server.
######################################################################
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected with result code "+str(rc))
else:
print("Failed to connect with result code "+str(rc))
sys.exit()
global myGroupId
global myNodeName
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("spBv1.0/" + myGroupId + "/NCMD/" + myNodeName + "/#")
client.subscribe("spBv1.0/" + myGroupId + "/DCMD/" + myNodeName + "/#")
######################################################################
######################################################################
# The callback for when a PUBLISH message is received from the server.
######################################################################
def on_message(client, userdata, msg):
print("Message arrived: " + msg.topic)
tokens = msg.topic.split("/")
global newValue1
global newValue2
global newValue3
global newValue4
global newValue5
global newValue6
global newValue7
global newValue8
global newValue9
global newValue10
if tokens[0] == "spBv1.0" and tokens[1] == myGroupId and (tokens[2] == "NCMD" or tokens[2] == "DCMD") and tokens[3] == myNodeName:
inboundPayload = sparkplug_b_pb2.Payload()
inboundPayload.ParseFromString(msg.payload)
for metric in inboundPayload.metrics:
if metric.name == "Node Control/Next Server" or metric.alias == AliasMap.Next_Server:
# 'Node Control/Next Server' is an NCMD used to tell the device/client application to
# disconnect from the current MQTT server and connect to the next MQTT server in the
# list of available servers. This is used for clients that have a pool of MQTT servers
# to connect to.
print ("'Node Control/Next Server' is not implemented in this example")
elif metric.name == "Node Control/Rebirth" or metric.alias == AliasMap.Rebirth:
# 'Node Control/Rebirth' is an NCMD used to tell the device/client application to resend
# its full NBIRTH and DBIRTH again. MQTT Engine will send this NCMD to a device/client
# application if it receives an NDATA or DDATA with a metric that was not published in the
# original NBIRTH or DBIRTH. This is why the application must send all known metrics in
# its original NBIRTH and DBIRTH messages.
publishBirth()
elif metric.name == "Node Control/Reboot" or metric.alias == AliasMap.Reboot:
# 'Node Control/Reboot' is an NCMD used to tell a device/client application to reboot
# This can be used for devices that need a full application reset via a soft reboot.
# In this case, we fake a full reboot with a republishing of the NBIRTH and DBIRTH
# messages.
publishBirth()
elif metric.name == "output/Device Metric2" or metric.alias == AliasMap.Device_Metric2:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue = metric.int_value
print ("CMD message for output/Device Metric2 - New Value: {}".format(newValue))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric2, MetricDataType.Int16, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 1
#publishBirth()
elif metric.name == "output/Device Input1" or metric.alias == AliasMap.Device_Input1:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue1 = metric.int_value
print ("CMD message for output/Device Input1 - New Value: {}".format(newValue1))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input1, MetricDataType.Int16, newValue1)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 2
#publishBirth()
elif metric.name == "output/Device Input2" or metric.alias == AliasMap.Device_Input2:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue2 = metric.int_value
print ("CMD message for output/Device Input2 - New Value: {}".format(newValue2))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input2, MetricDataType.Int16, newValue2)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 3
#publishBirth()
elif metric.name == "output/Device Input3" or metric.alias == AliasMap.Device_Input3:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue3 = metric.int_value
print ("CMD message for output/Device Input3 - New Value: {}".format(newValue3))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input3, MetricDataType.Int16, newValue3)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 4
#publishBirth()
elif metric.name == "output/Device Input4" or metric.alias == AliasMap.Device_Input4:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue4 = metric.int_value
print ("CMD message for output/Device Input4 - New Value: {}".format(newValue4))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input4, MetricDataType.Int16, newValue4)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 5
#publishBirth()
elif metric.name == "output/Device Input5" or metric.alias == AliasMap.Device_Input5:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue5 = metric.int_value
print ("CMD message for output/Device Input5 - New Value: {}".format(newValue5))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input5, MetricDataType.Int16, newValue5)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 6
#publishBirth()
elif metric.name == "output/Device Input6" or metric.alias == AliasMap.Device_Input6:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue6 = metric.int_value
print ("CMD message for output/Device Input6 - New Value: {}".format(newValue6))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input6, MetricDataType.Int16, newValue6)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 7
#publishBirth()
elif metric.name == "output/Device Input7" or metric.alias == AliasMap.Device_Input7:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue7 = metric.int_value
print ("CMD message for output/Device Input7 - New Value: {}".format(newValue7))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input7, MetricDataType.Int16, newValue7)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 8
#publishBirth()
elif metric.name == "output/Device Input8" or metric.alias == AliasMap.Device_Input8:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue8 = metric.int_value
print ("CMD message for output/Device Input8 - New Value: {}".format(newValue8))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input8, MetricDataType.Int16, newValue8)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 9
#publishBirth()
elif metric.name == "output/Device Input9" or metric.alias == AliasMap.Device_Input9:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue9 = metric.int_value
print ("CMD message for output/Device Input9 - New Value: {}".format(newValue9))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input9, MetricDataType.Int16, newValue9)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 10
#publishBirth()
elif metric.name == "output/Device Input10" or metric.alias == AliasMap.Device_Input10:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue10 = metric.int_value
print ("CMD message for output/Device Input10 - New Value: {}".format(newValue10))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input10, MetricDataType.Int16, newValue10)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
#global newValue4
#publishBirth()
elif metric.name == "output/Device Metric4" or metric.alias == AliasMap.Device_Metric4:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is an Int16 because of how we declared it in the DBIRTH
newValue = metric.string_value
print ("CMD message for output/Device Metric4 - New Value: {}".format(newValue))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric4, MetricDataType.String, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
#publishBirth()
elif metric.name == "output/Device Metric3" or metric.alias == AliasMap.Device_Metric3:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
                # We know this is a Boolean because of how we declared it in the DBIRTH
newValue = metric.boolean_value
print ("CMD message for output/Device Metric3 - New Value: %r" % newValue)
# Create the DDATA payload - use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric3, MetricDataType.Boolean, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
else:
print ("Unknown command: " + metric.name)
else:
print ("Unknown command...")
print ("Done publishing")
#####################################################################
######################################################################
######################################################################
# Publish the BIRTH certificates
######################################################################
def publishBirth():
publishNodeBirth()
publishDeviceBirth()
######################################################################
######################################################################
# Publish the NBIRTH certificate
######################################################################
def publishNodeBirth():
print ("Publishing Node Birth")
# Create the node birth payload
payload = sparkplug.getNodeBirthPayload()
# Set up the Node Controls
addMetric(payload, "Node Control/Next Server", AliasMap.Next_Server, MetricDataType.Boolean, False)
addMetric(payload, "Node Control/Rebirth", AliasMap.Rebirth, MetricDataType.Boolean, False)
addMetric(payload, "Node Control/Reboot", AliasMap.Reboot, MetricDataType.Boolean, False)
# Publish the node birth certificate
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/NBIRTH/" + myNodeName, byteArray, 0, False)
######################################################################
######################################################################
# Publish the DBIRTH certificate
######################################################################
def publishDeviceBirth():
print ("Publishing Device Birth")
# Get the payload
payload = sparkplug.getDeviceBirthPayload()
# Add some device metrics
addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx )
addMetric(payload, "input/Device Metric0", AliasMap.Device_Metric0, MetricDataType.String, "hello device")
addMetric(payload, "input/Device Metric1", AliasMap.Device_Metric1, MetricDataType.Boolean, True)
addMetric(payload, "input/Number of Objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx )
addMetric(payload, "output/Device Metric2", AliasMap.Device_Metric2, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input1", AliasMap.Device_Input1, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input2", AliasMap.Device_Input2, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input3", AliasMap.Device_Input3, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input4", AliasMap.Device_Input4, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input5", AliasMap.Device_Input5, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input6", AliasMap.Device_Input6, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input7", AliasMap.Device_Input7, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input8", AliasMap.Device_Input8, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input9", AliasMap.Device_Input9, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input10", AliasMap.Device_Input10, MetricDataType.Int16, 0)
addMetric(payload,"input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Metric3", AliasMap.Device_Metric3, MetricDataType.Boolean, True)
addMetric(payload, "output/Device Metric4", AliasMap.Device_Metric4, MetricDataType.String, "start")
# Publish the initial data with the Device BIRTH certificate
totalByteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DBIRTH/" + myNodeName + "/" + myDeviceName, totalByteArray, 0, False)
######################################################################
######################################################################
def osd_sink_pad_buffer_probe(pad,info,u_data):
global frame_numberx
global num_rectsx
global Object1
global Object2
global Object3
global Object4
global Object5
global Object6
global Object7
global Object8
global Object9
global Object10
    # Initializing object counter with 0.
obj_counter = {
PGIE_CLASS_ID_TOOTHBRUSH:0,
PGIE_CLASS_ID_HAIR_DRYER:0,
PGIE_CLASS_ID_TEDDY_BEAR:0,
PGIE_CLASS_ID_SCISSORS:0,
PGIE_CLASS_ID_VASE:0,
PGIE_CLASS_ID_CLOCK:0,
PGIE_CLASS_ID_BOOK:0,
PGIE_CLASS_ID_REFRIGERATOR:0,
PGIE_CLASS_ID_SINK:0,
PGIE_CLASS_ID_TOASTER:0,
PGIE_CLASS_ID_OVEN:0,
PGIE_CLASS_ID_MICROWAVE:0,
PGIE_CLASS_ID_CELL_PHONE:0,
PGIE_CLASS_ID_KEYBOARD:0,
PGIE_CLASS_ID_REMOTE:0,
PGIE_CLASS_ID_MOUSE:0,
PGIE_CLASS_ID_LAPTOP:0,
PGIE_CLASS_ID_TVMONITOR:0,
PGIE_CLASS_ID_TOILET:0,
PGIE_CLASS_ID_DININGTABLE:0,
PGIE_CLASS_ID_BED:0,
PGIE_CLASS_ID_POTTEDPLANT:0,
PGIE_CLASS_ID_SOFA:0,
PGIE_CLASS_ID_CHAIR:0,
PGIE_CLASS_ID_CAKE:0,
PGIE_CLASS_ID_DONUT:0,
PGIE_CLASS_ID_PIZZA:0,
PGIE_CLASS_ID_HOT_DOG:0,
PGIE_CLASS_ID_CARROT:0,
PGIE_CLASS_ID_BROCCOLI:0,
PGIE_CLASS_ID_ORANGE:0,
PGIE_CLASS_ID_SANDWICH:0,
PGIE_CLASS_ID_APPLE:0,
PGIE_CLASS_ID_BANANA:0,
PGIE_CLASS_ID_BOWL:0,
PGIE_CLASS_ID_SPOON:0,
PGIE_CLASS_ID_KNIFE:0,
PGIE_CLASS_ID_FORK:0,
PGIE_CLASS_ID_CUP:0,
PGIE_CLASS_ID_WINE_GLASS:0,
PGIE_CLASS_ID_BOTTLE:0,
PGIE_CLASS_ID_TENNIS_RACKET:0,
PGIE_CLASS_ID_SURFBOARD:0,
PGIE_CLASS_ID_SKATEBOARD:0,
PGIE_CLASS_ID_BASEBALL_GLOVE:0,
PGIE_CLASS_ID_BASEBALL_BAT:0,
PGIE_CLASS_ID_KITE:0,
PGIE_CLASS_ID_SPORTS_BALL:0,
PGIE_CLASS_ID_SNOWBOARD:0,
PGIE_CLASS_ID_SKIS:0,
PGIE_CLASS_ID_FRISBEE:0,
PGIE_CLASS_ID_SUITCASE:0,
PGIE_CLASS_ID_TIE:0,
PGIE_CLASS_ID_HANDBAG:0,
PGIE_CLASS_ID_UMBRELLA:0,
PGIE_CLASS_ID_BACKPACK:0,
PGIE_CLASS_ID_GIRAFFE:0,
PGIE_CLASS_ID_ZEBRA:0,
PGIE_CLASS_ID_BEAR:0,
PGIE_CLASS_ID_ELEPHANT:0,
PGIE_CLASS_ID_COW:0,
PGIE_CLASS_ID_SHEEP:0,
PGIE_CLASS_ID_HORSE:0,
PGIE_CLASS_ID_DOG:0,
PGIE_CLASS_ID_CAT:0,
PGIE_CLASS_ID_BIRD:0,
PGIE_CLASS_ID_BENCH:0,
PGIE_CLASS_ID_PARKING_METER:0,
PGIE_CLASS_ID_STOP_SIGN:0,
PGIE_CLASS_ID_FIRE_HYDRANT:0,
PGIE_CLASS_ID_TRAFFIC_LIGHT:0,
PGIE_CLASS_ID_BOAT:0,
PGIE_CLASS_ID_TRUCK:0,
PGIE_CLASS_ID_TRAIN:0,
PGIE_CLASS_ID_BUS:0,
PGIE_CLASS_ID_AEROPLANE:0,
PGIE_CLASS_ID_MOTORBIKE:0,
PGIE_CLASS_ID_VEHICLE:0,
PGIE_CLASS_ID_BICYCLE:0,
PGIE_CLASS_ID_PERSON:0
}
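    # A shorter equivalent (sketch): obj_counter = collections.defaultdict(int)
    # would avoid enumerating every class id explicitly and still support the
    # per-class increments below.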
num_rects=0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.NvDsFrameMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number=frame_meta.frame_num
frame_numberx=frame_meta.frame_num
num_rects = frame_meta.num_obj_meta
num_rectsx = frame_meta.num_obj_meta
l_obj=frame_meta.obj_meta_list
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
try:
l_obj=l_obj.next
except StopIteration:
break
# Acquiring a display meta object. The memory ownership remains in
# the C code so downstream plugins can still access it. Otherwise
# the garbage collector will claim it when this probe function exits.
display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
display_meta.num_labels = 1
py_nvosd_text_params = display_meta.text_params[0]
# Setting display text to be shown on screen
# Note that the pyds module allocates a buffer for the string, and the
# memory will not be claimed by the garbage collector.
# Reading the display_text field here will return the C address of the
# allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Cup_count={} Bottle_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_CUP], obj_counter[PGIE_CLASS_ID_BOTTLE])
Object1 = obj_counter[newValue1]
Object2 = obj_counter[newValue2]
Object3 = obj_counter[newValue3]
Object4 = obj_counter[newValue4]
Object5 = obj_counter[newValue5]
Object6 = obj_counter[newValue6]
Object7 = obj_counter[newValue7]
Object8 = obj_counter[newValue8]
Object9 = obj_counter[newValue9]
Object10 = obj_counter[newValue10]
# Now set the offsets where the string should appear
py_nvosd_text_params.x_offset = 10
py_nvosd_text_params.y_offset = 12
        # Font, font color and font size
py_nvosd_text_params.font_params.font_name = "Serif"
py_nvosd_text_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
py_nvosd_text_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Using pyds.get_string() to get display_text as string
# print(pyds.get_string(py_nvosd_text_params.display_text))
#pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
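        # NOTE: because nvds_add_display_meta_to_frame() above is left commented
        # out, the acquired display_meta is never attached to the frame, so the
        # OSD text composed above will not actually be rendered; re-enabling the
        # call should restore the on-screen overlay.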
try:
l_frame=l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
######################################################################
def main(args):
# Check input arguments
if len(args) != 2:
sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
sys.exit(1)
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
    # Source element for capturing frames from the USB (V4L2) camera
print("Creating Source \n ")
source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
if not source:
sys.stderr.write(" Unable to create Source \n")
caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
if not caps_v4l2src:
sys.stderr.write(" Unable to create v4l2src capsfilter \n")
print("Creating Video Converter \n")
# Adding videoconvert -> nvvideoconvert as not all
# raw formats are supported by nvvideoconvert;
# Say YUYV is unsupported - which is the common
# raw format for many logi usb cams
# In case we have a camera with raw format supported in
# nvvideoconvert, GStreamer plugins' capability negotiation
# shall be intelligent enough to reduce compute by
# videoconvert doing passthrough (TODO we need to confirm this)
# videoconvert to make sure a superset of raw formats are supported
vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
if not vidconvsrc:
sys.stderr.write(" Unable to create videoconvert \n")
# nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
if not nvvidconvsrc:
sys.stderr.write(" Unable to create Nvvideoconvert \n")
caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
if not caps_vidconvsrc:
sys.stderr.write(" Unable to create capsfilter \n")
# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
# Use nvinfer to run inferencing on camera's output,
# behaviour of inferencing is set through config file
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
# Use convertor to convert from NV12 to RGBA as required by nvosd
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv \n")
# Create OSD to draw on the converted RGBA buffer
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
sys.stderr.write(" Unable to create nvosd \n")
# Finally render the osd output
if is_aarch64():
transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
print("Creating EGLSink \n")
sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
if not sink:
sys.stderr.write(" Unable to create egl sink \n")
print("Playing cam %s " %args[1])
caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
source.set_property('device', args[1])
streammux.set_property('width', 640)
streammux.set_property('height', 480)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
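    # batched-push-timeout is expressed in microseconds, so 4000000 means the
    # muxer waits at most 4 seconds for a batch to fill before pushing it.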
pgie.set_property('config-file-path', "config_infer_primary_yoloV3.txt")
# Set sync = false to avoid late frame drops at the display-sink
sink.set_property('sync', False)
print("Adding elements to Pipeline \n")
pipeline.add(source)
pipeline.add(caps_v4l2src)
pipeline.add(vidconvsrc)
pipeline.add(nvvidconvsrc)
pipeline.add(caps_vidconvsrc)
pipeline.add(streammux)
pipeline.add(pgie)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(sink)
if is_aarch64():
pipeline.add(transform)
# we link the elements together
# v4l2src -> nvvideoconvert -> mux ->
# nvinfer -> nvvideoconvert -> nvosd -> video-renderer
print("Linking elements in the Pipeline \n")
source.link(caps_v4l2src)
caps_v4l2src.link(vidconvsrc)
vidconvsrc.link(nvvidconvsrc)
nvvidconvsrc.link(caps_vidconvsrc)
sinkpad = streammux.get_request_pad("sink_0")
if not sinkpad:
sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = caps_vidconvsrc.get_static_pad("src")
if not srcpad:
sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
srcpad.link(sinkpad)
streammux.link(pgie)
pgie.link(nvvidconv)
nvvidconv.link(nvosd)
if is_aarch64():
nvosd.link(transform)
transform.link(sink)
else:
nvosd.link(sink)
    # Create an event loop and feed GStreamer bus messages to it
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)
    # Add a probe to be notified of the generated metadata. The probe is attached
    # to the sink pad of the OSD element because, by that point, the buffer has
    # accumulated all of the metadata.
osdsinkpad = nvosd.get_static_pad("sink")
if not osdsinkpad:
sys.stderr.write(" Unable to get sink pad of nvosd \n")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
######################################################################
# Create the node death payload
deathPayload = sparkplug.getNodeDeathPayload()
# Start of main program - Set up the MQTT client connection
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(myUsername, myPassword)
deathByteArray = bytearray(deathPayload.SerializeToString())
client.will_set("spBv1.0/" + myGroupId + "/NDEATH/" + myNodeName, deathByteArray, 0, False)
client.connect(serverUrl, 1883, 60)
# Publish the birth certificates
publishBirth()
def foo():
# Periodically publish some new data
payload = sparkplug.getDdataPayload()
        # Add the current frame statistics and per-class object counts to the payload
addMetric(payload, "input/number of objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx )
addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx )
addMetric(payload,"input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, Object1)
addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, Object2)
addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, Object3)
addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, Object4)
addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, Object5)
addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, Object6)
addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, Object7)
addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, Object8)
addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, Object9)
addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, Object10)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Sit and wait for inbound or outbound events
for _ in range(1):
time.sleep(1)
client.loop()
threading.Timer(WAIT_SECONDS, foo).start()
foo()
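    # foo() publishes one DDATA message and then re-schedules itself every
    # WAIT_SECONDS seconds via threading.Timer, so Sparkplug metric updates keep
    # flowing in parallel with the GStreamer main loop (each cycle starts a new
    # timer thread).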
######################################################################
print("Starting pipeline \n")
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
except:
pass
#cleanup
print("Exiting app\n")
pipeline.set_state(Gst.State.NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv)) | en | 0.742755 | #!/usr/bin/env python3 ################################################################################ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ################################################################################ # Application Variables ###################################################################### # The callback for when the client receives a CONNACK response from the server. ###################################################################### # Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed. #") #") ###################################################################### ###################################################################### # The callback for when a PUBLISH message is received from the server. ###################################################################### # 'Node Control/Next Server' is an NCMD used to tell the device/client application to # disconnect from the current MQTT server and connect to the next MQTT server in the # list of available servers. This is used for clients that have a pool of MQTT servers # to connect to. # 'Node Control/Rebirth' is an NCMD used to tell the device/client application to resend # its full NBIRTH and DBIRTH again. MQTT Engine will send this NCMD to a device/client # application if it receives an NDATA or DDATA with a metric that was not published in the # original NBIRTH or DBIRTH. This is why the application must send all known metrics in # its original NBIRTH and DBIRTH messages. # 'Node Control/Reboot' is an NCMD used to tell a device/client application to reboot # This can be used for devices that need a full application reset via a soft reboot. # In this case, we fake a full reboot with a republishing of the NBIRTH and DBIRTH # messages. # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 1 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. 
# So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 2 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 3 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 4 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 5 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 6 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 7 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 8 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. 
# So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 9 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data # Publish a message Input 10 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data #global newValue4 #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Int16 because of how we declated it in the DBIRTH # Create the DDATA payload - Use the alias because this isn't the DBIRTH # Publish a message data #publishBirth() # This is a metric we declared in our DBIRTH message and we're emulating an output. # So, on incoming 'writes' to the output we must publish a DDATA with the new output # value. If this were a real output we'd write to the output and then read it back # before publishing a DDATA message. # We know this is an Boolean because of how we declated it in the DBIRTH # Create the DDATA payload - use the alias because this isn't the DBIRTH # Publish a message data ##################################################################### ###################################################################### ###################################################################### # Publish the BIRTH certificates ###################################################################### ###################################################################### ###################################################################### # Publish the NBIRTH certificate ###################################################################### # Create the node birth payload # Set up the Node Controls # Publish the node birth certificate ###################################################################### ###################################################################### # Publish the DBIRTH certificate ###################################################################### # Get the payload # Add some device metrics # Publish the initial data with the Device BIRTH certificate ###################################################################### ###################################################################### #Intiallizing object counter with 0. 
# Retrieve batch metadata from the gst_buffer # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the # C address of gst_buffer as input, which is obtained with hash(gst_buffer) # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta # The casting is done by pyds.NvDsFrameMeta.cast() # The casting also keeps ownership of the underlying memory # in the C code, so the Python garbage collector will leave # it alone. # Casting l_obj.data to pyds.NvDsObjectMeta # Acquiring a display meta object. The memory ownership remains in # the C code so downstream plugins can still access it. Otherwise # the garbage collector will claim it when this probe function exits. # Setting display text to be shown on screen # Note that the pyds module allocates a buffer for the string, and the # memory will not be claimed by the garbage collector. # Reading the display_text field here will return the C address of the # allocated string. Use pyds.get_string() to get the string content. # Now set the offsets where the string should appear # Font , font-color and font-size # set(red, green, blue, alpha); set to White # Text background color # set(red, green, blue, alpha); set to Black # Using pyds.get_string() to get display_text as string # print(pyds.get_string(py_nvosd_text_params.display_text)) #pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta) ###################################################################### # Check input arguments # Standard GStreamer initialization # Create gstreamer elements # Create Pipeline element that will form a connection of other elements # Source element for reading from the file # Adding videoconvert -> nvvideoconvert as not all # raw formats are supported by nvvideoconvert; # Say YUYV is unsupported - which is the common # raw format for many logi usb cams # In case we have a camera with raw format supported in # nvvideoconvert, GStreamer plugins' capability negotiation # shall be intelligent enough to reduce compute by # videoconvert doing passthrough (TODO we need to confirm this) # videoconvert to make sure a superset of raw formats are supported # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API) # Create nvstreammux instance to form batches from one or more sources. # Use nvinfer to run inferencing on camera's output, # behaviour of inferencing is set through config file # Use convertor to convert from NV12 to RGBA as required by nvosd # Create OSD to draw on the converted RGBA buffer # Finally render the osd output # Set sync = false to avoid late frame drops at the display-sink # we link the elements together # v4l2src -> nvvideoconvert -> mux -> # nvinfer -> nvvideoconvert -> nvosd -> video-renderer # create an event loop and feed gstreamer bus mesages to it # Lets add probe to get informed of the meta data generated, we add probe to # the sink pad of the osd element, since by that time, the buffer would have # had got all the metadata. ###################################################################### # Create the node death payload # Start of main program - Set up the MQTT client connection # Publish the birth certificates # Periodically publish some new data # Add some random data to the inputs # Publish a message data # Sit and wait for inbound or outbound events ###################################################################### #cleanup | 1.302672 | 1 |
src/pyams_i18n/tests/__init__.py | Py-AMS/pyams-i18n | 0 | 854 | <filename>src/pyams_i18n/tests/__init__.py
#
# Copyright (c) 2015-2019 <NAME> <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""
Generic test cases for pyams_i18n doctests
"""
__docformat__ = 'restructuredtext'
import os
import sys
def get_package_dir(value):
"""Get package directory"""
package_dir = os.path.split(value)[0]
if package_dir not in sys.path:
sys.path.append(package_dir)
return package_dir
| <filename>src/pyams_i18n/tests/__init__.py
#
# Copyright (c) 2015-2019 <NAME> <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""
Generic test cases for pyams_i18n doctests
"""
__docformat__ = 'restructuredtext'
import os
import sys
def get_package_dir(value):
"""Get package directory"""
package_dir = os.path.split(value)[0]
if package_dir not in sys.path:
sys.path.append(package_dir)
return package_dir
| en | 0.643337 | # # Copyright (c) 2015-2019 <NAME> <tflorac AT ulthar.net> # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # Generic test cases for pyams_i18n doctests Get package directory | 1.759245 | 2 |
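A minimal usage sketch for the get_package_dir helper in the row above; the __main__ guard and the call site are illustrative assumptions, not part of the pyams_i18n package.
import os
from pyams_i18n.tests import get_package_dir

if __name__ == '__main__':
    # Registering this file's parent directory on sys.path exactly once,
    # the same way the doctest bootstrap above uses it.
    here = get_package_dir(os.path.abspath(__file__))
    print(here)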
tests/test_custom_experts.py | protagohhz/hivemind | 1,026 | 855 | import os
import pytest
import torch
from hivemind import RemoteExpert
from hivemind.moe.server import background_server
CUSTOM_EXPERTS_PATH = os.path.join(os.path.dirname(__file__), "test_utils", "custom_networks.py")
@pytest.mark.forked
def test_custom_expert(hid_dim=16):
with background_server(
expert_cls="perceptron",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = torch.randn(batch_size, hid_dim)
output0 = expert0(batch)
output1 = expert1(batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
@pytest.mark.forked
def test_multihead_expert(hid_dim=16):
with background_server(
expert_cls="multihead",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = (
torch.randn(batch_size, hid_dim),
torch.randn(batch_size, 2 * hid_dim),
torch.randn(batch_size, 3 * hid_dim),
)
output0 = expert0(*batch)
output1 = expert1(*batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
| import os
import pytest
import torch
from hivemind import RemoteExpert
from hivemind.moe.server import background_server
CUSTOM_EXPERTS_PATH = os.path.join(os.path.dirname(__file__), "test_utils", "custom_networks.py")
@pytest.mark.forked
def test_custom_expert(hid_dim=16):
with background_server(
expert_cls="perceptron",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = torch.randn(batch_size, hid_dim)
output0 = expert0(batch)
output1 = expert1(batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
@pytest.mark.forked
def test_multihead_expert(hid_dim=16):
with background_server(
expert_cls="multihead",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = (
torch.randn(batch_size, hid_dim),
torch.randn(batch_size, 2 * hid_dim),
torch.randn(batch_size, 3 * hid_dim),
)
output0 = expert0(*batch)
output1 = expert1(*batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
| none | 1 | 1.903273 | 2 |
|
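The test above imports CUSTOM_EXPERTS_PATH, a companion module (tests/test_utils/custom_networks.py) that is not included in this row. The torch module below is only a guess at the general shape of such a "perceptron" expert — a plain feed-forward block; hivemind's actual expert-registration mechanism is not shown here.
import torch
import torch.nn as nn

class PerceptronLikeExpert(nn.Module):
    """Hypothetical stand-in for the 'perceptron' expert class used by the test."""

    def __init__(self, hid_dim: int = 16):
        super().__init__()
        self.ffn = nn.Sequential(
            nn.Linear(hid_dim, 4 * hid_dim),
            nn.ReLU(),
            nn.Linear(4 * hid_dim, hid_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.ffn(x)

if __name__ == "__main__":
    expert = PerceptronLikeExpert(hid_dim=16)
    out = expert(torch.randn(4, 16))
    out.sum().backward()  # same forward/backward pattern the test exercises remotely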
tqsdk/demo/example/momentum.py | boyscout2008/tqsdk-python | 0 | 856 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Ringo"
'''
价格动量 策略 (难度:初级)
参考: https://www.shinnytech.com/blog/momentum-strategy/
注: 该示例策略仅用于功能示范, 实盘时请根据自己的策略/经验进行修改
'''
from tqsdk import TqAccount, TqApi, TargetPosTask
# 设置指定合约,获取N条K线计算价格动量
SYMBOL = "SHFE.au1912"
N = 15
api = TqApi()
klines = api.get_kline_serial(SYMBOL, 60*60*24, N)
quote = api.get_quote(SYMBOL)
target_pos = TargetPosTask(api, SYMBOL)
position = api.get_position(SYMBOL)
# 编写价格动量函数AR,以前N-1日K线计算价格动量ar
def AR(kline1):
spread_ho = sum(kline1.high[:-1] - kline1.open[:-1])
spread_oc = sum(kline1.open[:-1] - kline1.low[:-1])
# spread_oc 为0时,设置为最小价格跳动值
if spread_oc == 0:
spread_oc = quote.price_tick
ar = (spread_ho/spread_oc)*100
return ar
ar = AR(klines)
print("策略开始启动")
while True:
api.wait_update()
# 生成新K线时,重新计算价格动量值ar
if api.is_changing(klines.iloc[-1], "datetime"):
ar = AR(klines)
print("价格动量是:", ar)
# 每次最新价发生变动时,重新进行判断
if api.is_changing(quote, "last_price"):
# 开仓策略
if position.pos_long == 0 and position.pos_short == 0:
# 如果ar大于110并且小于150,开多仓
if 110 < ar < 150:
print("价值动量超过110,小于150,做多")
target_pos.set_target_volume(100)
# 如果ar大于50,小于90,开空仓
elif 50 < ar < 90:
print("价值动量大于50,小于90,做空")
target_pos.set_target_volume(-100)
# 止损策略,多头下当前ar值小于90则平仓止损,空头下当前ar值大于110则平仓止损
elif (position.pos_long > 0 and ar < 90) or (position.pos_short > 0 and ar > 110):
print("止损平仓")
target_pos.set_target_volume(0)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Ringo"
'''
价格动量 策略 (难度:初级)
参考: https://www.shinnytech.com/blog/momentum-strategy/
注: 该示例策略仅用于功能示范, 实盘时请根据自己的策略/经验进行修改
'''
from tqsdk import TqAccount, TqApi, TargetPosTask
# 设置指定合约,获取N条K线计算价格动量
SYMBOL = "SHFE.au1912"
N = 15
api = TqApi()
klines = api.get_kline_serial(SYMBOL, 60*60*24, N)
quote = api.get_quote(SYMBOL)
target_pos = TargetPosTask(api, SYMBOL)
position = api.get_position(SYMBOL)
# 编写价格动量函数AR,以前N-1日K线计算价格动量ar
def AR(kline1):
spread_ho = sum(kline1.high[:-1] - kline1.open[:-1])
spread_oc = sum(kline1.open[:-1] - kline1.low[:-1])
# spread_oc 为0时,设置为最小价格跳动值
if spread_oc == 0:
spread_oc = quote.price_tick
ar = (spread_ho/spread_oc)*100
return ar
ar = AR(klines)
print("策略开始启动")
while True:
api.wait_update()
# 生成新K线时,重新计算价格动量值ar
if api.is_changing(klines.iloc[-1], "datetime"):
ar = AR(klines)
print("价格动量是:", ar)
# 每次最新价发生变动时,重新进行判断
if api.is_changing(quote, "last_price"):
# 开仓策略
if position.pos_long == 0 and position.pos_short == 0:
# 如果ar大于110并且小于150,开多仓
if 110 < ar < 150:
print("价值动量超过110,小于150,做多")
target_pos.set_target_volume(100)
# 如果ar大于50,小于90,开空仓
elif 50 < ar < 90:
print("价值动量大于50,小于90,做空")
target_pos.set_target_volume(-100)
# 止损策略,多头下当前ar值小于90则平仓止损,空头下当前ar值大于110则平仓止损
elif (position.pos_long > 0 and ar < 90) or (position.pos_short > 0 and ar > 110):
print("止损平仓")
target_pos.set_target_volume(0)
| zh | 0.816841 | #!/usr/bin/env python # -*- coding: utf-8 -*- 价格动量 策略 (难度:初级) 参考: https://www.shinnytech.com/blog/momentum-strategy/ 注: 该示例策略仅用于功能示范, 实盘时请根据自己的策略/经验进行修改 # 设置指定合约,获取N条K线计算价格动量 # 编写价格动量函数AR,以前N-1日K线计算价格动量ar # spread_oc 为0时,设置为最小价格跳动值 # 生成新K线时,重新计算价格动量值ar # 每次最新价发生变动时,重新进行判断 # 开仓策略 # 如果ar大于110并且小于150,开多仓 # 如果ar大于50,小于90,开空仓 # 止损策略,多头下当前ar值小于90则平仓止损,空头下当前ar值大于110则平仓止损 | 2.443273 | 2 |
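For readers who do not follow the Chinese comments in the sample above: AR is a price-momentum ratio computed over all but the latest daily bar, sum(high - open) / sum(open - low) * 100; the strategy opens a long for 110 < AR < 150, opens a short for 50 < AR < 90, and flattens the position as a stop when a long sees AR < 90 or a short sees AR > 110. The standalone function below restates the calculation with English comments and assumes plain numeric sequences rather than the live tqsdk kline object.
def ar_momentum(high, open_, low, price_tick=0.01):
    """Price-momentum ratio over the bars provided (the unfinished bar is excluded upstream)."""
    spread_ho = sum(h - o for h, o in zip(high, open_))   # aggregate buying pressure
    spread_oc = sum(o, - l for o, l in zip(open_, low)) if False else sum(o - l for o, l in zip(open_, low))  # aggregate selling pressure
    if spread_oc == 0:
        spread_oc = price_tick  # avoid division by zero, mirroring the sample
    return spread_ho / spread_oc * 100

# Toy check with made-up OHLC values:
print(ar_momentum([10.2, 10.4], [10.0, 10.1], [9.9, 10.0]))  # 250.0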
color_transfer/__init__.py | AdamSpannbauer/color_transfer | 0 | 857 | <reponame>AdamSpannbauer/color_transfer<gh_stars>0
# import the necessary packages
import numpy as np
import cv2
import imutils
def color_transfer(source, target, clip=True, preserve_paper=True):
"""
Transfers the color distribution from the source to the target
image using the mean and standard deviations of the L*a*b*
color space.
This implementation is (loosely) based on to the "Color Transfer
between Images" paper by <NAME> al., 2001.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
clip: Should components of L*a*b* image be scaled by np.clip before
converting back to BGR color space?
If False then components will be min-max scaled appropriately.
Clipping will keep target image brightness truer to the input.
Scaling will adjust image brightness to avoid washed out portions
in the resulting color transfer that can be caused by clipping.
preserve_paper: Should color transfer strictly follow methodology
laid out in original paper? The method does not always produce
aesthetically pleasing results.
If False then L*a*b* components will scaled using the reciprocal of
the scaling factor proposed in the paper. This method seems to produce
more consistently aesthetically pleasing results
Returns:
-------
transfer: NumPy array
OpenCV image (w, h, 3) NumPy array (uint8)
"""
# convert the images from the RGB to L*ab* color space, being
# sure to utilizing the floating point data type (note: OpenCV
# expects floats to be 32-bit, so use that instead of 64-bit)
source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")
# compute color statistics for the source and target images
(lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
(lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)
# subtract the means from the target image
(l, a, b) = cv2.split(target)
l -= lMeanTar
a -= aMeanTar
b -= bMeanTar
if preserve_paper:
# scale by the standard deviations using paper proposed factor
l = (lStdTar / lStdSrc) * l
a = (aStdTar / aStdSrc) * a
b = (bStdTar / bStdSrc) * b
else:
# scale by the standard deviations using reciprocal of paper proposed factor
l = (lStdSrc / lStdTar) * l
a = (aStdSrc / aStdTar) * a
b = (bStdSrc / bStdTar) * b
# add in the source mean
l += lMeanSrc
a += aMeanSrc
b += bMeanSrc
# clip/scale the pixel intensities to [0, 255] if they fall
# outside this range
l = _scale_array(l, clip=clip)
a = _scale_array(a, clip=clip)
b = _scale_array(b, clip=clip)
# merge the channels together and convert back to the RGB color
# space, being sure to utilize the 8-bit unsigned integer data
# type
transfer = cv2.merge([l, a, b])
transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)
# return the color transferred image
return transfer
def auto_color_transfer(source, target):
"""Pick color_transfer result truest to source image color
Applies color_transfer with all possible combinations of the clip & preserve_paper arguments.
Mean absolute error (MAE) is computed for the HSV channels of each result and the source image.
The best_result that minimizes the MAE is returned as well as a montage of all candidate results.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
Returns:
-------
tuple: (best_result, comparison)
best_result: NumPy array
result that minimizes mean absolute error between compared to source image in HSV color space
comparison: NumPy array
image showing the results of all combinations of color_transfer options
"""
# get mean HSV stats from source image for comparison
hsv_source = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
hsv_hist_src = cv2.calcHist([hsv_source], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# iterate through all 4 options for toggling color transfer
bools = [True, False]
candidates = []
best_result = None
best_dist = float('inf')
for clip in bools:
for preserve_paper in bools:
# create candidate image from options of this iteration
candidate = color_transfer(source, target, clip, preserve_paper)
# get mean HSV stats from candidate image for comparison
hsv_candidate = cv2.cvtColor(candidate, cv2.COLOR_BGR2HSV)
hsv_hist_cand = cv2.calcHist([hsv_candidate], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# calc chi square dist
chi2_dist = chi2_distance(hsv_hist_src, hsv_hist_cand)
# propose new truest result if found new smallest mae
if chi2_dist < best_dist:
                best_result = candidate[:]
                best_dist = chi2_dist  # track the new best distance so later candidates are compared against it
candidates.append(candidate)
# build 2 by 2 image matrix of all candidates for comparison
comparison = np.hstack((np.vstack(candidates[:2]),
np.vstack(candidates[2:])))
# add border annotations showing values of params for each output
comparison = _bool_matrix_border(comparison)
return best_result, comparison
def chi2_distance(hist_a, hist_b, eps=1e-10):
return 0.5 * np.sum(((hist_a - hist_b) ** 2) / (hist_a + hist_b + eps))
def _bool_matrix_border(comparison_image):
"""Apply table formatting for comparison of color_transfer options
Parameters:
-------
target: NumPy array
OpenCV image in BGR color space (the comparison image produced in auto_color_transfer)
Returns:
-------
comparison: NumPy array
OpenCV image in BGR color space with borders applied to easily compare the different
results of the auto_color_transfer
"""
# 200 seems to work well as border size
border_size = 200
# put black border on top and left of input image
h, w = comparison_image.shape[:2]
top = np.zeros(w * border_size, dtype='uint8').reshape(border_size, w)
left = np.zeros((h + border_size) * border_size, dtype='uint8').reshape(h + border_size, border_size)
top = cv2.cvtColor(top, cv2.COLOR_GRAY2BGR)
left = cv2.cvtColor(left, cv2.COLOR_GRAY2BGR)
bordered_comparison_image = np.vstack((top, comparison_image))
bordered_comparison_image = np.hstack((left, bordered_comparison_image))
# add text for clip arg options to top border
top_title_loc = (border_size, 75)
top_true_loc = (border_size, 190)
top_false_loc = (int(border_size + w / 2), 190)
cv2.putText(bordered_comparison_image, 'Clip', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate 90 degrees for writing text to left border
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, 90)
# add text for preserve paper arg options to left border
top_title_loc = (5, 75)
top_true_loc = (5 + int(h / 2), 190)
top_false_loc = (5, 190)
cv2.putText(bordered_comparison_image, 'Preserve Paper', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate -90 degrees to return image in correct orientation
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, -90)
return bordered_comparison_image
def image_stats(image):
"""
Parameters:
-------
image: NumPy array
OpenCV image in L*a*b* color space
Returns:
-------
Tuple of mean and standard deviations for the L*, a*, and b*
channels, respectively
"""
# compute the mean and standard deviation of each channel
(l, a, b) = cv2.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
# return the color statistics
return lMean, lStd, aMean, aStd, bMean, bStd
def _min_max_scale(arr, new_range=(0, 255)):
"""
Perform min-max scaling to a NumPy array
Parameters:
-------
arr: NumPy array to be scaled to [new_min, new_max] range
new_range: tuple of form (min, max) specifying range of
transformed array
Returns:
-------
NumPy array that has been scaled to be in
[new_range[0], new_range[1]] range
"""
# get array's current min and max
mn = arr.min()
mx = arr.max()
# check if scaling needs to be done to be in new_range
if mn < new_range[0] or mx > new_range[1]:
# perform min-max scaling
scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]
else:
# return array if already in range
scaled = arr
return scaled
def _scale_array(arr, clip=True):
"""
Trim NumPy array values to be in [0, 255] range with option of
clipping or scaling.
Parameters:
-------
arr: array to be trimmed to [0, 255] range
clip: should array be scaled by np.clip? if False then input
array will be min-max scaled to range
[max([arr.min(), 0]), min([arr.max(), 255])]
Returns:
-------
NumPy array that has been scaled to be in [0, 255] range
"""
if clip:
scaled = np.clip(arr, 0, 255)
else:
scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))
scaled = _min_max_scale(arr, new_range=scale_range)
return scaled
| # import the necessary packages
import numpy as np
import cv2
import imutils
def color_transfer(source, target, clip=True, preserve_paper=True):
"""
Transfers the color distribution from the source to the target
image using the mean and standard deviations of the L*a*b*
color space.
This implementation is (loosely) based on to the "Color Transfer
between Images" paper by <NAME> al., 2001.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
clip: Should components of L*a*b* image be scaled by np.clip before
converting back to BGR color space?
If False then components will be min-max scaled appropriately.
Clipping will keep target image brightness truer to the input.
Scaling will adjust image brightness to avoid washed out portions
in the resulting color transfer that can be caused by clipping.
preserve_paper: Should color transfer strictly follow methodology
laid out in original paper? The method does not always produce
aesthetically pleasing results.
If False then L*a*b* components will scaled using the reciprocal of
the scaling factor proposed in the paper. This method seems to produce
more consistently aesthetically pleasing results
Returns:
-------
transfer: NumPy array
OpenCV image (w, h, 3) NumPy array (uint8)
"""
# convert the images from the RGB to L*ab* color space, being
# sure to utilizing the floating point data type (note: OpenCV
# expects floats to be 32-bit, so use that instead of 64-bit)
source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")
# compute color statistics for the source and target images
(lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
(lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)
# subtract the means from the target image
(l, a, b) = cv2.split(target)
l -= lMeanTar
a -= aMeanTar
b -= bMeanTar
if preserve_paper:
# scale by the standard deviations using paper proposed factor
l = (lStdTar / lStdSrc) * l
a = (aStdTar / aStdSrc) * a
b = (bStdTar / bStdSrc) * b
else:
# scale by the standard deviations using reciprocal of paper proposed factor
l = (lStdSrc / lStdTar) * l
a = (aStdSrc / aStdTar) * a
b = (bStdSrc / bStdTar) * b
# add in the source mean
l += lMeanSrc
a += aMeanSrc
b += bMeanSrc
# clip/scale the pixel intensities to [0, 255] if they fall
# outside this range
l = _scale_array(l, clip=clip)
a = _scale_array(a, clip=clip)
b = _scale_array(b, clip=clip)
# merge the channels together and convert back to the RGB color
# space, being sure to utilize the 8-bit unsigned integer data
# type
transfer = cv2.merge([l, a, b])
transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)
# return the color transferred image
return transfer
def auto_color_transfer(source, target):
"""Pick color_transfer result truest to source image color
Applies color_transfer with all possible combinations of the clip & preserve_paper arguments.
Mean absolute error (MAE) is computed for the HSV channels of each result and the source image.
The best_result that minimizes the MAE is returned as well as a montage of all candidate results.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
Returns:
-------
tuple: (best_result, comparison)
best_result: NumPy array
result that minimizes mean absolute error between compared to source image in HSV color space
comparison: NumPy array
image showing the results of all combinations of color_transfer options
"""
# get mean HSV stats from source image for comparison
hsv_source = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
hsv_hist_src = cv2.calcHist([hsv_source], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# iterate through all 4 options for toggling color transfer
bools = [True, False]
candidates = []
best_result = None
best_dist = float('inf')
for clip in bools:
for preserve_paper in bools:
# create candidate image from options of this iteration
candidate = color_transfer(source, target, clip, preserve_paper)
# get mean HSV stats from candidate image for comparison
hsv_candidate = cv2.cvtColor(candidate, cv2.COLOR_BGR2HSV)
hsv_hist_cand = cv2.calcHist([hsv_candidate], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# calc chi square dist
chi2_dist = chi2_distance(hsv_hist_src, hsv_hist_cand)
# propose new truest result if found new smallest mae
if chi2_dist < best_dist:
                best_result = candidate[:]
                best_dist = chi2_dist  # track the new best distance so later candidates are compared against it
candidates.append(candidate)
# build 2 by 2 image matrix of all candidates for comparison
comparison = np.hstack((np.vstack(candidates[:2]),
np.vstack(candidates[2:])))
# add border annotations showing values of params for each output
comparison = _bool_matrix_border(comparison)
return best_result, comparison
def chi2_distance(hist_a, hist_b, eps=1e-10):
return 0.5 * np.sum(((hist_a - hist_b) ** 2) / (hist_a + hist_b + eps))
def _bool_matrix_border(comparison_image):
"""Apply table formatting for comparison of color_transfer options
Parameters:
-------
target: NumPy array
OpenCV image in BGR color space (the comparison image produced in auto_color_transfer)
Returns:
-------
comparison: NumPy array
OpenCV image in BGR color space with borders applied to easily compare the different
results of the auto_color_transfer
"""
# 200 seems to work well as border size
border_size = 200
# put black border on top and left of input image
h, w = comparison_image.shape[:2]
top = np.zeros(w * border_size, dtype='uint8').reshape(border_size, w)
left = np.zeros((h + border_size) * border_size, dtype='uint8').reshape(h + border_size, border_size)
top = cv2.cvtColor(top, cv2.COLOR_GRAY2BGR)
left = cv2.cvtColor(left, cv2.COLOR_GRAY2BGR)
bordered_comparison_image = np.vstack((top, comparison_image))
bordered_comparison_image = np.hstack((left, bordered_comparison_image))
# add text for clip arg options to top border
top_title_loc = (border_size, 75)
top_true_loc = (border_size, 190)
top_false_loc = (int(border_size + w / 2), 190)
cv2.putText(bordered_comparison_image, 'Clip', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate 90 degrees for writing text to left border
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, 90)
# add text for preserve paper arg options to left border
top_title_loc = (5, 75)
top_true_loc = (5 + int(h / 2), 190)
top_false_loc = (5, 190)
cv2.putText(bordered_comparison_image, 'Preserve Paper', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate -90 degrees to return image in correct orientation
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, -90)
return bordered_comparison_image
def image_stats(image):
"""
Parameters:
-------
image: NumPy array
OpenCV image in L*a*b* color space
Returns:
-------
Tuple of mean and standard deviations for the L*, a*, and b*
channels, respectively
"""
# compute the mean and standard deviation of each channel
(l, a, b) = cv2.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
# return the color statistics
return lMean, lStd, aMean, aStd, bMean, bStd
def _min_max_scale(arr, new_range=(0, 255)):
"""
Perform min-max scaling to a NumPy array
Parameters:
-------
arr: NumPy array to be scaled to [new_min, new_max] range
new_range: tuple of form (min, max) specifying range of
transformed array
Returns:
-------
NumPy array that has been scaled to be in
[new_range[0], new_range[1]] range
"""
# get array's current min and max
mn = arr.min()
mx = arr.max()
# check if scaling needs to be done to be in new_range
if mn < new_range[0] or mx > new_range[1]:
# perform min-max scaling
scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]
else:
# return array if already in range
scaled = arr
return scaled
def _scale_array(arr, clip=True):
"""
Trim NumPy array values to be in [0, 255] range with option of
clipping or scaling.
Parameters:
-------
arr: array to be trimmed to [0, 255] range
clip: should array be scaled by np.clip? if False then input
array will be min-max scaled to range
[max([arr.min(), 0]), min([arr.max(), 255])]
Returns:
-------
NumPy array that has been scaled to be in [0, 255] range
"""
if clip:
scaled = np.clip(arr, 0, 255)
else:
scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))
scaled = _min_max_scale(arr, new_range=scale_range)
return scaled | en | 0.763596 | # import the necessary packages Transfers the color distribution from the source to the target image using the mean and standard deviations of the L*a*b* color space. This implementation is (loosely) based on to the "Color Transfer between Images" paper by <NAME> al., 2001. Parameters: ------- source: NumPy array OpenCV image in BGR color space (the source image) target: NumPy array OpenCV image in BGR color space (the target image) clip: Should components of L*a*b* image be scaled by np.clip before converting back to BGR color space? If False then components will be min-max scaled appropriately. Clipping will keep target image brightness truer to the input. Scaling will adjust image brightness to avoid washed out portions in the resulting color transfer that can be caused by clipping. preserve_paper: Should color transfer strictly follow methodology laid out in original paper? The method does not always produce aesthetically pleasing results. If False then L*a*b* components will scaled using the reciprocal of the scaling factor proposed in the paper. This method seems to produce more consistently aesthetically pleasing results Returns: ------- transfer: NumPy array OpenCV image (w, h, 3) NumPy array (uint8) # convert the images from the RGB to L*ab* color space, being # sure to utilizing the floating point data type (note: OpenCV # expects floats to be 32-bit, so use that instead of 64-bit) # compute color statistics for the source and target images # subtract the means from the target image # scale by the standard deviations using paper proposed factor # scale by the standard deviations using reciprocal of paper proposed factor # add in the source mean # clip/scale the pixel intensities to [0, 255] if they fall # outside this range # merge the channels together and convert back to the RGB color # space, being sure to utilize the 8-bit unsigned integer data # type # return the color transferred image Pick color_transfer result truest to source image color Applies color_transfer with all possible combinations of the clip & preserve_paper arguments. Mean absolute error (MAE) is computed for the HSV channels of each result and the source image. The best_result that minimizes the MAE is returned as well as a montage of all candidate results. 
Parameters: ------- source: NumPy array OpenCV image in BGR color space (the source image) target: NumPy array OpenCV image in BGR color space (the target image) Returns: ------- tuple: (best_result, comparison) best_result: NumPy array result that minimizes mean absolute error between compared to source image in HSV color space comparison: NumPy array image showing the results of all combinations of color_transfer options # get mean HSV stats from source image for comparison # iterate through all 4 options for toggling color transfer # create candidate image from options of this iteration # get mean HSV stats from candidate image for comparison # calc chi square dist # propose new truest result if found new smallest mae # build 2 by 2 image matrix of all candidates for comparison # add border annotations showing values of params for each output Apply table formatting for comparison of color_transfer options Parameters: ------- target: NumPy array OpenCV image in BGR color space (the comparison image produced in auto_color_transfer) Returns: ------- comparison: NumPy array OpenCV image in BGR color space with borders applied to easily compare the different results of the auto_color_transfer # 200 seems to work well as border size # put black border on top and left of input image # add text for clip arg options to top border # rotate 90 degrees for writing text to left border # add text for preserve paper arg options to left border # rotate -90 degrees to return image in correct orientation Parameters: ------- image: NumPy array OpenCV image in L*a*b* color space Returns: ------- Tuple of mean and standard deviations for the L*, a*, and b* channels, respectively # compute the mean and standard deviation of each channel # return the color statistics Perform min-max scaling to a NumPy array Parameters: ------- arr: NumPy array to be scaled to [new_min, new_max] range new_range: tuple of form (min, max) specifying range of transformed array Returns: ------- NumPy array that has been scaled to be in [new_range[0], new_range[1]] range # get array's current min and max # check if scaling needs to be done to be in new_range # perform min-max scaling # return array if already in range Trim NumPy array values to be in [0, 255] range with option of clipping or scaling. Parameters: ------- arr: array to be trimmed to [0, 255] range clip: should array be scaled by np.clip? if False then input array will be min-max scaled to range [max([arr.min(), 0]), min([arr.max(), 255])] Returns: ------- NumPy array that has been scaled to be in [0, 255] range | 3.59041 | 4 |
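A minimal usage sketch for the color_transfer module above; the image file names are placeholders and the package is assumed to be importable under the repository name shown in this row.
import cv2
from color_transfer import color_transfer, auto_color_transfer

source = cv2.imread("source.jpg")   # placeholder paths
target = cv2.imread("target.jpg")

# Direct call with the paper's scaling behaviour:
result = color_transfer(source, target, clip=True, preserve_paper=True)
cv2.imwrite("result.jpg", result)

# Or let the helper pick the most faithful of the four clip/preserve_paper combinations:
best, comparison = auto_color_transfer(source, target)
cv2.imwrite("best.jpg", best)
cv2.imwrite("comparison.jpg", comparison)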
Python/Tree/TestCreateTreeLibraryImport.py | zseen/hackerrank-challenges | 0 | 858 | from Library.CreateATree import CreateATree
tree = CreateATree.BinarySearchTree()
nodesList = list((4, 5, 1, 3, 2))
for i in range(0, len(nodesList)):
tree.insert(nodesList[i])
#tree.printInorder()
tree.printPreorder()
#tree.printPostorder()
| from Library.CreateATree import CreateATree
tree = CreateATree.BinarySearchTree()
nodesList = list((4, 5, 1, 3, 2))
for i in range(0, len(nodesList)):
tree.insert(nodesList[i])
#tree.printInorder()
tree.printPreorder()
#tree.printPostorder()
| en | 0.289477 | #tree.printInorder() #tree.printPostorder() | 3.656248 | 4 |
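The test above imports a project-local Library.CreateATree module that is not part of this row. The class below is only a guess at the minimal interface the test relies on (insert plus the traversal printers, of which only printPreorder is exercised).
class BinarySearchTree:
    """Hypothetical minimal stand-in for Library.CreateATree.BinarySearchTree."""

    class _Node:
        def __init__(self, value):
            self.value = value
            self.left = None
            self.right = None

    def __init__(self):
        self.root = None

    def insert(self, value):
        if self.root is None:
            self.root = self._Node(value)
            return
        node = self.root
        while True:
            if value < node.value:
                if node.left is None:
                    node.left = self._Node(value)
                    return
                node = node.left
            else:
                if node.right is None:
                    node.right = self._Node(value)
                    return
                node = node.right

    def printPreorder(self):
        def walk(node):
            if node:
                print(node.value)
                walk(node.left)
                walk(node.right)
        walk(self.root)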
application/siteApp/urls.py | Marcelotsvaz/vaz-projects | 0 | 859 | #
# VAZ Projects
#
#
# Author: <NAME> <<EMAIL>>
from django.urls import path
from . import views
app_name = 'siteApp'
urlpatterns = [
path( '', views.Home.as_view(), name = 'home' ),
path( 'about-me', views.About_me.as_view(), name = 'about_me' ),
path( 'search', views.Search.as_view(), name = 'search' ),
path( 'search/page/<int:page>', views.Search.as_view(), name = 'search' ),
path( 'sitemap.xml', views.Sitemap.as_view(), name = 'sitemap' ),
] | #
# VAZ Projects
#
#
# Author: <NAME> <<EMAIL>>
from django.urls import path
from . import views
app_name = 'siteApp'
urlpatterns = [
path( '', views.Home.as_view(), name = 'home' ),
path( 'about-me', views.About_me.as_view(), name = 'about_me' ),
path( 'search', views.Search.as_view(), name = 'search' ),
path( 'search/page/<int:page>', views.Search.as_view(), name = 'search' ),
path( 'sitemap.xml', views.Sitemap.as_view(), name = 'sitemap' ),
] | en | 0.370288 | # # VAZ Projects # # # Author: <NAME> <<EMAIL>> | 1.562567 | 2 |
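The urlconf above expects class-based views named Home, About_me, Search, and Sitemap in the app's views module. A rough sketch of what such a views.py could look like follows; the template names, base classes, and queryset are assumptions, not taken from the repository.
from django.views.generic import TemplateView, ListView

class Home(TemplateView):
    template_name = 'siteApp/home.html'          # assumed template path

class About_me(TemplateView):
    template_name = 'siteApp/about_me.html'

class Search(ListView):
    template_name = 'siteApp/search.html'
    paginate_by = 10                             # the 'page' URL kwarg above feeds pagination

    def get_queryset(self):
        # Placeholder: a real implementation would query indexed site content here.
        return []

class Sitemap(TemplateView):
    template_name = 'siteApp/sitemap.xml'
    content_type = 'application/xml'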
svd/core/exc.py | epicosy/svd | 0 | 860 | <reponame>epicosy/svd<filename>svd/core/exc.py
class SVDError(Exception):
"""Generic errors."""
pass
| class SVDError(Exception):
"""Generic errors."""
pass | en | 0.517558 | Generic errors. | 1.166219 | 1 |
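A small usage sketch for the exception class above; the calling code is illustrative only.
from svd.core.exc import SVDError

def load_config(path):
    # Illustrative guard: surface a missing argument as the tool's own error type.
    if not path:
        raise SVDError("no configuration file given")
    return path

try:
    load_config("")
except SVDError as exc:
    print(f"svd failed: {exc}")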
Classes/ServiceBase.py | tkeske/SMS-Fetcher | 0 | 861 | '''
@author <NAME>
@since 10.8.2019
'''
import sys
from jnius import autoclass
from Conf.Conf import *
class ServiceBase():
def __init__(self):
PythonServiceClass = autoclass('org.kivy.android.PythonService')
self.Context = autoclass('android.content.Context')
self.Service = PythonServiceClass.mService
#set autorestart to be imune to task swiping on Android 9
self.Service.setAutoRestartService(True)
self.confDict = {k: v for k,v in globals().items() if k.isupper() and k.startswith("SMS")}
        for k, v in self.confDict.items():
setattr(self, k, v)
def killGeneric(self, error):
print(repr(error))
        self.Service.setAutoRestartService(False)
print("Autorestart of the service disabled.")
print("Attempting to kill service permanently.")
        self.Service.stop()
#service takes time to stop. flow thus continues to next block of code
#sys.exit() is to prevent subsequent code from execution
#both calls are neccesary to avoid "Scheduling restart of crashed service process"
#in case we called only sys.exit()
#this applies even if we have setAutoRestartService(False)
print("Exiting python script")
sys.exit() | '''
@author <NAME>
@since 10.8.2019
'''
import sys
from jnius import autoclass
from Conf.Conf import *
class ServiceBase():
def __init__(self):
PythonServiceClass = autoclass('org.kivy.android.PythonService')
self.Context = autoclass('android.content.Context')
self.Service = PythonServiceClass.mService
#set autorestart to be imune to task swiping on Android 9
self.Service.setAutoRestartService(True)
self.confDict = {k: v for k,v in globals().items() if k.isupper() and k.startswith("SMS")}
        for k, v in self.confDict.items():
setattr(self, k, v)
def killGeneric(self, error):
print(repr(error))
        self.Service.setAutoRestartService(False)
print("Autorestart of the service disabled.")
print("Attempting to kill service permanently.")
        self.Service.stop()
#service takes time to stop. flow thus continues to next block of code
#sys.exit() is to prevent subsequent code from execution
#both calls are neccesary to avoid "Scheduling restart of crashed service process"
#in case we called only sys.exit()
#this applies even if we have setAutoRestartService(False)
print("Exiting python script")
sys.exit() | en | 0.804883 | @author <NAME> @since 10.8.2019 #set autorestart to be imune to task swiping on Android 9 #service takes time to stop. flow thus continues to next block of code #sys.exit() is to prevent subsequent code from execution #both calls are neccesary to avoid "Scheduling restart of crashed service process" #in case we called only sys.exit() #this applies even if we have setAutoRestartService(False) | 2.357394 | 2 |
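An illustrative subclass of the ServiceBase class above. The import path, the SMS_POLL_INTERVAL setting, and the run loop are assumptions about how a fetcher service might use it, not code from the repository.
import time
from Classes.ServiceBase import ServiceBase  # assumed import path

class SmsFetcherService(ServiceBase):
    def run(self):
        try:
            while True:
                # ... poll the SMS content provider here ...
                time.sleep(getattr(self, "SMS_POLL_INTERVAL", 30))
        except Exception as error:
            # Reuse the base-class shutdown path on any fatal error.
            self.killGeneric(error)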
api/queue/__init__.py | sofia008/api-redis-queue | 0 | 862 | # api/queue/__init__.py
import os
from flask import Flask
from flask_bootstrap import Bootstrap
# instantiate the extensions
bootstrap = Bootstrap()
def create_app(script_info=None):
# instantiate the app
app = Flask(
__name__,
template_folder="../client/templates",
static_folder="../client/static",
)
# set config
app_settings = os.getenv("APP_SETTINGS")
app.config.from_object(app_settings)
# set up extensions
bootstrap.init_app(app)
# register blueprints
from api.queue.push.views import main_blueprint
app.register_blueprint(main_blueprint)
# shell context for flask cli
    app.shell_context_processor(lambda: {"app": app})
return app
| # api/queue/__init__.py
import os
from flask import Flask
from flask_bootstrap import Bootstrap
# instantiate the extensions
bootstrap = Bootstrap()
def create_app(script_info=None):
# instantiate the app
app = Flask(
__name__,
template_folder="../client/templates",
static_folder="../client/static",
)
# set config
app_settings = os.getenv("APP_SETTINGS")
app.config.from_object(app_settings)
# set up extensions
bootstrap.init_app(app)
# register blueprints
from api.queue.push.views import main_blueprint
app.register_blueprint(main_blueprint)
# shell context for flask cli
    app.shell_context_processor(lambda: {"app": app})
return app
| en | 0.547225 | # api/queue/__init__.py # instantiate the extensions # instantiate the app # set config # set up extensions # register blueprints # shell context for flask cli | 2.17679 | 2 |
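A minimal launcher sketch for the application factory above; manage.py is an assumed file name and the APP_SETTINGS value is a placeholder config path.
# manage.py (hypothetical)
import os

from api.queue import create_app

os.environ.setdefault("APP_SETTINGS", "api.queue.config.DevelopmentConfig")  # placeholder

app = create_app()

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)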
tests/test_engine.py | Foxboron/python-adblock | 35 | 863 | <filename>tests/test_engine.py<gh_stars>10-100
import adblock
import pytest
SMALL_FILTER_LIST = """
||wikipedia.org^
||old.reddit.com^
||lobste.rs^
"""
def empty_engine():
return adblock.Engine(adblock.FilterSet())
def test_engine_creation_and_blocking():
filter_set = adblock.FilterSet(debug=True)
filter_set.add_filter_list(SMALL_FILTER_LIST)
engine = adblock.Engine(filter_set=filter_set)
blocker_result_wikipedia = engine.check_network_urls(
url="https://wikipedia.org/img.png",
source_url="https://google.com/",
request_type="image",
)
assert isinstance(blocker_result_wikipedia, adblock.BlockerResult)
assert blocker_result_wikipedia.matched
blocker_result_facebook = engine.check_network_urls(
"https://facebook.com/directory/img.png",
"https://old.reddit.com/r/all",
"image",
)
assert isinstance(blocker_result_facebook, adblock.BlockerResult)
assert not blocker_result_facebook.matched
def test_serde_file(tmpdir):
path = str(tmpdir / "cache.dat")
engine0 = empty_engine()
with pytest.raises(FileNotFoundError):
# We haven't created the cache.dat file, so we should get an exception
# when attempting to deserialize.
engine0.deserialize_from_file(path)
engine1 = empty_engine()
serialization_result = engine1.serialize_to_file(path)
assert serialization_result is None
engine2 = empty_engine()
deserialization_result = engine2.deserialize_from_file(path)
assert deserialization_result is None
def test_deserialize_corrupt(tmpdir):
path = str(tmpdir / "corrupt_cache.dat")
with open(path, "w", encoding="utf-8") as f:
f.write("abc")
engine = empty_engine()
with pytest.raises(adblock.DeserializationError):
engine.deserialize_from_file(path)
with pytest.raises(adblock.DeserializationError):
engine.deserialize(b"abc")
def test_serde():
engine = empty_engine()
serialization_result = engine.serialize()
assert isinstance(serialization_result, bytes)
engine2 = empty_engine()
deserialization_result = engine2.deserialize(serialization_result)
assert deserialization_result is None
| <filename>tests/test_engine.py<gh_stars>10-100
import adblock
import pytest
SMALL_FILTER_LIST = """
||wikipedia.org^
||old.reddit.com^
||lobste.rs^
"""
def empty_engine():
return adblock.Engine(adblock.FilterSet())
def test_engine_creation_and_blocking():
filter_set = adblock.FilterSet(debug=True)
filter_set.add_filter_list(SMALL_FILTER_LIST)
engine = adblock.Engine(filter_set=filter_set)
blocker_result_wikipedia = engine.check_network_urls(
url="https://wikipedia.org/img.png",
source_url="https://google.com/",
request_type="image",
)
assert isinstance(blocker_result_wikipedia, adblock.BlockerResult)
assert blocker_result_wikipedia.matched
blocker_result_facebook = engine.check_network_urls(
"https://facebook.com/directory/img.png",
"https://old.reddit.com/r/all",
"image",
)
assert isinstance(blocker_result_facebook, adblock.BlockerResult)
assert not blocker_result_facebook.matched
def test_serde_file(tmpdir):
path = str(tmpdir / "cache.dat")
engine0 = empty_engine()
with pytest.raises(FileNotFoundError):
# We haven't created the cache.dat file, so we should get an exception
# when attempting to deserialize.
engine0.deserialize_from_file(path)
engine1 = empty_engine()
serialization_result = engine1.serialize_to_file(path)
assert serialization_result is None
engine2 = empty_engine()
deserialization_result = engine2.deserialize_from_file(path)
assert deserialization_result is None
def test_deserialize_corrupt(tmpdir):
path = str(tmpdir / "corrupt_cache.dat")
with open(path, "w", encoding="utf-8") as f:
f.write("abc")
engine = empty_engine()
with pytest.raises(adblock.DeserializationError):
engine.deserialize_from_file(path)
with pytest.raises(adblock.DeserializationError):
engine.deserialize(b"abc")
def test_serde():
engine = empty_engine()
serialization_result = engine.serialize()
assert isinstance(serialization_result, bytes)
engine2 = empty_engine()
deserialization_result = engine2.deserialize(serialization_result)
assert deserialization_result is None
| en | 0.618193 | ||wikipedia.org^ ||old.reddit.com^ ||lobste.rs^ # We haven't created the cache.dat file, so we should get an exception # when attempting to deserialize. | 2.501298 | 3 |
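Outside the test suite, the same engine API can be used directly; a small sketch follows (the filter rule and URLs are illustrative).
import adblock

rules = adblock.FilterSet()
rules.add_filter_list("||example-ads.com^")   # illustrative rule
engine = adblock.Engine(filter_set=rules)

result = engine.check_network_urls(
    url="https://example-ads.com/banner.png",
    source_url="https://news.example/",
    request_type="image",
)
print(result.matched)  # True if the request would be blocked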
v1/status_updates/urls.py | DucPhamTV/Bank | 94 | 864 | <gh_stars>10-100
from rest_framework.routers import SimpleRouter
from .views.upgrade_notice import UpgradeNoticeViewSet
router = SimpleRouter(trailing_slash=False)
router.register('upgrade_notice', UpgradeNoticeViewSet, basename='upgrade_notice')
| from rest_framework.routers import SimpleRouter
from .views.upgrade_notice import UpgradeNoticeViewSet
router = SimpleRouter(trailing_slash=False)
router.register('upgrade_notice', UpgradeNoticeViewSet, basename='upgrade_notice') | none | 1 | 1.426572 | 1 |
|
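The router above only becomes reachable once its URLs are included in a urlconf. A typical wiring sketch is below; the project-level module and prefix are assumptions based on the file path in this row.
# Hypothetical project-level urls.py
from django.urls import include, path

from v1.status_updates.urls import router as status_updates_router

urlpatterns = [
    path('v1/status_updates/', include(status_updates_router.urls)),
]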
data_structures/stack/largest_rectangle_area_in_histogram.py | ruler30cm/python-ds | 1,723 | 865 | <gh_stars>1000+
'''
Largest rectangle area in a histogram::
Find the largest rectangular area possible in a given histogram where the largest rectangle can be made of a number of contiguous bars.
For simplicity, assume that all bars have same width and the width is 1 unit.
'''
def max_area_histogram(histogram):
stack = list()
max_area = 0 # Initialize max area
index = 0
while index < len(histogram):
if (not stack) or (histogram[stack[-1]] <= histogram[index]):
stack.append(index)
index += 1
else:
top_of_stack = stack.pop()
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index))
max_area = max(max_area, area)
while stack:
top_of_stack = stack.pop()
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index))
max_area = max(max_area, area)
return max_area
hist = [4, 7, 1, 8, 4, 9, 5]
print("Maximum area is",
max_area_histogram(hist))
| '''
Largest rectangle area in a histogram::
Find the largest rectangular area possible in a given histogram where the largest rectangle can be made of a number of contiguous bars.
For simplicity, assume that all bars have same width and the width is 1 unit.
'''
def max_area_histogram(histogram):
stack = list()
max_area = 0 # Initialize max area
index = 0
while index < len(histogram):
if (not stack) or (histogram[stack[-1]] <= histogram[index]):
stack.append(index)
index += 1
else:
top_of_stack = stack.pop()
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index))
max_area = max(max_area, area)
while stack:
top_of_stack = stack.pop()
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index))
max_area = max(max_area, area)
return max_area
hist = [4, 7, 1, 8, 4, 9, 5]
print("Maximum area is",
max_area_histogram(hist)) | en | 0.822817 | Largest rectangle area in a histogram:: Find the largest rectangular area possible in a given histogram where the largest rectangle can be made of a number of contiguous bars. For simplicity, assume that all bars have same width and the width is 1 unit. # Initialize max area | 4.085822 | 4 |
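A quick brute-force cross-check for the stack-based solution above — O(n^2), enumerating every pair of bar positions and keeping min-height times width — useful for validating the result on small inputs.
def max_area_bruteforce(histogram):
    best = 0
    for i in range(len(histogram)):
        lowest = histogram[i]
        for j in range(i, len(histogram)):
            lowest = min(lowest, histogram[j])
            best = max(best, lowest * (j - i + 1))
    return best

hist = [4, 7, 1, 8, 4, 9, 5]
print(max_area_bruteforce(hist))  # 16, matching max_area_histogram(hist)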
gluon/contrib/pbkdf2_ctypes.py | Cwlowe/web2py | 9 | 866 | <reponame>Cwlowe/web2py
# -*- coding: utf-8 -*-
"""
pbkdf2_ctypes
~~~~~~
Fast pbkdf2.
This module implements pbkdf2 for Python using crypto lib from
openssl or commoncrypto.
Note: This module is intended as a plugin replacement of pbkdf2.py
by <NAME>.
Git repository:
$ git clone https://github.com/michele-comitini/pbkdf2_ctypes.git
:copyright: Copyright (c) 2013: <NAME> <<EMAIL>>
:license: LGPLv3
"""
import ctypes
import ctypes.util
import hashlib
import platform
import os.path
import binascii
import sys
__all__ = ['pkcs5_pbkdf2_hmac', 'pbkdf2_bin', 'pbkdf2_hex']
__version__ = '0.99.3'
def _commoncrypto_hashlib_to_crypto_map_get(hashfunc):
hashlib_to_crypto_map = {hashlib.sha1: 1,
hashlib.sha224: 2,
hashlib.sha256: 3,
hashlib.sha384: 4,
hashlib.sha512: 5}
crypto_hashfunc = hashlib_to_crypto_map.get(hashfunc)
if crypto_hashfunc is None:
raise ValueError('Unkwnown digest %s' % hashfunc)
return crypto_hashfunc
def _commoncrypto_pbkdf2(data, salt, iterations, digest, keylen):
"""Common Crypto compatibile wrapper
"""
c_hashfunc = ctypes.c_uint32(_commoncrypto_hashlib_to_crypto_map_get(digest))
c_pass = ctypes.c_char_p(data)
c_passlen = ctypes.c_size_t(len(data))
c_salt = ctypes.c_char_p(salt)
c_saltlen = ctypes.c_size_t(len(salt))
c_iter = ctypes.c_uint(iterations)
c_keylen = ctypes.c_size_t(keylen)
c_buff = ctypes.create_string_buffer(keylen)
crypto.CCKeyDerivationPBKDF.restype = ctypes.c_int
crypto.CCKeyDerivationPBKDF.argtypes = [ctypes.c_uint32,
ctypes.c_char_p,
ctypes.c_size_t,
ctypes.c_char_p,
ctypes.c_size_t,
ctypes.c_uint32,
ctypes.c_uint,
ctypes.c_char_p,
ctypes.c_size_t]
ret = crypto.CCKeyDerivationPBKDF(2, # hardcoded 2-> PBKDF2
c_pass, c_passlen,
c_salt, c_saltlen,
c_hashfunc,
c_iter,
c_buff,
c_keylen)
return (1 - ret, c_buff)
def _openssl_hashlib_to_crypto_map_get(hashfunc):
hashlib_to_crypto_map = {hashlib.md5: crypto.EVP_md5,
hashlib.sha1: crypto.EVP_sha1,
hashlib.sha256: crypto.EVP_sha256,
hashlib.sha224: crypto.EVP_sha224,
hashlib.sha384: crypto.EVP_sha384,
hashlib.sha512: crypto.EVP_sha512}
crypto_hashfunc = hashlib_to_crypto_map.get(hashfunc)
if crypto_hashfunc is None:
raise ValueError('Unkwnown digest %s' % hashfunc)
crypto_hashfunc.restype = ctypes.c_void_p
return crypto_hashfunc()
def _openssl_pbkdf2(data, salt, iterations, digest, keylen):
"""OpenSSL compatibile wrapper
"""
c_hashfunc = ctypes.c_void_p(_openssl_hashlib_to_crypto_map_get(digest))
c_pass = ctypes.c_char_p(data)
c_passlen = ctypes.c_int(len(data))
c_salt = ctypes.c_char_p(salt)
c_saltlen = ctypes.c_int(len(salt))
c_iter = ctypes.c_int(iterations)
c_keylen = ctypes.c_int(keylen)
c_buff = ctypes.create_string_buffer(keylen)
# PKCS5_PBKDF2_HMAC(const char *pass, int passlen,
# const unsigned char *salt, int saltlen, int iter,
# const EVP_MD *digest,
# int keylen, unsigned char *out);
crypto.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_char_p, ctypes.c_int,
ctypes.c_char_p, ctypes.c_int,
ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_char_p]
crypto.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int
err = crypto.PKCS5_PBKDF2_HMAC(c_pass, c_passlen,
c_salt, c_saltlen,
c_iter,
c_hashfunc,
c_keylen,
c_buff)
return (err, c_buff)
try: # check that we have proper OpenSSL or Common Crypto on the system.
system = platform.system()
if system == 'Windows':
if platform.architecture()[0] == '64bit':
libname = ctypes.util.find_library('libeay64')
if not libname:
raise OSError('Library not found')
crypto = ctypes.CDLL(libname)
else:
libname = ctypes.util.find_library('libeay32')
if not libname:
raise OSError('Library libeay32 not found.')
crypto = ctypes.CDLL(libname)
_pbkdf2_hmac = _openssl_pbkdf2
crypto.PKCS5_PBKDF2_HMAC # test compatibility
elif system == 'Darwin': # think different(TM)! i.e. break things!
if [int(x) for x in platform.mac_ver()[0].split('.')] < [10, 7, 0]:
raise OSError('OS X Version too old %s < 10.7.0' % platform.mac_ver()[0])
libname = ctypes.util.find_library('System')
if not libname:
raise OSError('Library not found')
crypto = ctypes.CDLL(os.path.basename(libname))
_pbkdf2_hmac = _commoncrypto_pbkdf2
else:
libname = ctypes.util.find_library('crypto')
if not libname:
raise OSError('Library crypto not found.')
crypto = ctypes.CDLL(os.path.basename(libname))
_pbkdf2_hmac = _openssl_pbkdf2
crypto.PKCS5_PBKDF2_HMAC # test compatibility
except (OSError, AttributeError):
_, e, _ = sys.exc_info()
raise ImportError('Cannot find a compatible cryptographic library '
'on your system. %s' % e)
def pkcs5_pbkdf2_hmac(data, salt, iterations=1000, keylen=24, hashfunc=None):
if hashfunc is None:
hashfunc = hashlib.sha1
err, c_buff = _pbkdf2_hmac(data, salt, iterations, hashfunc, keylen)
if err == 0:
raise ValueError('wrong parameters')
return c_buff.raw[:keylen]
def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc=None):
return binascii.hexlify(pkcs5_pbkdf2_hmac(data, salt, iterations, keylen, hashfunc))
def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None):
return pkcs5_pbkdf2_hmac(data, salt, iterations, keylen, hashfunc)
if __name__ == '__main__':
try:
crypto.SSLeay_version.restype = ctypes.c_char_p
print(crypto.SSLeay_version(0))
except:
pass
import platform
if platform.python_version_tuple() < ('3', '0', '0'):
def bytes(*args):
return str(args[0])
for h in [hashlib.sha1, hashlib.sha224, hashlib.sha256,
hashlib.sha384, hashlib.sha512]:
print(binascii.hexlify(pkcs5_pbkdf2_hmac(bytes('secret', 'utf-8') * 11,
bytes('salt', 'utf-8'),
hashfunc=h)))
| # -*- coding: utf-8 -*-
"""
pbkdf2_ctypes
~~~~~~
Fast pbkdf2.
This module implements pbkdf2 for Python using crypto lib from
openssl or commoncrypto.
Note: This module is intended as a plugin replacement of pbkdf2.py
by <NAME>.
Git repository:
$ git clone https://github.com/michele-comitini/pbkdf2_ctypes.git
:copyright: Copyright (c) 2013: <NAME> <<EMAIL>>
:license: LGPLv3
"""
import ctypes
import ctypes.util
import hashlib
import platform
import os.path
import binascii
import sys
__all__ = ['pkcs5_pbkdf2_hmac', 'pbkdf2_bin', 'pbkdf2_hex']
__version__ = '0.99.3'
def _commoncrypto_hashlib_to_crypto_map_get(hashfunc):
hashlib_to_crypto_map = {hashlib.sha1: 1,
hashlib.sha224: 2,
hashlib.sha256: 3,
hashlib.sha384: 4,
hashlib.sha512: 5}
crypto_hashfunc = hashlib_to_crypto_map.get(hashfunc)
if crypto_hashfunc is None:
raise ValueError('Unkwnown digest %s' % hashfunc)
return crypto_hashfunc
def _commoncrypto_pbkdf2(data, salt, iterations, digest, keylen):
"""Common Crypto compatibile wrapper
"""
c_hashfunc = ctypes.c_uint32(_commoncrypto_hashlib_to_crypto_map_get(digest))
c_pass = ctypes.c_char_p(data)
c_passlen = ctypes.c_size_t(len(data))
c_salt = ctypes.c_char_p(salt)
c_saltlen = ctypes.c_size_t(len(salt))
c_iter = ctypes.c_uint(iterations)
c_keylen = ctypes.c_size_t(keylen)
c_buff = ctypes.create_string_buffer(keylen)
crypto.CCKeyDerivationPBKDF.restype = ctypes.c_int
crypto.CCKeyDerivationPBKDF.argtypes = [ctypes.c_uint32,
ctypes.c_char_p,
ctypes.c_size_t,
ctypes.c_char_p,
ctypes.c_size_t,
ctypes.c_uint32,
ctypes.c_uint,
ctypes.c_char_p,
ctypes.c_size_t]
ret = crypto.CCKeyDerivationPBKDF(2, # hardcoded 2-> PBKDF2
c_pass, c_passlen,
c_salt, c_saltlen,
c_hashfunc,
c_iter,
c_buff,
c_keylen)
return (1 - ret, c_buff)
def _openssl_hashlib_to_crypto_map_get(hashfunc):
hashlib_to_crypto_map = {hashlib.md5: crypto.EVP_md5,
hashlib.sha1: crypto.EVP_sha1,
hashlib.sha256: crypto.EVP_sha256,
hashlib.sha224: crypto.EVP_sha224,
hashlib.sha384: crypto.EVP_sha384,
hashlib.sha512: crypto.EVP_sha512}
crypto_hashfunc = hashlib_to_crypto_map.get(hashfunc)
if crypto_hashfunc is None:
raise ValueError('Unkwnown digest %s' % hashfunc)
crypto_hashfunc.restype = ctypes.c_void_p
return crypto_hashfunc()
def _openssl_pbkdf2(data, salt, iterations, digest, keylen):
"""OpenSSL compatibile wrapper
"""
c_hashfunc = ctypes.c_void_p(_openssl_hashlib_to_crypto_map_get(digest))
c_pass = ctypes.c_char_p(data)
c_passlen = ctypes.c_int(len(data))
c_salt = ctypes.c_char_p(salt)
c_saltlen = ctypes.c_int(len(salt))
c_iter = ctypes.c_int(iterations)
c_keylen = ctypes.c_int(keylen)
c_buff = ctypes.create_string_buffer(keylen)
# PKCS5_PBKDF2_HMAC(const char *pass, int passlen,
# const unsigned char *salt, int saltlen, int iter,
# const EVP_MD *digest,
# int keylen, unsigned char *out);
crypto.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_char_p, ctypes.c_int,
ctypes.c_char_p, ctypes.c_int,
ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_char_p]
crypto.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int
err = crypto.PKCS5_PBKDF2_HMAC(c_pass, c_passlen,
c_salt, c_saltlen,
c_iter,
c_hashfunc,
c_keylen,
c_buff)
return (err, c_buff)
try: # check that we have proper OpenSSL or Common Crypto on the system.
system = platform.system()
if system == 'Windows':
if platform.architecture()[0] == '64bit':
libname = ctypes.util.find_library('libeay64')
if not libname:
raise OSError('Library not found')
crypto = ctypes.CDLL(libname)
else:
libname = ctypes.util.find_library('libeay32')
if not libname:
raise OSError('Library libeay32 not found.')
crypto = ctypes.CDLL(libname)
_pbkdf2_hmac = _openssl_pbkdf2
crypto.PKCS5_PBKDF2_HMAC # test compatibility
elif system == 'Darwin': # think different(TM)! i.e. break things!
if [int(x) for x in platform.mac_ver()[0].split('.')] < [10, 7, 0]:
raise OSError('OS X Version too old %s < 10.7.0' % platform.mac_ver()[0])
libname = ctypes.util.find_library('System')
if not libname:
raise OSError('Library not found')
crypto = ctypes.CDLL(os.path.basename(libname))
_pbkdf2_hmac = _commoncrypto_pbkdf2
else:
libname = ctypes.util.find_library('crypto')
if not libname:
raise OSError('Library crypto not found.')
crypto = ctypes.CDLL(os.path.basename(libname))
_pbkdf2_hmac = _openssl_pbkdf2
crypto.PKCS5_PBKDF2_HMAC # test compatibility
except (OSError, AttributeError):
_, e, _ = sys.exc_info()
raise ImportError('Cannot find a compatible cryptographic library '
'on your system. %s' % e)
def pkcs5_pbkdf2_hmac(data, salt, iterations=1000, keylen=24, hashfunc=None):
if hashfunc is None:
hashfunc = hashlib.sha1
err, c_buff = _pbkdf2_hmac(data, salt, iterations, hashfunc, keylen)
if err == 0:
raise ValueError('wrong parameters')
return c_buff.raw[:keylen]
def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc=None):
return binascii.hexlify(pkcs5_pbkdf2_hmac(data, salt, iterations, keylen, hashfunc))
def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None):
return pkcs5_pbkdf2_hmac(data, salt, iterations, keylen, hashfunc)
if __name__ == '__main__':
try:
crypto.SSLeay_version.restype = ctypes.c_char_p
print(crypto.SSLeay_version(0))
except:
pass
import platform
if platform.python_version_tuple() < ('3', '0', '0'):
def bytes(*args):
return str(args[0])
for h in [hashlib.sha1, hashlib.sha224, hashlib.sha256,
hashlib.sha384, hashlib.sha512]:
print(binascii.hexlify(pkcs5_pbkdf2_hmac(bytes('secret', 'utf-8') * 11,
bytes('salt', 'utf-8'),
hashfunc=h))) | en | 0.601436 | # -*- coding: utf-8 -*- pbkdf2_ctypes ~~~~~~ Fast pbkdf2. This module implements pbkdf2 for Python using crypto lib from openssl or commoncrypto. Note: This module is intended as a plugin replacement of pbkdf2.py by <NAME>. Git repository: $ git clone https://github.com/michele-comitini/pbkdf2_ctypes.git :copyright: Copyright (c) 2013: <NAME> <<EMAIL>> :license: LGPLv3 Common Crypto compatibile wrapper # hardcoded 2-> PBKDF2 OpenSSL compatibile wrapper # PKCS5_PBKDF2_HMAC(const char *pass, int passlen, # const unsigned char *salt, int saltlen, int iter, # const EVP_MD *digest, # int keylen, unsigned char *out); # check that we have proper OpenSSL or Common Crypto on the system. # test compatibility # think different(TM)! i.e. break things! # test compatibility | 2.36868 | 2 |
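A small cross-check sketch for the module above: the ctypes-backed helper should agree with Python's built-in hashlib.pbkdf2_hmac for the same parameters. The flat import name is an assumption; inside web2py the module lives under gluon.contrib.
import binascii
import hashlib

import pbkdf2_ctypes  # assumed import name for the module above

derived = pbkdf2_ctypes.pbkdf2_hex(b"secret", b"salt", iterations=1000, keylen=24,
                                   hashfunc=hashlib.sha256)
reference = binascii.hexlify(
    hashlib.pbkdf2_hmac("sha256", b"secret", b"salt", 1000, dklen=24))

assert derived == reference
print(derived)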
auth_backend/src/key_op.py | cispa/bitahoy | 0 | 867 | import sys
import os
import psycopg2
import base64
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.backends import default_backend
import time
if len(sys.argv) < 2:
print("Please enter either create or remove as a argv[1]")
sys.exit(0)
with psycopg2.connect("dbname='auth_db' user='auth_db' host='authdb' [redacted-2]") as conn:
with conn.cursor() as cursor:
if sys.argv[1] == "generate":
#Load the key or generate a new one:
cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)")
privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption())
cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")")
conn.commit()
print("New key generated!")
elif sys.argv[1] == "generate_if_needed":
#Load the key or generate a new one:
cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)")
cursor.execute("SELECT * FROM key")
res = cursor.fetchall()
if len(res) == 0:
privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption())
cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")")
conn.commit()
print("New key generated, as database was empty!")
else:
print("Database has key ready!")
elif sys.argv[1] == "drop":
cursor.execute("DROP TABLE key")
conn.commit()
print("Dropped old keys")
else:
print("Invalid option! Try 'drop', 'generate' or 'generate_if_needed'...") | import sys
import os
import psycopg2
import base64
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.backends import default_backend
import time
if len(sys.argv) < 2:
print("Please enter either create or remove as a argv[1]")
sys.exit(0)
with psycopg2.connect("dbname='auth_db' user='auth_db' host='authdb' [redacted-2]") as conn:
with conn.cursor() as cursor:
if sys.argv[1] == "generate":
#Load the key or generate a new one:
cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)")
privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption())
cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")")
conn.commit()
print("New key generated!")
elif sys.argv[1] == "generate_if_needed":
#Load the key or generate a new one:
cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)")
cursor.execute("SELECT * FROM key")
res = cursor.fetchall()
if len(res) == 0:
privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption())
cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")")
conn.commit()
print("New key generated, as database was empty!")
else:
print("Database has key ready!")
elif sys.argv[1] == "drop":
cursor.execute("DROP TABLE key")
conn.commit()
print("Dropped old keys")
else:
print("Invalid option! Try 'drop', 'generate' or 'generate_if_needed'...") | en | 0.394985 | #Load the key or generate a new one: #Load the key or generate a new one: | 2.995814 | 3 |
src/tools/types/obj.py | loongson-zn/build | 215 | 868 | <filename>src/tools/types/obj.py<gh_stars>100-1000
# Copyright <NAME> 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE.txt or copy at https://www.bfgroup.xyz/b2/LICENSE.txt)
from b2.build import type
def register ():
type.register_type ('OBJ', ['obj'], None, ['NT', 'CYGWIN'])
type.register_type ('OBJ', ['o'])
register ()
| <filename>src/tools/types/obj.py<gh_stars>100-1000
# Copyright <NAME> 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE.txt or copy at https://www.bfgroup.xyz/b2/LICENSE.txt)
from b2.build import type
def register ():
type.register_type ('OBJ', ['obj'], None, ['NT', 'CYGWIN'])
type.register_type ('OBJ', ['o'])
register ()
| en | 0.732142 | # Copyright <NAME> 2004. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE.txt or copy at https://www.bfgroup.xyz/b2/LICENSE.txt) | 1.60506 | 2 |
sympy/polys/tests/test_sqfreetools.py | eriknw/sympy | 7 | 869 | <filename>sympy/polys/tests/test_sqfreetools.py
"""Tests for square-free decomposition algorithms and related tools. """
from sympy.polys.rings import ring
from sympy.polys.domains import FF, ZZ, QQ
from sympy.polys.polyclasses import DMP
from sympy.polys.specialpolys import f_polys
from sympy.utilities.pytest import raises
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
def test_dup_sqf():
R, x = ring("x", ZZ)
assert R.dup_sqf_part(0) == 0
assert R.dup_sqf_p(0) is True
assert R.dup_sqf_part(7) == 1
assert R.dup_sqf_p(7) is True
assert R.dup_sqf_part(2*x + 2) == x + 1
assert R.dup_sqf_p(2*x + 2) is True
assert R.dup_sqf_part(x**3 + x + 1) == x**3 + x + 1
assert R.dup_sqf_p(x**3 + x + 1) is True
assert R.dup_sqf_part(-x**3 + x + 1) == x**3 - x - 1
assert R.dup_sqf_p(-x**3 + x + 1) is True
assert R.dup_sqf_part(2*x**3 + 3*x**2) == 2*x**2 + 3*x
assert R.dup_sqf_p(2*x**3 + 3*x**2) is False
assert R.dup_sqf_part(-2*x**3 + 3*x**2) == 2*x**2 - 3*x
assert R.dup_sqf_p(-2*x**3 + 3*x**2) is False
assert R.dup_sqf_list(0) == (0, [])
assert R.dup_sqf_list(1) == (1, [])
assert R.dup_sqf_list(x) == (1, [(x, 1)])
assert R.dup_sqf_list(2*x**2) == (2, [(x, 2)])
assert R.dup_sqf_list(3*x**3) == (3, [(x, 3)])
assert R.dup_sqf_list(-x**5 + x**4 + x - 1) == \
(-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
assert R.dup_sqf_list(x**8 + 6*x**6 + 12*x**4 + 8*x**2) == \
( 1, [(x, 2), (x**2 + 2, 3)])
assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)])
R, x = ring("x", QQ)
assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)])
R, x = ring("x", FF(2))
assert R.dup_sqf_list(x**2 + 1) == (1, [(x + 1, 2)])
R, x = ring("x", FF(3))
assert R.dup_sqf_list(x**10 + 2*x**7 + 2*x**4 + x) == \
(1, [(x, 1),
(x + 1, 3),
(x + 2, 6)])
R1, x = ring("x", ZZ)
R2, y = ring("y", FF(3))
f = x**3 + 1
g = y**3 + 1
assert R1.dup_sqf_part(f) == f
assert R2.dup_sqf_part(g) == y + 1
assert R1.dup_sqf_p(f) is True
assert R2.dup_sqf_p(g) is False
R, x, y = ring("x,y", ZZ)
A = x**4 - 3*x**2 + 6
D = x**6 - 5*x**4 + 5*x**2 + 4
f, g = D, R.dmp_sub(A, R.dmp_mul(R.dmp_diff(D, 1), y))
res = R.dmp_resultant(f, g)
h = (4*y**2 + 1).drop(x)
assert R.drop(x).dup_sqf_list(res) == (45796, [(h, 3)])
R, x = ring("x", ZZ["t"])
assert R.dup_sqf_list_include(DMP([1, 0, 0, 0], ZZ)*x**2) == \
[(DMP([1, 0, 0, 0], ZZ), 1), (DMP([1], ZZ)*x, 2)]
def test_dmp_sqf():
R, x, y = ring("x,y", ZZ)
assert R.dmp_sqf_part(0) == 0
assert R.dmp_sqf_p(0) is True
assert R.dmp_sqf_part(7) == 1
assert R.dmp_sqf_p(7) is True
assert R.dmp_sqf_list(3) == (3, [])
assert R.dmp_sqf_list_include(3) == [(3, 1)]
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_sqf_p(f_0) is True
assert R.dmp_sqf_p(f_0**2) is False
assert R.dmp_sqf_p(f_1) is True
assert R.dmp_sqf_p(f_1**2) is False
assert R.dmp_sqf_p(f_2) is True
assert R.dmp_sqf_p(f_2**2) is False
assert R.dmp_sqf_p(f_3) is True
assert R.dmp_sqf_p(f_3**2) is False
assert R.dmp_sqf_p(f_5) is False
assert R.dmp_sqf_p(f_5**2) is False
assert R.dmp_sqf_p(f_4) is True
assert R.dmp_sqf_part(f_4) == -f_4
assert R.dmp_sqf_part(f_5) == x + y - z
R, x, y, z, t = ring("x,y,z,t", ZZ)
assert R.dmp_sqf_p(f_6) is True
assert R.dmp_sqf_part(f_6) == f_6
R, x = ring("x", ZZ)
f = -x**5 + x**4 + x - 1
assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)]
R, x, y = ring("x,y", ZZ)
f = -x**5 + x**4 + x - 1
assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)]
f = -x**2 + 2*x - 1
assert R.dmp_sqf_list_include(f) == [(-1, 1), (x - 1, 2)]
R, x, y = ring("x,y", FF(2))
raises(NotImplementedError, lambda: R.dmp_sqf_list(y**2 + 1))
def test_dup_gff_list():
R, x = ring("x", ZZ)
f = x**5 + 2*x**4 - x**3 - 2*x**2
assert R.dup_gff_list(f) == [(x, 1), (x + 2, 4)]
g = x**9 - 20*x**8 + 166*x**7 - 744*x**6 + 1965*x**5 - 3132*x**4 + 2948*x**3 - 1504*x**2 + 320*x
assert R.dup_gff_list(g) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]
raises(ValueError, lambda: R.dup_gff_list(0))
| <filename>sympy/polys/tests/test_sqfreetools.py
"""Tests for square-free decomposition algorithms and related tools. """
from sympy.polys.rings import ring
from sympy.polys.domains import FF, ZZ, QQ
from sympy.polys.polyclasses import DMP
from sympy.polys.specialpolys import f_polys
from sympy.utilities.pytest import raises
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
def test_dup_sqf():
R, x = ring("x", ZZ)
assert R.dup_sqf_part(0) == 0
assert R.dup_sqf_p(0) is True
assert R.dup_sqf_part(7) == 1
assert R.dup_sqf_p(7) is True
assert R.dup_sqf_part(2*x + 2) == x + 1
assert R.dup_sqf_p(2*x + 2) is True
assert R.dup_sqf_part(x**3 + x + 1) == x**3 + x + 1
assert R.dup_sqf_p(x**3 + x + 1) is True
assert R.dup_sqf_part(-x**3 + x + 1) == x**3 - x - 1
assert R.dup_sqf_p(-x**3 + x + 1) is True
assert R.dup_sqf_part(2*x**3 + 3*x**2) == 2*x**2 + 3*x
assert R.dup_sqf_p(2*x**3 + 3*x**2) is False
assert R.dup_sqf_part(-2*x**3 + 3*x**2) == 2*x**2 - 3*x
assert R.dup_sqf_p(-2*x**3 + 3*x**2) is False
assert R.dup_sqf_list(0) == (0, [])
assert R.dup_sqf_list(1) == (1, [])
assert R.dup_sqf_list(x) == (1, [(x, 1)])
assert R.dup_sqf_list(2*x**2) == (2, [(x, 2)])
assert R.dup_sqf_list(3*x**3) == (3, [(x, 3)])
assert R.dup_sqf_list(-x**5 + x**4 + x - 1) == \
(-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
assert R.dup_sqf_list(x**8 + 6*x**6 + 12*x**4 + 8*x**2) == \
( 1, [(x, 2), (x**2 + 2, 3)])
assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)])
R, x = ring("x", QQ)
assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)])
R, x = ring("x", FF(2))
assert R.dup_sqf_list(x**2 + 1) == (1, [(x + 1, 2)])
R, x = ring("x", FF(3))
assert R.dup_sqf_list(x**10 + 2*x**7 + 2*x**4 + x) == \
(1, [(x, 1),
(x + 1, 3),
(x + 2, 6)])
R1, x = ring("x", ZZ)
R2, y = ring("y", FF(3))
f = x**3 + 1
g = y**3 + 1
assert R1.dup_sqf_part(f) == f
assert R2.dup_sqf_part(g) == y + 1
assert R1.dup_sqf_p(f) is True
assert R2.dup_sqf_p(g) is False
R, x, y = ring("x,y", ZZ)
A = x**4 - 3*x**2 + 6
D = x**6 - 5*x**4 + 5*x**2 + 4
f, g = D, R.dmp_sub(A, R.dmp_mul(R.dmp_diff(D, 1), y))
res = R.dmp_resultant(f, g)
h = (4*y**2 + 1).drop(x)
assert R.drop(x).dup_sqf_list(res) == (45796, [(h, 3)])
R, x = ring("x", ZZ["t"])
assert R.dup_sqf_list_include(DMP([1, 0, 0, 0], ZZ)*x**2) == \
[(DMP([1, 0, 0, 0], ZZ), 1), (DMP([1], ZZ)*x, 2)]
def test_dmp_sqf():
R, x, y = ring("x,y", ZZ)
assert R.dmp_sqf_part(0) == 0
assert R.dmp_sqf_p(0) is True
assert R.dmp_sqf_part(7) == 1
assert R.dmp_sqf_p(7) is True
assert R.dmp_sqf_list(3) == (3, [])
assert R.dmp_sqf_list_include(3) == [(3, 1)]
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_sqf_p(f_0) is True
assert R.dmp_sqf_p(f_0**2) is False
assert R.dmp_sqf_p(f_1) is True
assert R.dmp_sqf_p(f_1**2) is False
assert R.dmp_sqf_p(f_2) is True
assert R.dmp_sqf_p(f_2**2) is False
assert R.dmp_sqf_p(f_3) is True
assert R.dmp_sqf_p(f_3**2) is False
assert R.dmp_sqf_p(f_5) is False
assert R.dmp_sqf_p(f_5**2) is False
assert R.dmp_sqf_p(f_4) is True
assert R.dmp_sqf_part(f_4) == -f_4
assert R.dmp_sqf_part(f_5) == x + y - z
R, x, y, z, t = ring("x,y,z,t", ZZ)
assert R.dmp_sqf_p(f_6) is True
assert R.dmp_sqf_part(f_6) == f_6
R, x = ring("x", ZZ)
f = -x**5 + x**4 + x - 1
assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)]
R, x, y = ring("x,y", ZZ)
f = -x**5 + x**4 + x - 1
assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)]
f = -x**2 + 2*x - 1
assert R.dmp_sqf_list_include(f) == [(-1, 1), (x - 1, 2)]
R, x, y = ring("x,y", FF(2))
raises(NotImplementedError, lambda: R.dmp_sqf_list(y**2 + 1))
def test_dup_gff_list():
R, x = ring("x", ZZ)
f = x**5 + 2*x**4 - x**3 - 2*x**2
assert R.dup_gff_list(f) == [(x, 1), (x + 2, 4)]
g = x**9 - 20*x**8 + 166*x**7 - 744*x**6 + 1965*x**5 - 3132*x**4 + 2948*x**3 - 1504*x**2 + 320*x
assert R.dup_gff_list(g) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]
raises(ValueError, lambda: R.dup_gff_list(0))
| en | 0.841356 | Tests for square-free decomposition algorithms and related tools. | 2.501293 | 3 |
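A small usage sketch of the API these tests exercise; the expected result follows directly from the first dup_sqf_list assertion above.

from sympy.polys.rings import ring
from sympy.polys.domains import ZZ

R, x = ring("x", ZZ)
# Square-free decomposition: returns the content and a list of (factor, multiplicity) pairs.
content, factors = R.dup_sqf_list(-x**5 + x**4 + x - 1)
assert content == -1
assert factors == [(x**3 + x**2 + x + 1, 1), (x - 1, 2)]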
ezno_convert/enums.py | ofersadan85/ezno_convert | 2 | 870 | import enum
from typing import Union
@enum.unique
class PPT(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/powerpoint.ppsaveasfiletype
AnimatedGIF = 40
BMP = 19
Default = 11
EMF = 23
External = 64000
GIF = 16
JPG = 17
META = 15
MP4 = 39
OpenPresentation = 35
PDF = 32
PNG = 18
Presentation = 1
RTF = 6
SHOW = 7
Template = 5
TIF = 21
WMV = 37
XPS = 33
app = 'Powerpoint.Application'
extensions = ('.ppt', '.pptx')
@enum.unique
class WORD(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/word.wdsaveformat
DosText = 4
DosTextLineBreaks = 5
FilteredHTML = 10
FlatXML = 19
OpenDocumentText = 23
HTML = 8
RTF = 6
Template = 1
Text = 2
TextLineBreaks = 3
UnicodeText = 7
WebArchive = 9
XML = 11
Document97 = 0
DocumentDefault = 16
PDF = 17
XPS = 18
app = 'Word.Application'
extensions = ('.doc', '.docx')
@enum.unique
class XL(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/excel.xlfixedformattype
# TODO: Implement "SaveAs" methods, see: https://docs.microsoft.com/en-us/office/vba/api/excel.workbook.saveas
PDF = 0
XPS = 1
app = 'Excel.Application'
extensions = ('.xls', '.xlsx')
enum_types = Union[PPT, WORD, XL]
| import enum
from typing import Union
@enum.unique
class PPT(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/powerpoint.ppsaveasfiletype
AnimatedGIF = 40
BMP = 19
Default = 11
EMF = 23
External = 64000
GIF = 16
JPG = 17
META = 15
MP4 = 39
OpenPresentation = 35
PDF = 32
PNG = 18
Presentation = 1
RTF = 6
SHOW = 7
Template = 5
TIF = 21
WMV = 37
XPS = 33
app = 'Powerpoint.Application'
extensions = ('.ppt', '.pptx')
@enum.unique
class WORD(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/word.wdsaveformat
DosText = 4
DosTextLineBreaks = 5
FilteredHTML = 10
FlatXML = 19
OpenDocumentText = 23
HTML = 8
RTF = 6
Template = 1
Text = 2
TextLineBreaks = 3
UnicodeText = 7
WebArchive = 9
XML = 11
Document97 = 0
DocumentDefault = 16
PDF = 17
XPS = 18
app = 'Word.Application'
extensions = ('.doc', '.docx')
@enum.unique
class XL(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/excel.xlfixedformattype
# TODO: Implement "SaveAs" methods, see: https://docs.microsoft.com/en-us/office/vba/api/excel.workbook.saveas
PDF = 0
XPS = 1
app = 'Excel.Application'
extensions = ('.xls', '.xlsx')
enum_types = Union[PPT, WORD, XL]
| en | 0.466289 | # Source: https://docs.microsoft.com/en-us/office/vba/api/powerpoint.ppsaveasfiletype # Source: https://docs.microsoft.com/en-us/office/vba/api/word.wdsaveformat # Source: https://docs.microsoft.com/en-us/office/vba/api/excel.xlfixedformattype # TODO: Implement "SaveAs" methods, see: https://docs.microsoft.com/en-us/office/vba/api/excel.workbook.saveas | 2.715698 | 3 |
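A hedged sketch of how these enums could drive an Office COM conversion on Windows via pywin32; the converter shipped with this package is not shown in this record, so the Dispatch/SaveAs flow and the import path below are assumptions.

import win32com.client  # pywin32; requires Windows with MS Office installed

from ezno_convert.enums import WORD  # import path assumed from the repo layout above

def word_to_pdf(src_path: str, dst_path: str) -> None:
    # WORD.app holds the COM ProgID, WORD.PDF the wdSaveFormat code (17).
    app = win32com.client.Dispatch(WORD.app.value)
    doc = app.Documents.Open(src_path)
    try:
        doc.SaveAs(dst_path, FileFormat=WORD.PDF.value)
    finally:
        doc.Close(False)
        app.Quit()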
app/django_first/news/migrations/0002_movies_year.py | vvuri/flask_pipeline | 0 | 871 | <reponame>vvuri/flask_pipeline<gh_stars>0
# Generated by Django 4.0.1 on 2022-01-19 23:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='movies',
name='year',
field=models.CharField(max_length=4, null=True),
),
]
| # Generated by Django 4.0.1 on 2022-01-19 23:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='movies',
name='year',
field=models.CharField(max_length=4, null=True),
),
] | en | 0.875601 | # Generated by Django 4.0.1 on 2022-01-19 23:58 | 1.688774 | 2 |
scripts/issue_param_value.py | Jhsmit/awesome-panel-extensions | 3 | 872 | import panel as pn
import param
from awesome_panel_extensions.frameworks.fast import FastTemplate, FastTextInput
WIDGETS = {
"some_text": {"type": FastTextInput, "readonly": True, "sizing_mode": "fixed", "width": 400}
}
class ParameterizedApp(param.Parameterized):
some_text = param.String(default="This is some text")
view = param.Parameter()
def __init__(self, **params):
super().__init__(**params)
self.view = pn.Param(self, parameters=["some_text"], widgets=WIDGETS)
parameterized_app = ParameterizedApp()
parameterized_template = FastTemplate(main=[parameterized_app.view])
parameterized_template.servable()
| import panel as pn
import param
from awesome_panel_extensions.frameworks.fast import FastTemplate, FastTextInput
WIDGETS = {
"some_text": {"type": FastTextInput, "readonly": True, "sizing_mode": "fixed", "width": 400}
}
class ParameterizedApp(param.Parameterized):
some_text = param.String(default="This is some text")
view = param.Parameter()
def __init__(self, **params):
super().__init__(**params)
self.view = pn.Param(self, parameters=["some_text"], widgets=WIDGETS)
parameterized_app = ParameterizedApp()
parameterized_template = FastTemplate(main=[parameterized_app.view])
parameterized_template.servable()
| none | 1 | 2.2115 | 2 |
|
gjqyxyxxcxxt/gjqyxyxxcxxt/queue_companies.py | AisinoPythonTeam/PythonAiniso | 0 | 873 | # -*- coding: utf-8 -*-
import pymysql
import sys, os, json, time, pymongo
app_dir = os.path.abspath("../")
sys.path.append(app_dir)
from gjqyxyxxcxxt import settings
from gjqyxyxxcxxt.database.my_redis import QueueRedis
conn = None
def connect_db():
global conn
conn = pymysql.connect(host="172.16.16.15",port=3306,user="root",passwd="<PASSWORD>",db="ixinnuo_sjcj",charset="utf8")
return
def get_req_from_db():
global conn
cursor = conn.cursor()
cursor.execute('select id, entname from req where status=0 order by id limit 10')
results = cursor.fetchall()
companies = []
for res in results:
company = {}
company['id'] = res[0]
company['name'] = res[1]
companies.append(company)
return companies
def main():
my_queue = QueueRedis()
result = my_queue.get_queue_length(settings.COMPANIES)
print result
    # If the queue still has messages, pause 3 seconds and exit
if result:
time.sleep(3)
exit()
time.sleep(3)
global conn
connect_db()
source = get_req_from_db()
for id_name in source:
message = json.dumps(id_name)
my_queue.send_to_queue(settings.COMPANIES, message)
conn.close()
    print 'Successfully added %s records to the queue!!!' % len(source)
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
import pymysql
import sys, os, json, time, pymongo
app_dir = os.path.abspath("../")
sys.path.append(app_dir)
from gjqyxyxxcxxt import settings
from gjqyxyxxcxxt.database.my_redis import QueueRedis
conn = None
def connect_db():
global conn
conn = pymysql.connect(host="172.16.16.15",port=3306,user="root",passwd="<PASSWORD>",db="ixinnuo_sjcj",charset="utf8")
return
def get_req_from_db():
global conn
cursor = conn.cursor()
cursor.execute('select id, entname from req where status=0 order by id limit 10')
results = cursor.fetchall()
companies = []
for res in results:
company = {}
company['id'] = res[0]
company['name'] = res[1]
companies.append(company)
return companies
def main():
my_queue = QueueRedis()
result = my_queue.get_queue_length(settings.COMPANIES)
print result
    # If the queue still has messages, pause 3 seconds and exit
if result:
time.sleep(3)
exit()
time.sleep(3)
global conn
connect_db()
source = get_req_from_db()
for id_name in source:
message = json.dumps(id_name)
my_queue.send_to_queue(settings.COMPANIES, message)
conn.close()
    print 'Successfully added %s records to the queue!!!' % len(source)
if __name__ == '__main__':
main()
| zh | 0.78634 | # -*- coding: utf-8 -*- #mq 里存在数据则,3秒后退出 | 2.31045 | 2 |
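A hypothetical consumer for the queue filled above, written against redis-py and pymysql directly because the project's QueueRedis wrapper and settings module are not shown in this record; the queue key, connection details and the status update are assumptions that mirror the producer's "status=0" query.

# -*- coding: utf-8 -*-
import json
import pymysql
import redis

r = redis.Redis(host='localhost', port=6379)  # connection details are assumptions
conn = pymysql.connect(host="172.16.16.15", port=3306, user="root",
                       passwd="<PASSWORD>", db="ixinnuo_sjcj", charset="utf8")

while True:
    # BRPOP blocks until a message arrives and returns (queue_name, payload).
    _, payload = r.brpop('companies')  # queue key assumed; the producer uses settings.COMPANIES
    company = json.loads(payload)
    with conn.cursor() as cursor:
        # Flip status so the producer's "status=0" query stops re-queueing this row.
        cursor.execute("UPDATE req SET status=1 WHERE id=%s", (company['id'],))
    conn.commit()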
tests/pm/update_sla.py | supsi-dacd-isaac/parity-sidechain-interface | 0 | 874 | # Importing section
import json
import requests
import argparse
import hashlib
import time
from http import HTTPStatus
# Main
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
args = arg_parser.parse_args()
set_cmd = 'updateSla'
params = {
'idx': 'sla04',
'start': 3000,
'end': 3900
}
cmd_url = 'http://localhost:9119/%s' % set_cmd
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
print('COMMAND: %s' % cmd_url)
print('PARAMS: %s' % params)
r = requests.post(cmd_url, headers=headers, json=params)
data = json.loads(r.text)
print('RESPONSE: %s\n' % data)
# Wait some seconds to be sure that the transaction has been handled
time.sleep(5)
check_tx_url = 'http://localhost:9119/checkTx/%s' % data['tx_hash']
print('CHECK TX: %s' % check_tx_url)
r = requests.get(check_tx_url)
data = json.loads(r.text)
print('RESPONSE: %s\n' % data)
| # Importing section
import json
import requests
import argparse
import hashlib
import time
from http import HTTPStatus
# Main
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
args = arg_parser.parse_args()
set_cmd = 'updateSla'
params = {
'idx': 'sla04',
'start': 3000,
'end': 3900
}
cmd_url = 'http://localhost:9119/%s' % set_cmd
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
print('COMMAND: %s' % cmd_url)
print('PARAMS: %s' % params)
r = requests.post(cmd_url, headers=headers, json=params)
data = json.loads(r.text)
print('RESPONSE: %s\n' % data)
# Wait some seconds to be sure that the transaction has been handled
time.sleep(5)
check_tx_url = 'http://localhost:9119/checkTx/%s' % data['tx_hash']
print('CHECK TX: %s' % check_tx_url)
r = requests.get(check_tx_url)
data = json.loads(r.text)
print('RESPONSE: %s\n' % data)
| en | 0.950438 | # Importing section # Main # Wait some seconds to be sure that the transaction has been handled | 2.547431 | 3 |
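The fixed 5-second sleep above can be replaced by polling; below is a hedged sketch against the same checkTx endpoint, where the retry count and the shape of the response field are assumptions.

import json
import time
import requests

def wait_for_tx(tx_hash, retries=10, delay=1):
    url = 'http://localhost:9119/checkTx/%s' % tx_hash
    for _ in range(retries):
        data = json.loads(requests.get(url).text)
        # Field name is an assumption; adjust to whatever checkTx actually returns.
        if data.get('status') == 'confirmed':
            return data
        time.sleep(delay)
    raise TimeoutError('transaction %s not confirmed after %d tries' % (tx_hash, retries))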
array/python3/5_move_all_negative_elements.py | jitendragangwar123/cp | 0 | 875 | <reponame>jitendragangwar123/cp
def sort(arr):
# Start index 0.
start = 0
# End index
end = len(arr)-1
while start <= end:
# Swap all positive value with last index end & decrease end by 1.
if arr[start] >= 0:
arr[start], arr[end] = arr[end], arr[start]
end -= 1
else:
# If arr[start] is not positive then increase start by 1.
start += 1
if __name__ == "__main__":
arr = [-1, 2, -3, 4, 5, 6, -7, 8, 9]
sort(arr)
print(arr)
| def sort(arr):
# Start index 0.
start = 0
# End index
end = len(arr)-1
while start <= end:
# Swap all positive value with last index end & decrease end by 1.
if arr[start] >= 0:
arr[start], arr[end] = arr[end], arr[start]
end -= 1
else:
# If arr[start] is not positive then increase start by 1.
start += 1
if __name__ == "__main__":
arr = [-1, 2, -3, 4, 5, 6, -7, 8, 9]
sort(arr)
print(arr) | en | 0.698022 | # Start index 0. # End index # Swap all positive value with last index end & decrease end by 1. # If arr[start] is not positive then increase start by 1. | 3.947454 | 4 |
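The swap-based pass above runs in O(n) but does not preserve the relative order of elements; when order matters, a stable variant is a one-liner.

def sort_stable(arr):
    # sorted() is stable: negatives (key False) come before non-negatives, original order kept within each group.
    return sorted(arr, key=lambda v: v >= 0)

assert sort_stable([-1, 2, -3, 4, 5, 6, -7, 8, 9]) == [-1, -3, -7, 2, 4, 5, 6, 8, 9]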
misc.py | hldai/wikiprocesspy | 0 | 876 | <gh_stars>0
import json
def __text_from_anchor_sents_file(anchor_sents_file, output_file):
f = open(anchor_sents_file, encoding='utf-8')
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for i, line in enumerate(f):
sent = json.loads(line)
fout.write('{}\n'.format(sent['tokens']))
# if i > 5:
# break
f.close()
fout.close()
def merge_files(filenames, output_file):
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for filename in filenames:
print(filename)
f = open(filename, encoding='utf-8')
for line in f:
fout.write(line)
f.close()
fout.close()
wiki19_anchor_sents_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents.txt'
anchor_sent_texts_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts.txt'
# __text_from_anchor_sents_file(wiki19_anchor_sents_file, anchor_sent_texts_file)
part_pos_tag_files = [f'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos-{i}.txt' for i in range(4)]
pos_tag_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos.txt'
# merge_files(part_pos_tag_files, pos_tag_file)
| import json
def __text_from_anchor_sents_file(anchor_sents_file, output_file):
f = open(anchor_sents_file, encoding='utf-8')
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for i, line in enumerate(f):
sent = json.loads(line)
fout.write('{}\n'.format(sent['tokens']))
# if i > 5:
# break
f.close()
fout.close()
def merge_files(filenames, output_file):
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for filename in filenames:
print(filename)
f = open(filename, encoding='utf-8')
for line in f:
fout.write(line)
f.close()
fout.close()
wiki19_anchor_sents_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents.txt'
anchor_sent_texts_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts.txt'
# __text_from_anchor_sents_file(wiki19_anchor_sents_file, anchor_sent_texts_file)
part_pos_tag_files = [f'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos-{i}.txt' for i in range(4)]
pos_tag_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos.txt'
# merge_files(part_pos_tag_files, pos_tag_file) | en | 0.45505 | # if i > 5: # break # __text_from_anchor_sents_file(wiki19_anchor_sents_file, anchor_sent_texts_file) # merge_files(part_pos_tag_files, pos_tag_file) | 2.854048 | 3 |
test/smptest.py | myrtam/CANNR | 0 | 877 | """
Test harness for smp.py
"""
import sys
import os
sys.path.append('/Users/ptendick/open-source-workspace/cannr Image/source/cannr/lib')
os.environ['PATH'] = '/Library/Frameworks/Python.framework/Versions/3.7/bin:' + os.environ['PATH']
import cannr
import smp
# Test openProcess by opening a Flask process
def test_openProcess1():
return smp.openProcess(
{"processInfo": "processInfo"},
['python', '/Users/ptendick/open-source-workspace/cannr Image/test/flaskSample.py', '5000', '1'])
# Test openProcess by opening a Plumber process
def test_openProcess2():
return smp.openProcess(
{"processInfo": "processInfo"},
['Rscript', '--vanilla', '/Users/ptendick/open-source-workspace/cannr Image/source/cannr/runApp.R',
'/Users/ptendick/open-source-workspace/cannr Image/test/hello.R', '5001', '2'])
# Test countPorts
def test_countPorts():
projectFilePath = '/Users/ptendick/open-source-workspace/MyRTAM Service/working/project1/project.json'
project = cannr.readJSONFile(projectFilePath)
return smp.countPorts(project)
| """
Test harness for smp.py
"""
import sys
import os
sys.path.append('/Users/ptendick/open-source-workspace/cannr Image/source/cannr/lib')
os.environ['PATH'] = '/Library/Frameworks/Python.framework/Versions/3.7/bin:' + os.environ['PATH']
import cannr
import smp
# Test openProcess by opening a Flask process
def test_openProcess1():
return smp.openProcess(
{"processInfo": "processInfo"},
['python', '/Users/ptendick/open-source-workspace/cannr Image/test/flaskSample.py', '5000', '1'])
# Test openProcess by opening a Plumber process
def test_openProcess2():
return smp.openProcess(
{"processInfo": "processInfo"},
['Rscript', '--vanilla', '/Users/ptendick/open-source-workspace/cannr Image/source/cannr/runApp.R',
'/Users/ptendick/open-source-workspace/cannr Image/test/hello.R', '5001', '2'])
# Test countPorts
def test_countPorts():
projectFilePath = '/Users/ptendick/open-source-workspace/MyRTAM Service/working/project1/project.json'
project = cannr.readJSONFile(projectFilePath)
return smp.countPorts(project)
| en | 0.902208 | Test harness for smp.py # Test openProcess by opening a Flask process # Test openProcess by opening a Plumber process # Test countPorts | 1.906461 | 2 |
neural_toolbox/inception.py | ibrahimSouleiman/GuessWhat | 0 | 878 | <reponame>ibrahimSouleiman/GuessWhat
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.resnet_v1 as resnet_v1
import tensorflow.contrib.slim.python.slim.nets.inception_v1 as inception_v1
import tensorflow.contrib.slim.python.slim.nets.resnet_utils as slim_utils
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import arg_scope
import os
def get_resnet_arg_scope(bn_fn):
"""
Trick to apply CBN from a pretrained tf network. It overides the batchnorm constructor with cbn
:param bn_fn: cbn factory
:return: tensorflow scope
"""
with arg_scope(
[layers_lib.conv2d],
activation_fn=tf.nn.relu,
normalizer_fn=bn_fn,
normalizer_params=None) as arg_sc:
return arg_sc
def create_inception(image_input, is_training, scope="", inception_out="Mixed_5c", resnet_version=50, cbn=None):
"""
    Create an Inception v1 network, overriding the classic batchnorm with conditional batchnorm
:param image_input: placeholder with image
:param is_training: are you using the resnet at training_time or test_time
:param scope: tensorflow scope
:param resnet_version: 50/101/152
:param cbn: the cbn factory
:return: the resnet output
"""
# assert False, "\n" \
# "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). \n" \
# "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}"
# arg_sc = slim_utils.resnet_arg_scope(is_training=is_training)
# print("--- 1")
arg_sc = inception_v1.inception_v1_arg_scope()
# Pick the correct version of the resnet
# if resnet_version == 50:
# current_resnet = resnet_v1.resnet_v1_50
# elif resnet_version == 101:
# current_resnet = resnet_v1.resnet_v1_101
# elif resnet_version == 152:
# current_resnet = resnet_v1.resnet_v1_152
# else:
# raise ValueError("Unsupported resnet version")
# inception_scope = os.path.join('InceptionV1/InceptionV1', inception_out)
# print("--- 2")
inception_scope = inception_out
# print(" resnet_out = {} , resnet_scope = {}".format(resnet_out,resnet_scope))
# print("--- 3")
with slim.arg_scope(arg_sc):
net, end_points = inception_v1.inception_v1(image_input, 1001) # 1000 is the number of softmax class
print("Net = ",net)
# print("--- 4")
if len(scope) > 0 and not scope.endswith("/"):
scope += "/"
# print("--- 5")
# print(end_points)
print(" Batch ",inception_scope)
out = end_points[scope + inception_scope]
print("-- out Use: {},output = {}".format(inception_scope,out))
return out,end_points
| import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.resnet_v1 as resnet_v1
import tensorflow.contrib.slim.python.slim.nets.inception_v1 as inception_v1
import tensorflow.contrib.slim.python.slim.nets.resnet_utils as slim_utils
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import arg_scope
import os
def get_resnet_arg_scope(bn_fn):
"""
Trick to apply CBN from a pretrained tf network. It overides the batchnorm constructor with cbn
:param bn_fn: cbn factory
:return: tensorflow scope
"""
with arg_scope(
[layers_lib.conv2d],
activation_fn=tf.nn.relu,
normalizer_fn=bn_fn,
normalizer_params=None) as arg_sc:
return arg_sc
def create_inception(image_input, is_training, scope="", inception_out="Mixed_5c", resnet_version=50, cbn=None):
"""
    Create an Inception v1 network, overriding the classic batchnorm with conditional batchnorm
:param image_input: placeholder with image
:param is_training: are you using the resnet at training_time or test_time
:param scope: tensorflow scope
:param resnet_version: 50/101/152
:param cbn: the cbn factory
:return: the resnet output
"""
# assert False, "\n" \
# "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). \n" \
# "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}"
# arg_sc = slim_utils.resnet_arg_scope(is_training=is_training)
# print("--- 1")
arg_sc = inception_v1.inception_v1_arg_scope()
# Pick the correct version of the resnet
# if resnet_version == 50:
# current_resnet = resnet_v1.resnet_v1_50
# elif resnet_version == 101:
# current_resnet = resnet_v1.resnet_v1_101
# elif resnet_version == 152:
# current_resnet = resnet_v1.resnet_v1_152
# else:
# raise ValueError("Unsupported resnet version")
# inception_scope = os.path.join('InceptionV1/InceptionV1', inception_out)
# print("--- 2")
inception_scope = inception_out
# print(" resnet_out = {} , resnet_scope = {}".format(resnet_out,resnet_scope))
# print("--- 3")
with slim.arg_scope(arg_sc):
net, end_points = inception_v1.inception_v1(image_input, 1001) # 1000 is the number of softmax class
print("Net = ",net)
# print("--- 4")
if len(scope) > 0 and not scope.endswith("/"):
scope += "/"
# print("--- 5")
# print(end_points)
print(" Batch ",inception_scope)
out = end_points[scope + inception_scope]
print("-- out Use: {},output = {}".format(inception_scope,out))
return out,end_points | en | 0.677341 | Trick to apply CBN from a pretrained tf network. It overides the batchnorm constructor with cbn :param bn_fn: cbn factory :return: tensorflow scope Create a resnet by overidding the classic batchnorm with conditional batchnorm :param image_input: placeholder with image :param is_training: are you using the resnet at training_time or test_time :param scope: tensorflow scope :param resnet_version: 50/101/152 :param cbn: the cbn factory :return: the resnet output # assert False, "\n" \ # "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). \n" \ # "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}" # arg_sc = slim_utils.resnet_arg_scope(is_training=is_training) # print("--- 1") # Pick the correct version of the resnet # if resnet_version == 50: # current_resnet = resnet_v1.resnet_v1_50 # elif resnet_version == 101: # current_resnet = resnet_v1.resnet_v1_101 # elif resnet_version == 152: # current_resnet = resnet_v1.resnet_v1_152 # else: # raise ValueError("Unsupported resnet version") # inception_scope = os.path.join('InceptionV1/InceptionV1', inception_out) # print("--- 2") # print(" resnet_out = {} , resnet_scope = {}".format(resnet_out,resnet_scope)) # print("--- 3") # 1000 is the number of softmax class # print("--- 4") # print("--- 5") # print(end_points) | 2.464854 | 2 |
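A hedged TensorFlow 1.x usage sketch for create_inception above; the 224x224 input size, the checkpoint path and the zero batch are placeholders, and variable restoring is shown in its simplest tf.train.Saver form.

import numpy as np
import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="images")
features, end_points = create_inception(images, is_training=False, inception_out="Mixed_5c")

saver = tf.train.Saver()  # restores the InceptionV1 variables created above
with tf.Session() as sess:
    saver.restore(sess, "/path/to/inception_v1.ckpt")  # checkpoint path is an assumption
    batch = np.zeros((1, 224, 224, 3), dtype=np.float32)
    feats = sess.run(features, feed_dict={images: batch})
    print(feats.shape)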
timm/utils/checkpoint_saver.py | Robert-JunWang/pytorch-image-models | 17,769 | 879 | <filename>timm/utils/checkpoint_saver.py
""" Checkpoint Saver
Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
Hacked together by / Copyright 2020 <NAME>
"""
import glob
import operator
import os
import logging
import torch
from .model import unwrap_model, get_state_dict
_logger = logging.getLogger(__name__)
class CheckpointSaver:
def __init__(
self,
model,
optimizer,
args=None,
model_ema=None,
amp_scaler=None,
checkpoint_prefix='checkpoint',
recovery_prefix='recovery',
checkpoint_dir='',
recovery_dir='',
decreasing=False,
max_history=10,
unwrap_fn=unwrap_model):
# objects to save state_dicts of
self.model = model
self.optimizer = optimizer
self.args = args
self.model_ema = model_ema
self.amp_scaler = amp_scaler
# state
self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness
self.best_epoch = None
self.best_metric = None
self.curr_recovery_file = ''
self.last_recovery_file = ''
# config
self.checkpoint_dir = checkpoint_dir
self.recovery_dir = recovery_dir
self.save_prefix = checkpoint_prefix
self.recovery_prefix = recovery_prefix
self.extension = '.pth.tar'
self.decreasing = decreasing # a lower metric is better if True
self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs
self.max_history = max_history
self.unwrap_fn = unwrap_fn
assert self.max_history >= 1
def save_checkpoint(self, epoch, metric=None):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
self._save(tmp_save_path, epoch, metric)
if os.path.exists(last_save_path):
os.unlink(last_save_path) # required for Windows support.
os.rename(tmp_save_path, last_save_path)
worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None
if (len(self.checkpoint_files) < self.max_history
or metric is None or self.cmp(metric, worst_file[1])):
if len(self.checkpoint_files) >= self.max_history:
self._cleanup_checkpoints(1)
filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
save_path = os.path.join(self.checkpoint_dir, filename)
os.link(last_save_path, save_path)
self.checkpoint_files.append((save_path, metric))
self.checkpoint_files = sorted(
self.checkpoint_files, key=lambda x: x[1],
reverse=not self.decreasing) # sort in descending order if a lower metric is not better
checkpoints_str = "Current checkpoints:\n"
for c in self.checkpoint_files:
checkpoints_str += ' {}\n'.format(c)
_logger.info(checkpoints_str)
if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
self.best_epoch = epoch
self.best_metric = metric
best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
if os.path.exists(best_save_path):
os.unlink(best_save_path)
os.link(last_save_path, best_save_path)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def _save(self, save_path, epoch, metric=None):
save_state = {
'epoch': epoch,
'arch': type(self.model).__name__.lower(),
'state_dict': get_state_dict(self.model, self.unwrap_fn),
'optimizer': self.optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if self.amp_scaler is not None:
save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
if self.model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, save_path)
def _cleanup_checkpoints(self, trim=0):
trim = min(len(self.checkpoint_files), trim)
delete_index = self.max_history - trim
if delete_index < 0 or len(self.checkpoint_files) <= delete_index:
return
to_delete = self.checkpoint_files[delete_index:]
for d in to_delete:
try:
_logger.debug("Cleaning checkpoint: {}".format(d))
os.remove(d[0])
except Exception as e:
_logger.error("Exception '{}' while deleting checkpoint".format(e))
self.checkpoint_files = self.checkpoint_files[:delete_index]
def save_recovery(self, epoch, batch_idx=0):
assert epoch >= 0
filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
save_path = os.path.join(self.recovery_dir, filename)
self._save(save_path, epoch)
if os.path.exists(self.last_recovery_file):
try:
_logger.debug("Cleaning recovery: {}".format(self.last_recovery_file))
os.remove(self.last_recovery_file)
except Exception as e:
_logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file))
self.last_recovery_file = self.curr_recovery_file
self.curr_recovery_file = save_path
def find_recovery(self):
recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
files = glob.glob(recovery_path + '*' + self.extension)
files = sorted(files)
return files[0] if len(files) else ''
| <filename>timm/utils/checkpoint_saver.py
""" Checkpoint Saver
Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
Hacked together by / Copyright 2020 <NAME>
"""
import glob
import operator
import os
import logging
import torch
from .model import unwrap_model, get_state_dict
_logger = logging.getLogger(__name__)
class CheckpointSaver:
def __init__(
self,
model,
optimizer,
args=None,
model_ema=None,
amp_scaler=None,
checkpoint_prefix='checkpoint',
recovery_prefix='recovery',
checkpoint_dir='',
recovery_dir='',
decreasing=False,
max_history=10,
unwrap_fn=unwrap_model):
# objects to save state_dicts of
self.model = model
self.optimizer = optimizer
self.args = args
self.model_ema = model_ema
self.amp_scaler = amp_scaler
# state
self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness
self.best_epoch = None
self.best_metric = None
self.curr_recovery_file = ''
self.last_recovery_file = ''
# config
self.checkpoint_dir = checkpoint_dir
self.recovery_dir = recovery_dir
self.save_prefix = checkpoint_prefix
self.recovery_prefix = recovery_prefix
self.extension = '.pth.tar'
self.decreasing = decreasing # a lower metric is better if True
self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs
self.max_history = max_history
self.unwrap_fn = unwrap_fn
assert self.max_history >= 1
def save_checkpoint(self, epoch, metric=None):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
self._save(tmp_save_path, epoch, metric)
if os.path.exists(last_save_path):
os.unlink(last_save_path) # required for Windows support.
os.rename(tmp_save_path, last_save_path)
worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None
if (len(self.checkpoint_files) < self.max_history
or metric is None or self.cmp(metric, worst_file[1])):
if len(self.checkpoint_files) >= self.max_history:
self._cleanup_checkpoints(1)
filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
save_path = os.path.join(self.checkpoint_dir, filename)
os.link(last_save_path, save_path)
self.checkpoint_files.append((save_path, metric))
self.checkpoint_files = sorted(
self.checkpoint_files, key=lambda x: x[1],
reverse=not self.decreasing) # sort in descending order if a lower metric is not better
checkpoints_str = "Current checkpoints:\n"
for c in self.checkpoint_files:
checkpoints_str += ' {}\n'.format(c)
_logger.info(checkpoints_str)
if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
self.best_epoch = epoch
self.best_metric = metric
best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
if os.path.exists(best_save_path):
os.unlink(best_save_path)
os.link(last_save_path, best_save_path)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def _save(self, save_path, epoch, metric=None):
save_state = {
'epoch': epoch,
'arch': type(self.model).__name__.lower(),
'state_dict': get_state_dict(self.model, self.unwrap_fn),
'optimizer': self.optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if self.amp_scaler is not None:
save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
if self.model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, save_path)
def _cleanup_checkpoints(self, trim=0):
trim = min(len(self.checkpoint_files), trim)
delete_index = self.max_history - trim
if delete_index < 0 or len(self.checkpoint_files) <= delete_index:
return
to_delete = self.checkpoint_files[delete_index:]
for d in to_delete:
try:
_logger.debug("Cleaning checkpoint: {}".format(d))
os.remove(d[0])
except Exception as e:
_logger.error("Exception '{}' while deleting checkpoint".format(e))
self.checkpoint_files = self.checkpoint_files[:delete_index]
def save_recovery(self, epoch, batch_idx=0):
assert epoch >= 0
filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
save_path = os.path.join(self.recovery_dir, filename)
self._save(save_path, epoch)
if os.path.exists(self.last_recovery_file):
try:
_logger.debug("Cleaning recovery: {}".format(self.last_recovery_file))
os.remove(self.last_recovery_file)
except Exception as e:
_logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file))
self.last_recovery_file = self.curr_recovery_file
self.curr_recovery_file = save_path
def find_recovery(self):
recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
files = glob.glob(recovery_path + '*' + self.extension)
files = sorted(files)
return files[0] if len(files) else ''
| en | 0.785354 | Checkpoint Saver Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. Hacked together by / Copyright 2020 <NAME> # objects to save state_dicts of # state # (filename, metric) tuples in order of decreasing betterness # config # a lower metric is better if True # True if lhs better than rhs # required for Windows support. # sort in descending order if a lower metric is not better # version < 2 increments epoch before save | 2.330914 | 2 |
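A usage sketch based on the constructor and methods defined above; the model, optimizer and metric values are stand-ins, and the directory is created up front because _save writes into it directly.

import os
import torch

os.makedirs('./checkpoints', exist_ok=True)
model = torch.nn.Linear(10, 2)                              # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

saver = CheckpointSaver(
    model, optimizer,
    checkpoint_dir='./checkpoints', recovery_dir='./checkpoints',
    decreasing=True,   # tracking a loss-like metric, lower is better
    max_history=3)

for epoch in range(5):
    val_loss = 1.0 / (epoch + 1)                            # stand-in metric
    best_metric, best_epoch = saver.save_checkpoint(epoch, metric=val_loss)
    print('best so far:', best_metric, 'at epoch', best_epoch)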
AGC004/AGC004a.py | VolgaKurvar/AtCoder | 0 | 880 | <filename>AGC004/AGC004a.py
# AGC004a
def main():
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
a, b, c = map(int, input().split())
if a % 2 == 0 or b % 2 == 0 or c % 2 == 0:
print(0)
exit(0)
print(min(a*b, b*c, c*a))
if __name__ == '__main__':
main()
| <filename>AGC004/AGC004a.py
# AGC004a
def main():
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
a, b, c = map(int, input().split())
if a % 2 == 0 or b % 2 == 0 or c % 2 == 0:
print(0)
exit(0)
print(min(a*b, b*c, c*a))
if __name__ == '__main__':
main()
| none | 1 | 2.79856 | 3 |
|
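A quick check of the logic above: when every edge is odd no axis-aligned cut splits the block evenly, so the smallest achievable difference is one layer, i.e. the smallest face area.

# e.g. a, b, c = 3, 5, 7: no even edge, so the answer is min(3*5, 5*7, 7*3) = 15
assert min(3 * 5, 5 * 7, 7 * 3) == 15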
glance/tests/functional/test_api.py | arvindn05/glance | 0 | 881 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Version-independent api tests"""
import httplib2
from oslo_serialization import jsonutils
from six.moves import http_client
from glance.tests import functional
# TODO(rosmaita): all the EXPERIMENTAL stuff in this file can be ripped out
# when v2.6 becomes CURRENT in Queens
def _generate_v1_versions(url):
v1_versions = {'versions': [
{
'id': 'v1.1',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
return v1_versions
def _generate_v2_versions(url):
version_list = []
version_list.extend([
{
'id': 'v2.6',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.5',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.4',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.3',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
}
])
v2_versions = {'versions': version_list}
return v2_versions
def _generate_all_versions(url):
v1 = _generate_v1_versions(url)
v2 = _generate_v2_versions(url)
all_versions = {'versions': v2['versions'] + v1['versions']}
return all_versions
class TestApiVersions(functional.FunctionalTest):
def test_version_configurations(self):
"""Test that versioning is handled properly through all channels"""
# v1 and v2 api enabled
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = _generate_all_versions(url)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(versions, content)
def test_v2_api_configuration(self):
self.api_server.enable_v1_api = False
self.api_server.enable_v2_api = True
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = _generate_v2_versions(url)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(versions, content)
def test_v1_api_configuration(self):
self.api_server.enable_v1_api = True
self.api_server.enable_v2_api = False
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = _generate_v1_versions(url)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(versions, content)
class TestApiPaths(functional.FunctionalTest):
def setUp(self):
super(TestApiPaths, self).setUp()
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
self.versions = _generate_all_versions(url)
images = {'images': []}
self.images_json = jsonutils.dumps(images)
def test_get_root_path(self):
"""Assert GET / with `no Accept:` header.
Verify version choices returned.
Bug lp:803260 no Accept header causes a 500 in glance-api
"""
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_images_path(self):
"""Assert GET /images with `no Accept:` header.
Verify version choices returned.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v1_images_path(self):
"""GET /v1/images with `no Accept:` header.
Verify empty images list returned.
"""
path = 'http://%s:%d/v1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
def test_get_root_path_with_unknown_header(self):
"""Assert GET / with Accept: unknown header
Verify version choices returned. Verify message in API log about
unknown accept header.
"""
path = 'http://%s:%d/' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'unknown'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_root_path_with_openstack_header(self):
"""Assert GET / with an Accept: application/vnd.openstack.images-v1
Verify empty image list returned
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
self.assertEqual(self.images_json, content.decode())
def test_get_images_path_with_openstack_header(self):
"""Assert GET /images with a
`Accept: application/vnd.openstack.compute-v1` header.
Verify version choices returned. Verify message in API log
about unknown accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.compute-v1'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v10_images_path(self):
"""Assert GET /v1.0/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_v1a_images_path(self):
"""Assert GET /v1.a/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_va1_images_path(self):
"""Assert GET /va.1/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/va.1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_versions_path(self):
"""Assert GET /versions with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_versions_path_with_openstack_header(self):
"""Assert GET /versions with the
`Accept: application/vnd.openstack.images-v1` header.
Verify version choices returned.
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v1_versions_path(self):
"""Assert GET /v1/versions with `no Accept:` header
Verify 404 returned
"""
path = 'http://%s:%d/v1/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.NOT_FOUND, response.status)
def test_get_versions_choices(self):
"""Verify version choices returned"""
path = 'http://%s:%d/v10' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_images_path_with_openstack_v2_header(self):
"""Assert GET /images with a
`Accept: application/vnd.openstack.compute-v2` header.
Verify version choices returned. Verify message in API log
about unknown version in accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v10'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v12_images_path(self):
"""Assert GET /v1.2/images with `no Accept:` header
Verify version choices returned
"""
path = 'http://%s:%d/v1.2/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
| # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Version-independent api tests"""
import httplib2
from oslo_serialization import jsonutils
from six.moves import http_client
from glance.tests import functional
# TODO(rosmaita): all the EXPERIMENTAL stuff in this file can be ripped out
# when v2.6 becomes CURRENT in Queens
def _generate_v1_versions(url):
v1_versions = {'versions': [
{
'id': 'v1.1',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
return v1_versions
def _generate_v2_versions(url):
version_list = []
version_list.extend([
{
'id': 'v2.6',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.5',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.4',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.3',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
}
])
v2_versions = {'versions': version_list}
return v2_versions
def _generate_all_versions(url):
v1 = _generate_v1_versions(url)
v2 = _generate_v2_versions(url)
all_versions = {'versions': v2['versions'] + v1['versions']}
return all_versions
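# Illustrative sketch (not part of the original test module): the "version
# choices" document the assertions below compare against has this shape, with
# one entry per enabled API version (URLs and port vary per test run):
#
#     {"versions": [
#         {"id": "v2.6", "status": "CURRENT",
#          "links": [{"rel": "self", "href": "http://127.0.0.1:<port>/2/"}]},
#         ...,
#         {"id": "v1.0", "status": "DEPRECATED",
#          "links": [{"rel": "self", "href": "http://127.0.0.1:<port>/1/"}]},
#     ]}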
class TestApiVersions(functional.FunctionalTest):
def test_version_configurations(self):
"""Test that versioning is handled properly through all channels"""
# v1 and v2 api enabled
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = _generate_all_versions(url)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(versions, content)
def test_v2_api_configuration(self):
self.api_server.enable_v1_api = False
self.api_server.enable_v2_api = True
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = _generate_v2_versions(url)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(versions, content)
def test_v1_api_configuration(self):
self.api_server.enable_v1_api = True
self.api_server.enable_v2_api = False
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = _generate_v1_versions(url)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(versions, content)
class TestApiPaths(functional.FunctionalTest):
def setUp(self):
super(TestApiPaths, self).setUp()
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
self.versions = _generate_all_versions(url)
images = {'images': []}
self.images_json = jsonutils.dumps(images)
def test_get_root_path(self):
"""Assert GET / with `no Accept:` header.
Verify version choices returned.
Bug lp:803260 no Accept header causes a 500 in glance-api
"""
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_images_path(self):
"""Assert GET /images with `no Accept:` header.
Verify version choices returned.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v1_images_path(self):
"""GET /v1/images with `no Accept:` header.
Verify empty images list returned.
"""
path = 'http://%s:%d/v1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
def test_get_root_path_with_unknown_header(self):
"""Assert GET / with Accept: unknown header
Verify version choices returned. Verify message in API log about
unknown accept header.
"""
path = 'http://%s:%d/' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'unknown'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_root_path_with_openstack_header(self):
"""Assert GET / with an Accept: application/vnd.openstack.images-v1
Verify empty image list returned
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
self.assertEqual(self.images_json, content.decode())
def test_get_images_path_with_openstack_header(self):
"""Assert GET /images with a
`Accept: application/vnd.openstack.compute-v1` header.
Verify version choices returned. Verify message in API log
about unknown accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.compute-v1'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v10_images_path(self):
"""Assert GET /v1.0/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_v1a_images_path(self):
"""Assert GET /v1.a/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_va1_images_path(self):
"""Assert GET /va.1/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/va.1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_versions_path(self):
"""Assert GET /versions with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_versions_path_with_openstack_header(self):
"""Assert GET /versions with the
`Accept: application/vnd.openstack.images-v1` header.
Verify version choices returned.
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v1_versions_path(self):
"""Assert GET /v1/versions with `no Accept:` header
Verify 404 returned
"""
path = 'http://%s:%d/v1/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.NOT_FOUND, response.status)
def test_get_versions_choices(self):
"""Verify version choices returned"""
path = 'http://%s:%d/v10' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_images_path_with_openstack_v2_header(self):
"""Assert GET /images with a
`Accept: application/vnd.openstack.compute-v2` header.
Verify version choices returned. Verify message in API log
about unknown version in accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v10'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v12_images_path(self):
"""Assert GET /v1.2/images with `no Accept:` header
Verify version choices returned
"""
path = 'http://%s:%d/v1.2/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
| en | 0.746664 | # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Version-independent api tests # TODO(rosmaita): all the EXPERIMENTAL stuff in this file can be ripped out # when v2.6 becomes CURRENT in Queens Test that versioning is handled properly through all channels # v1 and v2 api enabled # Verify version choices returned. # Verify version choices returned. # Verify version choices returned. Assert GET / with `no Accept:` header. Verify version choices returned. Bug lp:803260 no Accept header causes a 500 in glance-api Assert GET /images with `no Accept:` header. Verify version choices returned. GET /v1/images with `no Accept:` header. Verify empty images list returned. Assert GET / with Accept: unknown header Verify version choices returned. Verify message in API log about unknown accept header. Assert GET / with an Accept: application/vnd.openstack.images-v1 Verify empty image list returned Assert GET /images with a `Accept: application/vnd.openstack.compute-v1` header. Verify version choices returned. Verify message in API log about unknown accept header. Assert GET /v1.0/images with no Accept: header Verify version choices returned Assert GET /v1.a/images with no Accept: header Verify version choices returned Assert GET /va.1/images with no Accept: header Verify version choices returned Assert GET /versions with no Accept: header Verify version choices returned Assert GET /versions with the `Accept: application/vnd.openstack.images-v1` header. Verify version choices returned. Assert GET /v1/versions with `no Accept:` header Verify 404 returned Verify version choices returned Assert GET /images with a `Accept: application/vnd.openstack.compute-v2` header. Verify version choices returned. Verify message in API log about unknown version in accept header. Assert GET /v1.2/images with `no Accept:` header Verify version choices returned | 1.617004 | 2 |
qcore/asserts.py | corey-sobel/qcore | 1 | 882 | # Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module with assertion helpers.
The advantages of using a method like
assert_eq(expected, actual)
instead of
assert expected == actual
include:
1 - On failures, assert_eq prints an informative message of the actual
values compared (e.g. AssertionError: 1 != 2) for free, which makes it
faster and easier to iterate on tests.
2 - In the context of refactors, basic asserts incorrectly shift the burden of
adding printouts and writing good test code to people refactoring code
rather than the person who initially wrote the code.
"""
__all__ = [
"assert_is",
"assert_is_not",
"assert_is_instance",
"assert_eq",
"assert_dict_eq",
"assert_ne",
"assert_gt",
"assert_ge",
"assert_lt",
"assert_le",
"assert_in",
"assert_not_in",
"assert_in_with_tolerance",
"assert_unordered_list_eq",
"assert_raises",
"AssertRaises",
# Strings
"assert_is_substring",
"assert_is_not_substring",
"assert_startswith",
"assert_endswith",
]
# The unittest.py testing framework checks for this variable in a module to
# filter out stack frames from that module from the test output, in order to
# make the output more concise.
# __unittest = 1
import traceback
from .inspection import get_full_name
_number_types = (int, float, complex)
def _assert_fail_message(message, expected, actual, comparison_str, extra):
if message:
return message
if extra:
return "%a %s %a (%s)" % (expected, comparison_str, actual, extra)
return "%a %s %a" % (expected, comparison_str, actual)
def assert_is(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is not actual."""
assert expected is actual, _assert_fail_message(
message, expected, actual, "is not", extra
)
def assert_is_not(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is actual."""
assert expected is not actual, _assert_fail_message(
message, expected, actual, "is", extra
)
def assert_is_instance(value, types, message=None, extra=None):
"""Raises an AssertionError if value is not an instance of type(s)."""
assert isinstance(value, types), _assert_fail_message(
message, value, types, "is not an instance of", extra
)
def assert_eq(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected != actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is larger than the tolerance.
"""
if tolerance is None:
assert expected == actual, _assert_fail_message(
message, expected, actual, "!=", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff <= tolerance, _assert_fail_message(
message, expected, actual, "is more than %a away from" % tolerance, extra
)
def _dict_path_string(path):
if len(path) == 0:
return "(root)"
return "->".join(map(ascii, path))
def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]):
"""Asserts that two dictionaries are equal, producing a custom message if they are not."""
assert_is_instance(expected, dict)
assert_is_instance(actual, dict)
expected_keys = set(expected.keys())
actual_keys = set(actual.keys())
assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %a" % (
_dict_path_string(dict_path),
expected_keys - actual_keys,
)
assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %a" % (
_dict_path_string(dict_path),
actual_keys - expected_keys,
)
for k in expected_keys:
key_path = dict_path + [k]
assert_is_instance(
actual[k],
type(expected[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
assert_is_instance(
expected[k],
type(actual[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
if isinstance(actual[k], dict):
assert_dict_eq(
expected[k],
actual[k],
number_tolerance=number_tolerance,
dict_path=key_path,
)
elif isinstance(actual[k], _number_types):
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
tolerance=number_tolerance,
)
else:
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
)
def assert_ne(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected == actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is smaller than the tolerance.
"""
if tolerance is None:
assert expected != actual, _assert_fail_message(
message, expected, actual, "==", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff > tolerance, _assert_fail_message(
message, expected, actual, "is less than %a away from" % tolerance, extra
)
def assert_gt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand <= right_hand."""
assert left > right, _assert_fail_message(message, left, right, "<=", extra)
def assert_ge(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand < right_hand."""
assert left >= right, _assert_fail_message(message, left, right, "<", extra)
def assert_lt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand >= right_hand."""
assert left < right, _assert_fail_message(message, left, right, ">=", extra)
def assert_le(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand > right_hand."""
assert left <= right, _assert_fail_message(message, left, right, ">", extra)
def assert_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq."""
assert obj in seq, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_not_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is in iter."""
# for very long strings, provide a truncated error
if isinstance(seq, str) and obj in seq and len(seq) > 200:
index = seq.find(obj)
start_index = index - 50
if start_index > 0:
truncated = "(truncated) ..."
else:
truncated = ""
start_index = 0
end_index = index + len(obj) + 50
truncated += seq[start_index:end_index]
if end_index < len(seq):
truncated += "... (truncated)"
assert False, _assert_fail_message(message, obj, truncated, "is in", extra)
assert obj not in seq, _assert_fail_message(message, obj, seq, "is in", extra)
def assert_in_with_tolerance(obj, seq, tolerance, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq using assert_eq cmp."""
for i in seq:
try:
assert_eq(obj, i, tolerance=tolerance, message=message, extra=extra)
return
except AssertionError:
pass
assert False, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_unordered_list_eq(expected, actual, message=None):
"""Raises an AssertionError if the objects contained
in expected are not equal to the objects contained
in actual without regard to their order.
This takes quadratic time in the number of elements in actual; don't use it for very long lists.
"""
missing_in_actual = []
missing_in_expected = list(actual)
for x in expected:
try:
missing_in_expected.remove(x)
except ValueError:
missing_in_actual.append(x)
if missing_in_actual or missing_in_expected:
if not message:
message = (
"%a not equal to %a; missing items: %a in expected, %a in actual."
% (expected, actual, missing_in_expected, missing_in_actual)
)
assert False, message
def assert_raises(fn, *expected_exception_types):
"""Raises an AssertionError if calling fn does not raise one of the expected_exception-types."""
with AssertRaises(*expected_exception_types):
fn()
class AssertRaises(object):
"""With-context that asserts that the code within the context raises the specified exception."""
def __init__(self, *expected_exception_types, **kwargs):
# when you don't specify the exception expected, it's easy to write buggy tests that appear
# to pass but actually throw an exception different from the expected one
assert (
len(expected_exception_types) >= 1
), "You must specify the exception type when using AssertRaises"
self.expected_exception_types = set(expected_exception_types)
self.expected_exception_found = None
self.extra = kwargs.pop("extra", None)
assert_eq({}, kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type in self.expected_exception_types:
# Return True to suppress the Exception if the type matches. For details,
# see: http://docs.python.org/release/2.5.2/lib/typecontextmanager.html
self.expected_exception_found = exc_val
return True
for t in self.expected_exception_types:
if isinstance(exc_val, t):
self.expected_exception_found = exc_val
return True
expected = ", ".join(map(get_full_name, self.expected_exception_types))
if exc_type is None:
message = "No exception raised, but expected: %s" % expected
if self.extra is not None:
message += " (%s)" % self.extra
else:
template = (
"{TYPE}: {VAL} is raised, but expected:"
" {EXPECTED}{EXTRA_STR}\n\n{STACK}"
)
message = template.format(
TYPE=get_full_name(exc_type),
VAL=exc_val,
EXPECTED=expected,
STACK="".join(traceback.format_tb(exc_tb)),
EXTRA_STR=(" (%s)" % self.extra) if self.extra is not None else "",
)
raise AssertionError(message)
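# Usage sketch for the context manager above (hypothetical calls, not part
# of the original module):
#
#     with AssertRaises(ValueError):
#         int("not a number")
#
#     with AssertRaises(KeyError) as ctx:
#         {}["missing"]
#     # ctx.expected_exception_found now holds the raised KeyError
#
#     assert_raises(lambda: int("x"), ValueError)  # functional form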
# ===================================================
# Strings
# ===================================================
def assert_is_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is not a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) != -1)
), _assert_fail_message(message, substring, subject, "is not in", extra)
def assert_is_not_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) == -1)
), _assert_fail_message(message, substring, subject, "is in", extra)
def assert_startswith(prefix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not start with prefix."""
assert (
(type(subject) is str)
and (type(prefix) is str)
and (subject.startswith(prefix))
), _assert_fail_message(message, subject, prefix, "does not start with", extra)
def assert_endswith(suffix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not end with suffix."""
assert (
(type(subject) is str) and (type(suffix) is str) and (subject.endswith(suffix))
), _assert_fail_message(message, subject, suffix, "does not end with", extra)
| # Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module with assertion helpers.
The advantages of using a method like
assert_eq(expected, actual)
instead of
assert expected == actual
include:
1 - On failures, assert_eq prints an informative message of the actual
values compared (e.g. AssertionError: 1 != 2) for free, which makes it
faster and easier to iterate on tests.
2 - In the context of refactors, basic asserts incorrectly shift the burden of
adding printouts and writing good test code to people refactoring code
rather than the person who initially wrote the code.
"""
__all__ = [
"assert_is",
"assert_is_not",
"assert_is_instance",
"assert_eq",
"assert_dict_eq",
"assert_ne",
"assert_gt",
"assert_ge",
"assert_lt",
"assert_le",
"assert_in",
"assert_not_in",
"assert_in_with_tolerance",
"assert_unordered_list_eq",
"assert_raises",
"AssertRaises",
# Strings
"assert_is_substring",
"assert_is_not_substring",
"assert_startswith",
"assert_endswith",
]
# The unittest.py testing framework checks for this variable in a module to
# filter out stack frames from that module from the test output, in order to
# make the output more concise.
# __unittest = 1
import traceback
from .inspection import get_full_name
_number_types = (int, float, complex)
def _assert_fail_message(message, expected, actual, comparison_str, extra):
if message:
return message
if extra:
return "%a %s %a (%s)" % (expected, comparison_str, actual, extra)
return "%a %s %a" % (expected, comparison_str, actual)
def assert_is(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is not actual."""
assert expected is actual, _assert_fail_message(
message, expected, actual, "is not", extra
)
def assert_is_not(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is actual."""
assert expected is not actual, _assert_fail_message(
message, expected, actual, "is", extra
)
def assert_is_instance(value, types, message=None, extra=None):
"""Raises an AssertionError if value is not an instance of type(s)."""
assert isinstance(value, types), _assert_fail_message(
message, value, types, "is not an instance of", extra
)
def assert_eq(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected != actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is larger than the tolerance.
"""
if tolerance is None:
assert expected == actual, _assert_fail_message(
message, expected, actual, "!=", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff <= tolerance, _assert_fail_message(
message, expected, actual, "is more than %a away from" % tolerance, extra
)
def _dict_path_string(path):
if len(path) == 0:
return "(root)"
return "->".join(map(ascii, path))
def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]):
"""Asserts that two dictionaries are equal, producing a custom message if they are not."""
assert_is_instance(expected, dict)
assert_is_instance(actual, dict)
expected_keys = set(expected.keys())
actual_keys = set(actual.keys())
assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %a" % (
_dict_path_string(dict_path),
expected_keys - actual_keys,
)
assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %a" % (
_dict_path_string(dict_path),
actual_keys - expected_keys,
)
for k in expected_keys:
key_path = dict_path + [k]
assert_is_instance(
actual[k],
type(expected[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
assert_is_instance(
expected[k],
type(actual[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
if isinstance(actual[k], dict):
assert_dict_eq(
expected[k],
actual[k],
number_tolerance=number_tolerance,
dict_path=key_path,
)
elif isinstance(actual[k], _number_types):
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
tolerance=number_tolerance,
)
else:
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
)
def assert_ne(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected == actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is smaller than the tolerance.
"""
if tolerance is None:
assert expected != actual, _assert_fail_message(
message, expected, actual, "==", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff > tolerance, _assert_fail_message(
message, expected, actual, "is less than %a away from" % tolerance, extra
)
def assert_gt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand <= right_hand."""
assert left > right, _assert_fail_message(message, left, right, "<=", extra)
def assert_ge(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand < right_hand."""
assert left >= right, _assert_fail_message(message, left, right, "<", extra)
def assert_lt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand >= right_hand."""
assert left < right, _assert_fail_message(message, left, right, ">=", extra)
def assert_le(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand > right_hand."""
assert left <= right, _assert_fail_message(message, left, right, ">", extra)
def assert_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq."""
assert obj in seq, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_not_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is in iter."""
# for very long strings, provide a truncated error
if isinstance(seq, str) and obj in seq and len(seq) > 200:
index = seq.find(obj)
start_index = index - 50
if start_index > 0:
truncated = "(truncated) ..."
else:
truncated = ""
start_index = 0
end_index = index + len(obj) + 50
truncated += seq[start_index:end_index]
if end_index < len(seq):
truncated += "... (truncated)"
assert False, _assert_fail_message(message, obj, truncated, "is in", extra)
assert obj not in seq, _assert_fail_message(message, obj, seq, "is in", extra)
def assert_in_with_tolerance(obj, seq, tolerance, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq using assert_eq cmp."""
for i in seq:
try:
assert_eq(obj, i, tolerance=tolerance, message=message, extra=extra)
return
except AssertionError:
pass
assert False, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_unordered_list_eq(expected, actual, message=None):
"""Raises an AssertionError if the objects contained
in expected are not equal to the objects contained
in actual without regard to their order.
This takes quadratic time in the number of elements in actual; don't use it for very long lists.
"""
missing_in_actual = []
missing_in_expected = list(actual)
for x in expected:
try:
missing_in_expected.remove(x)
except ValueError:
missing_in_actual.append(x)
if missing_in_actual or missing_in_expected:
if not message:
message = (
"%a not equal to %a; missing items: %a in expected, %a in actual."
% (expected, actual, missing_in_expected, missing_in_actual)
)
assert False, message
def assert_raises(fn, *expected_exception_types):
"""Raises an AssertionError if calling fn does not raise one of the expected_exception-types."""
with AssertRaises(*expected_exception_types):
fn()
class AssertRaises(object):
"""With-context that asserts that the code within the context raises the specified exception."""
def __init__(self, *expected_exception_types, **kwargs):
# when you don't specify the exception expected, it's easy to write buggy tests that appear
# to pass but actually throw an exception different from the expected one
assert (
len(expected_exception_types) >= 1
), "You must specify the exception type when using AssertRaises"
self.expected_exception_types = set(expected_exception_types)
self.expected_exception_found = None
self.extra = kwargs.pop("extra", None)
assert_eq({}, kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type in self.expected_exception_types:
# Return True to suppress the Exception if the type matches. For details,
# see: http://docs.python.org/release/2.5.2/lib/typecontextmanager.html
self.expected_exception_found = exc_val
return True
for t in self.expected_exception_types:
if isinstance(exc_val, t):
self.expected_exception_found = exc_val
return True
expected = ", ".join(map(get_full_name, self.expected_exception_types))
if exc_type is None:
message = "No exception raised, but expected: %s" % expected
if self.extra is not None:
message += " (%s)" % self.extra
else:
template = (
"{TYPE}: {VAL} is raised, but expected:"
" {EXPECTED}{EXTRA_STR}\n\n{STACK}"
)
message = template.format(
TYPE=get_full_name(exc_type),
VAL=exc_val,
EXPECTED=expected,
STACK="".join(traceback.format_tb(exc_tb)),
EXTRA_STR=(" (%s)" % self.extra) if self.extra is not None else "",
)
raise AssertionError(message)
# ===================================================
# Strings
# ===================================================
def assert_is_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is not a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) != -1)
), _assert_fail_message(message, substring, subject, "is not in", extra)
def assert_is_not_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) == -1)
), _assert_fail_message(message, substring, subject, "is in", extra)
def assert_startswith(prefix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not start with prefix."""
assert (
(type(subject) is str)
and (type(prefix) is str)
and (subject.startswith(prefix))
), _assert_fail_message(message, subject, prefix, "does not start with", extra)
def assert_endswith(suffix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not end with suffix."""
assert (
(type(subject) is str) and (type(suffix) is str) and (subject.endswith(suffix))
), _assert_fail_message(message, subject, suffix, "does not end with", extra)
| en | 0.785275 | # Copyright 2016 Quora, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Module with assertion helpers. The advantages of using a method like assert_eq(expected, actual) instead of assert expected == actual include: 1 - On failures, assert_eq prints an informative message of the actual values compared (e.g. AssertionError: 1 != 2) for free, which makes it faster and easier to iterate on tests. 2 - In the context of refactors, basic asserts incorrectly shift the burden of adding printouts and writing good test code to people refactoring code rather than the person who initially wrote the code. # Strings # The unittest.py testing framework checks for this variable in a module to # filter out stack frames from that module from the test output, in order to # make the output more concise. # __unittest = 1 Raises an AssertionError if expected is not actual. Raises an AssertionError if expected is actual. Raises an AssertionError if value is not an instance of type(s). Raises an AssertionError if expected != actual. If tolerance is specified, raises an AssertionError if either - expected or actual isn't a number, or - the difference between expected and actual is larger than the tolerance. Asserts that two dictionaries are equal, producing a custom message if they are not. Raises an AssertionError if expected == actual. If tolerance is specified, raises an AssertionError if either - expected or actual isn't a number, or - the difference between expected and actual is smaller than the tolerance. Raises an AssertionError if left_hand <= right_hand. Raises an AssertionError if left_hand < right_hand. Raises an AssertionError if left_hand >= right_hand. Raises an AssertionError if left_hand > right_hand. Raises an AssertionError if obj is not in seq. Raises an AssertionError if obj is in iter. # for very long strings, provide a truncated error Raises an AssertionError if obj is not in seq using assert_eq cmp. Raises an AssertionError if the objects contained in expected are not equal to the objects contained in actual without regard to their order. This takes quadratic time in the umber of elements in actual; don't use it for very long lists. Raises an AssertionError if calling fn does not raise one of the expected_exception-types. With-context that asserts that the code within the context raises the specified exception. # when you don't specify the exception expected, it's easy to write buggy tests that appear # to pass but actually throw an exception different from the expected one # Return True to suppress the Exception if the type matches. For details, # see: http://docs.python.org/release/2.5.2/lib/typecontextmanager.html # =================================================== # Strings # =================================================== Raises an AssertionError if substring is not a substring of subject. Raises an AssertionError if substring is a substring of subject. Raises an AssertionError if the subject string does not start with prefix. 
Raises an AssertionError if the subject string does not end with suffix. | 2.426263 | 2 |
lib/galaxy/web/__init__.py | rikeshi/galaxy | 4 | 883 | <gh_stars>1-10
"""
The Galaxy web application framework
"""
from .framework import url_for
from .framework.base import httpexceptions
from .framework.decorators import (
do_not_cache,
error,
expose,
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
json,
json_pretty,
legacy_expose_api,
legacy_expose_api_anonymous,
legacy_expose_api_raw,
legacy_expose_api_raw_anonymous,
require_admin,
require_login,
)
__all__ = ('FormBuilder', 'do_not_cache', 'error', 'expose', 'expose_api',
'expose_api_anonymous', 'expose_api_anonymous_and_sessionless',
'expose_api_raw', 'expose_api_raw_anonymous',
'expose_api_raw_anonymous_and_sessionless', 'form',
'format_return_as_json', 'httpexceptions', 'json', 'json_pretty',
'legacy_expose_api', 'legacy_expose_api_anonymous',
'legacy_expose_api_raw', 'legacy_expose_api_raw_anonymous',
'require_admin', 'require_login', 'url_for')
| """
The Galaxy web application framework
"""
from .framework import url_for
from .framework.base import httpexceptions
from .framework.decorators import (
do_not_cache,
error,
expose,
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
json,
json_pretty,
legacy_expose_api,
legacy_expose_api_anonymous,
legacy_expose_api_raw,
legacy_expose_api_raw_anonymous,
require_admin,
require_login,
)
__all__ = ('FormBuilder', 'do_not_cache', 'error', 'expose', 'expose_api',
'expose_api_anonymous', 'expose_api_anonymous_and_sessionless',
'expose_api_raw', 'expose_api_raw_anonymous',
'expose_api_raw_anonymous_and_sessionless', 'form',
'format_return_as_json', 'httpexceptions', 'json', 'json_pretty',
'legacy_expose_api', 'legacy_expose_api_anonymous',
'legacy_expose_api_raw', 'legacy_expose_api_raw_anonymous',
'require_admin', 'require_login', 'url_for') | en | 0.437753 | The Galaxy web application framework | 1.644444 | 2 |
src/python/pants/core/goals/check_test.py | yoav-orca/pants | 1,806 | 884 | <reponame>yoav-orca/pants
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from abc import ABCMeta, abstractmethod
from pathlib import Path
from textwrap import dedent
from typing import ClassVar, Iterable, List, Optional, Tuple, Type
from pants.core.goals.check import Check, CheckRequest, CheckResult, CheckResults, check
from pants.core.util_rules.distdir import DistDir
from pants.engine.addresses import Address
from pants.engine.fs import Workspace
from pants.engine.target import FieldSet, MultipleSourcesField, Target, Targets
from pants.engine.unions import UnionMembership
from pants.testutil.option_util import create_options_bootstrapper
from pants.testutil.rule_runner import MockGet, RuleRunner, mock_console, run_rule_with_mocks
from pants.util.logging import LogLevel
class MockTarget(Target):
alias = "mock_target"
core_fields = (MultipleSourcesField,)
class MockCheckFieldSet(FieldSet):
required_fields = (MultipleSourcesField,)
class MockCheckRequest(CheckRequest, metaclass=ABCMeta):
field_set_type = MockCheckFieldSet
checker_name: ClassVar[str]
@staticmethod
@abstractmethod
def exit_code(_: Iterable[Address]) -> int:
pass
@property
def check_results(self) -> CheckResults:
addresses = [config.address for config in self.field_sets]
return CheckResults(
[
CheckResult(
self.exit_code(addresses),
"",
"",
)
],
checker_name=self.checker_name,
)
class SuccessfulRequest(MockCheckRequest):
checker_name = "SuccessfulChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return 0
class FailingRequest(MockCheckRequest):
checker_name = "FailingChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return 1
class ConditionallySucceedsRequest(MockCheckRequest):
checker_name = "ConditionallySucceedsChecker"
@staticmethod
def exit_code(addresses: Iterable[Address]) -> int:
if any(address.target_name == "bad" for address in addresses):
return 127
return 0
class SkippedRequest(MockCheckRequest):
@staticmethod
def exit_code(_) -> int:
return 0
@property
def check_results(self) -> CheckResults:
return CheckResults([], checker_name="SkippedChecker")
class InvalidField(MultipleSourcesField):
pass
class InvalidFieldSet(MockCheckFieldSet):
required_fields = (InvalidField,)
class InvalidRequest(MockCheckRequest):
field_set_type = InvalidFieldSet
checker_name = "InvalidChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return -1
def make_target(address: Optional[Address] = None) -> Target:
if address is None:
address = Address("", target_name="tests")
return MockTarget({}, address)
def run_typecheck_rule(
*, request_types: List[Type[CheckRequest]], targets: List[Target]
) -> Tuple[int, str]:
union_membership = UnionMembership({CheckRequest: request_types})
with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
rule_runner = RuleRunner()
result: Check = run_rule_with_mocks(
check,
rule_args=[
console,
Workspace(rule_runner.scheduler, _enforce_effects=False),
Targets(targets),
DistDir(relpath=Path("dist")),
union_membership,
],
mock_gets=[
MockGet(
output_type=CheckResults,
input_type=CheckRequest,
mock=lambda field_set_collection: field_set_collection.check_results,
),
],
union_membership=union_membership,
)
assert not stdio_reader.get_stdout()
return result.exit_code, stdio_reader.get_stderr()
def test_invalid_target_noops() -> None:
exit_code, stderr = run_typecheck_rule(request_types=[InvalidRequest], targets=[make_target()])
assert exit_code == 0
assert stderr == ""
def test_summary() -> None:
good_address = Address("", target_name="good")
bad_address = Address("", target_name="bad")
exit_code, stderr = run_typecheck_rule(
request_types=[
ConditionallySucceedsRequest,
FailingRequest,
SkippedRequest,
SuccessfulRequest,
],
targets=[make_target(good_address), make_target(bad_address)],
)
assert exit_code == FailingRequest.exit_code([bad_address])
assert stderr == dedent(
"""\
𐄂 ConditionallySucceedsChecker failed.
𐄂 FailingChecker failed.
- SkippedChecker skipped.
✓ SuccessfulChecker succeeded.
"""
)
def test_streaming_output_skip() -> None:
results = CheckResults([], checker_name="typechecker")
assert results.level() == LogLevel.DEBUG
assert results.message() == "typechecker skipped."
def test_streaming_output_success() -> None:
results = CheckResults([CheckResult(0, "stdout", "stderr")], checker_name="typechecker")
assert results.level() == LogLevel.INFO
assert results.message() == dedent(
"""\
typechecker succeeded.
stdout
stderr
"""
)
def test_streaming_output_failure() -> None:
results = CheckResults([CheckResult(18, "stdout", "stderr")], checker_name="typechecker")
assert results.level() == LogLevel.ERROR
assert results.message() == dedent(
"""\
typechecker failed (exit code 18).
stdout
stderr
"""
)
def test_streaming_output_partitions() -> None:
results = CheckResults(
[
CheckResult(21, "", "", partition_description="ghc8.1"),
CheckResult(0, "stdout", "stderr", partition_description="ghc9.2"),
],
checker_name="typechecker",
)
assert results.level() == LogLevel.ERROR
assert results.message() == dedent(
"""\
typechecker failed (exit code 21).
Partition #1 - ghc8.1:
Partition #2 - ghc9.2:
stdout
stderr
"""
)
| # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from abc import ABCMeta, abstractmethod
from pathlib import Path
from textwrap import dedent
from typing import ClassVar, Iterable, List, Optional, Tuple, Type
from pants.core.goals.check import Check, CheckRequest, CheckResult, CheckResults, check
from pants.core.util_rules.distdir import DistDir
from pants.engine.addresses import Address
from pants.engine.fs import Workspace
from pants.engine.target import FieldSet, MultipleSourcesField, Target, Targets
from pants.engine.unions import UnionMembership
from pants.testutil.option_util import create_options_bootstrapper
from pants.testutil.rule_runner import MockGet, RuleRunner, mock_console, run_rule_with_mocks
from pants.util.logging import LogLevel
class MockTarget(Target):
alias = "mock_target"
core_fields = (MultipleSourcesField,)
class MockCheckFieldSet(FieldSet):
required_fields = (MultipleSourcesField,)
class MockCheckRequest(CheckRequest, metaclass=ABCMeta):
field_set_type = MockCheckFieldSet
checker_name: ClassVar[str]
@staticmethod
@abstractmethod
def exit_code(_: Iterable[Address]) -> int:
pass
@property
def check_results(self) -> CheckResults:
addresses = [config.address for config in self.field_sets]
return CheckResults(
[
CheckResult(
self.exit_code(addresses),
"",
"",
)
],
checker_name=self.checker_name,
)
class SuccessfulRequest(MockCheckRequest):
checker_name = "SuccessfulChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return 0
class FailingRequest(MockCheckRequest):
checker_name = "FailingChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return 1
class ConditionallySucceedsRequest(MockCheckRequest):
checker_name = "ConditionallySucceedsChecker"
@staticmethod
def exit_code(addresses: Iterable[Address]) -> int:
if any(address.target_name == "bad" for address in addresses):
return 127
return 0
class SkippedRequest(MockCheckRequest):
@staticmethod
def exit_code(_) -> int:
return 0
@property
def check_results(self) -> CheckResults:
return CheckResults([], checker_name="SkippedChecker")
class InvalidField(MultipleSourcesField):
pass
class InvalidFieldSet(MockCheckFieldSet):
required_fields = (InvalidField,)
class InvalidRequest(MockCheckRequest):
field_set_type = InvalidFieldSet
checker_name = "InvalidChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return -1
def make_target(address: Optional[Address] = None) -> Target:
if address is None:
address = Address("", target_name="tests")
return MockTarget({}, address)
def run_typecheck_rule(
*, request_types: List[Type[CheckRequest]], targets: List[Target]
) -> Tuple[int, str]:
union_membership = UnionMembership({CheckRequest: request_types})
with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
rule_runner = RuleRunner()
result: Check = run_rule_with_mocks(
check,
rule_args=[
console,
Workspace(rule_runner.scheduler, _enforce_effects=False),
Targets(targets),
DistDir(relpath=Path("dist")),
union_membership,
],
mock_gets=[
MockGet(
output_type=CheckResults,
input_type=CheckRequest,
mock=lambda field_set_collection: field_set_collection.check_results,
),
],
union_membership=union_membership,
)
assert not stdio_reader.get_stdout()
return result.exit_code, stdio_reader.get_stderr()
def test_invalid_target_noops() -> None:
exit_code, stderr = run_typecheck_rule(request_types=[InvalidRequest], targets=[make_target()])
assert exit_code == 0
assert stderr == ""
def test_summary() -> None:
good_address = Address("", target_name="good")
bad_address = Address("", target_name="bad")
exit_code, stderr = run_typecheck_rule(
request_types=[
ConditionallySucceedsRequest,
FailingRequest,
SkippedRequest,
SuccessfulRequest,
],
targets=[make_target(good_address), make_target(bad_address)],
)
assert exit_code == FailingRequest.exit_code([bad_address])
assert stderr == dedent(
"""\
𐄂 ConditionallySucceedsChecker failed.
𐄂 FailingChecker failed.
- SkippedChecker skipped.
✓ SuccessfulChecker succeeded.
"""
)
def test_streaming_output_skip() -> None:
results = CheckResults([], checker_name="typechecker")
assert results.level() == LogLevel.DEBUG
assert results.message() == "typechecker skipped."
def test_streaming_output_success() -> None:
results = CheckResults([CheckResult(0, "stdout", "stderr")], checker_name="typechecker")
assert results.level() == LogLevel.INFO
assert results.message() == dedent(
"""\
typechecker succeeded.
stdout
stderr
"""
)
def test_streaming_output_failure() -> None:
results = CheckResults([CheckResult(18, "stdout", "stderr")], checker_name="typechecker")
assert results.level() == LogLevel.ERROR
assert results.message() == dedent(
"""\
typechecker failed (exit code 18).
stdout
stderr
"""
)
def test_streaming_output_partitions() -> None:
results = CheckResults(
[
CheckResult(21, "", "", partition_description="ghc8.1"),
CheckResult(0, "stdout", "stderr", partition_description="ghc9.2"),
],
checker_name="typechecker",
)
assert results.level() == LogLevel.ERROR
assert results.message() == dedent(
"""\
typechecker failed (exit code 21).
Partition #1 - ghc8.1:
Partition #2 - ghc9.2:
stdout
stderr
"""
) | en | 0.600738 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). \ 𐄂 ConditionallySucceedsChecker failed. 𐄂 FailingChecker failed. - SkippedChecker skipped. ✓ SuccessfulChecker succeeded. \ typechecker succeeded. stdout stderr \ typechecker failed (exit code 18). stdout stderr \ typechecker failed (exit code 21). Partition #1 - ghc8.1: Partition #2 - ghc9.2: stdout stderr | 2.139554 | 2 |
data-processing/entities/definitions/model/utils.py | alexkreidler/scholarphi | 0 | 885 | import os
import random
from typing import Any, Dict, List, Union
import numpy as np
import torch
from colorama import Fore, Style
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import precision_score, recall_score
def highlight(input_: Any) -> str:
input_ = str(input_)
return str(Fore.YELLOW + str(input_) + Style.RESET_ALL)
def get_intent_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.intent_label_file), "r", encoding="utf-8"
)
]
def get_slot_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.slot_label_file), "r", encoding="utf-8"
)
]
def get_pos_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.pos_label_file), "r", encoding="utf-8"
)
]
def set_torch_seed(seed: Any, no_cuda: bool) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed) # type: ignore
if not no_cuda and torch.cuda.is_available():
torch.cuda.manual_seed_all(seed) # type: ignore
def compute_metrics(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
assert (
len(intent_preds) == len(intent_labels) == len(slot_preds) == len(slot_labels)
)
results: Dict[Any, Any] = {}
intent_result = get_intent_acc(intent_preds, intent_labels)
slot_result = get_slot_metrics(slot_preds, slot_labels)
sementic_result = get_sentence_frame_acc(
intent_preds, intent_labels, slot_preds, slot_labels
)
# New metrics added following Dan's request.
slot_simple_result = get_slot_simple_metrics(slot_preds, slot_labels)
partial_match_result = get_partial_match_metrics(slot_preds, slot_labels)
results.update(intent_result)
results.update(slot_result)
results.update(sementic_result)
results.update(slot_simple_result)
results.update(partial_match_result)
return results
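# Usage sketch (hypothetical inputs, not part of the original module). The
# intent arguments are assumed to be numpy arrays so that element-wise
# comparison and .mean() work in get_intent_acc below:
#
#     compute_metrics(
#         intent_preds=np.array([1, 0]),
#         intent_labels=np.array([1, 0]),
#         slot_preds=[["B-TERM", "O", "B-DEF"], ["B-TERM", "I-TERM", "B-DEF"]],
#         slot_labels=[["B-TERM", "O", "B-DEF"], ["B-TERM", "O", "B-DEF"]],
#     )
#
# The returned dict merges intent accuracy, per-label and averaged slot
# metrics, sentence-frame accuracy, merged TERM/DEF metrics, and the
# partial/exact match metrics defined further down.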
def simplify_tokens(preds: List[str]) -> List[str]:
simple_preds = []
for p in preds:
if p.endswith("TERM"):
simple_preds.append("TERM")
elif p.endswith("DEF"):
simple_preds.append("DEF")
else:
simple_preds.append(p)
return simple_preds
def get_partial_match_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
Suppose there are N such pairs in the gold data and the system predicts M such pairs. Say a 'partial match' happens when the system predicts a pair <term, defn> and there is some overlap (at least one token) between the predicted and gold term spans AND there is some overlap between the predicted and gold definition spans. Let P be the number of partial matches. Then:
Partial match precision = P/M
Partial match recall = P/N
"""
assert len(preds) == len(labels)
both_in_preds, both_in_labels = [], []
partial_matches, exact_matches = [], []
for pred_sent, label_sent in zip(preds, labels):
simple_pred_sent = simplify_tokens(pred_sent)
simple_label_sent = simplify_tokens(label_sent)
# check whether term/def exist together
both_in_pred = "TERM" in simple_pred_sent and "DEF" in simple_pred_sent
both_in_label = "TERM" in simple_label_sent and "DEF" in simple_label_sent
both_in_preds.append(both_in_pred)
both_in_labels.append(both_in_label)
partial_match = False
exact_match = False
match: List[Union[str, bool]] = []
if both_in_pred and both_in_label:
for p, l in zip(simple_pred_sent, simple_label_sent):
if p == l:
match.append(p)
else:
match.append(False)
if "TERM" in match and "DEF" in match:
partial_match = True
if False not in match:
exact_match = True
partial_matches.append(partial_match)
exact_matches.append(exact_match)
count_both_in_preds = sum(both_in_preds) # M (pairs predicted by the system)
count_both_in_labels = sum(both_in_labels) # N (pairs in the gold data)
count_partial_matches = sum(partial_matches) # P
count_exact_matches = sum(exact_matches) # E
partial_precision = count_partial_matches / count_both_in_preds
partial_recall = count_partial_matches / count_both_in_labels
partial_fscore = (
2 * partial_precision * partial_recall / (partial_precision + partial_recall)
)
exact_precision = count_exact_matches / count_both_in_preds
exact_recall = count_exact_matches / count_both_in_labels
exact_fscore = 2 * exact_precision * exact_recall / (exact_precision + exact_recall)
return {
"partial_match_precision": partial_precision,
"partial_match_recall": partial_recall,
"partial_match_f1": partial_fscore,
"exact_match_precision": exact_precision,
"excat_match_recall": exact_recall,
"excat_match_f1": exact_fscore,
}
def get_slot_simple_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
Conceptually, define the following new types of ‘virtual tags’
TERM = B-term OR I-Term (ie the union of those two tags)
DEF = B-Def OR I-Def
Now, what are the P, R & F1 numbers for TERM and DEF? (I think these matter because users may just care about accuracy of term and defn matching, and the macro-averaged scores conflate other things like recall on these metrics and precision on O. Likewise, the current macro average treats missing the first word in a definition differently from skipping the last word.)
"""
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
# simplify by replacing {B,I}-TERM to TERM and {B,I}-DEF to DEF
simple_preds = simplify_tokens(preds_flattened)
simple_labels = simplify_tokens(labels_flattened)
assert len(simple_preds) == len(simple_labels)
label_names = ["O", "TERM", "DEF"]
p, r, f, s = score(simple_labels, simple_preds, average=None, labels=label_names)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# pprint(per_class)
return {
"slot_merged_TERM_precision": per_class["p"][1],
"slot_merged_TERM_recall": per_class["r"][1],
"slot_merged_TERM_f1": per_class["f"][1],
"slot_merged_DEFINITION_precision": per_class["p"][2],
"slot_merged_DEFINITION_recall": per_class["r"][2],
"slot_merged_DEFINITION_f1": per_class["f"][2],
}
def get_slot_metrics(preds: List[List[str]], labels: List[List[str]]) -> Dict[Any, Any]:
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
macro_f1 = f1_score(labels_flattened, preds_flattened, average="macro")
micro_f1 = f1_score(labels_flattened, preds_flattened, average="micro")
macro_p = precision_score(labels_flattened, preds_flattened, average="macro")
micro_p = precision_score(labels_flattened, preds_flattened, average="micro")
macro_r = recall_score(labels_flattened, preds_flattened, average="macro")
micro_r = recall_score(labels_flattened, preds_flattened, average="micro")
label_names = ["O", "B-TERM", "I-TERM", "B-DEF", "I-DEF"]
p, r, f, s = score(
labels_flattened, preds_flattened, average=None, labels=label_names
)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# print(per_class)
return {
"slot_precision_macro": macro_p,
"slot_recall_macro": macro_r,
"slot_f1_macro": macro_f1,
"slot_precision_micro": micro_p,
"slot_recall_micro": micro_r,
"slot_f1_micro": micro_f1,
"slot_precision_per_label": per_class["p"],
"slot_recal_per_label": per_class["r"],
"slot_f1_per_label": per_class["f"],
"slot_num_per_label": per_class["s"],
}
def get_intent_acc(preds: List[str], labels: List[str]) -> Dict[Any, Any]:
acc = (preds == labels).mean()
return {"intent_acc": acc}
def read_prediction_text(args: Any) -> List[str]:
return [
text.strip()
for text in open(
os.path.join(args.pred_dir, args.pred_input_file), "r", encoding="utf-8"
)
]
def get_sentence_frame_acc(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
"""For the cases that intent and all the slots are correct (in one sentence)"""
# Get the intent comparison result
intent_result = intent_preds == intent_labels
# Get the slot comparision result
slot_result = []
for preds, labels in zip(slot_preds, slot_labels):
assert len(preds) == len(labels)
one_sent_result = True
for p, l in zip(preds, labels):
if p != l:
one_sent_result = False
break
slot_result.append(one_sent_result)
slot_result = np.array(slot_result)
sementic_acc = np.multiply(intent_result, slot_result).mean()
return {"sementic_frame_acc": sementic_acc}
| import os
import random
from typing import Any, Dict, List, Union
import numpy as np
import torch
from colorama import Fore, Style
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import precision_score, recall_score
def highlight(input_: Any) -> str:
input_ = str(input_)
return str(Fore.YELLOW + str(input_) + Style.RESET_ALL)
def get_intent_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.intent_label_file), "r", encoding="utf-8"
)
]
def get_slot_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.slot_label_file), "r", encoding="utf-8"
)
]
def get_pos_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.pos_label_file), "r", encoding="utf-8"
)
]
def set_torch_seed(seed: Any, no_cuda: bool) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed) # type: ignore
if not no_cuda and torch.cuda.is_available():
torch.cuda.manual_seed_all(seed) # type: ignore
def compute_metrics(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
assert (
len(intent_preds) == len(intent_labels) == len(slot_preds) == len(slot_labels)
)
results: Dict[Any, Any] = {}
intent_result = get_intent_acc(intent_preds, intent_labels)
slot_result = get_slot_metrics(slot_preds, slot_labels)
sementic_result = get_sentence_frame_acc(
intent_preds, intent_labels, slot_preds, slot_labels
)
# New metrics added following Dan's request.
slot_simple_result = get_slot_simple_metrics(slot_preds, slot_labels)
partial_match_result = get_partial_match_metrics(slot_preds, slot_labels)
results.update(intent_result)
results.update(slot_result)
results.update(sementic_result)
results.update(slot_simple_result)
results.update(partial_match_result)
return results
def simplify_tokens(preds: List[str]) -> List[str]:
simple_preds = []
for p in preds:
if p.endswith("TERM"):
simple_preds.append("TERM")
elif p.endswith("DEF"):
simple_preds.append("DEF")
else:
simple_preds.append(p)
return simple_preds
def get_partial_match_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
Suppose there are N such pairs in the gold data and the system predicts M such pairs. Say a ‘partial match’ happens when the system predicts a pair <term,defn> and there is some overlap (at least one token) between the predicted and gold term spans AND there is some overlap between the predicted and gold definition spans. Let X be the number of partial matches. What are
Partial match precision = P/M
Partial match recall = P/N
"""
assert len(preds) == len(labels)
both_in_preds, both_in_labels = [], []
partial_matches, exact_matches = [], []
for pred_sent, label_sent in zip(preds, labels):
simple_pred_sent = simplify_tokens(pred_sent)
simple_label_sent = simplify_tokens(label_sent)
# check whether term/def exist together
both_in_pred = "TERM" in simple_pred_sent and "DEF" in simple_pred_sent
both_in_label = "TERM" in simple_label_sent and "DEF" in simple_label_sent
both_in_preds.append(both_in_pred)
both_in_labels.append(both_in_label)
partial_match = False
exact_match = False
match: List[Union[str, bool]] = []
if both_in_pred and both_in_label:
for p, l in zip(simple_pred_sent, simple_label_sent):
if p == l:
match.append(p)
else:
match.append(False)
if "TERM" in match and "DEF" in match:
partial_match = True
if False not in match:
exact_match = True
partial_matches.append(partial_match)
exact_matches.append(exact_match)
count_both_in_preds = sum(both_in_preds) # N
count_both_in_labels = sum(both_in_labels) # M
count_partial_matches = sum(partial_matches) # P
count_exact_matches = sum(exact_matches) # E
partial_precision = count_partial_matches / count_both_in_preds
partial_recall = count_partial_matches / count_both_in_labels
partial_fscore = (
2 * partial_precision * partial_recall / (partial_precision + partial_recall)
)
exact_precision = count_exact_matches / count_both_in_preds
exact_recall = count_exact_matches / count_both_in_labels
exact_fscore = 2 * exact_precision * exact_recall / (exact_precision + exact_recall)
return {
"partial_match_precision": partial_precision,
"partial_match_recall": partial_recall,
"partial_match_f1": partial_fscore,
"exact_match_precision": exact_precision,
"excat_match_recall": exact_recall,
"excat_match_f1": exact_fscore,
}
def get_slot_simple_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
Conceptually, define the following new types of ‘virtual tags’
TERM = B-term OR I-Term (ie the union of those two tags)
DEF = B-Def OR I-Def
Now, what are the P,R & F1 numbers for TERM and DEF? (I think these matter because users may just care about accuracy of term and defn matching and the macro averaged scores conflate other things like recall on these metrics and precision on O. Likewise the current macro average treats missing the first word in a definition differently from skipping the last word.
"""
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
# simplify by replacing {B,I}-TERM to TERM and {B,I}-DEF to DEF
simple_preds = simplify_tokens(preds_flattened)
simple_labels = simplify_tokens(labels_flattened)
assert len(simple_preds) == len(simple_labels)
label_names = ["O", "TERM", "DEF"]
p, r, f, s = score(simple_labels, simple_preds, average=None, labels=label_names)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# pprint(per_class)
return {
"slot_merged_TERM_precision": per_class["p"][1],
"slot_merged_TERM_recall": per_class["r"][1],
"slot_merged_TERM_f1": per_class["f"][1],
"slot_merged_DEFINITION_precision": per_class["p"][2],
"slot_merged_DEFINITION_recall": per_class["r"][2],
"slot_merged_DEFINITION_f1": per_class["f"][2],
}
def get_slot_metrics(preds: List[List[str]], labels: List[List[str]]) -> Dict[Any, Any]:
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
macro_f1 = f1_score(labels_flattened, preds_flattened, average="macro")
micro_f1 = f1_score(labels_flattened, preds_flattened, average="micro")
macro_p = precision_score(labels_flattened, preds_flattened, average="macro")
micro_p = precision_score(labels_flattened, preds_flattened, average="micro")
macro_r = recall_score(labels_flattened, preds_flattened, average="macro")
micro_r = recall_score(labels_flattened, preds_flattened, average="micro")
label_names = ["O", "B-TERM", "I-TERM", "B-DEF", "I-DEF"]
p, r, f, s = score(
labels_flattened, preds_flattened, average=None, labels=label_names
)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# print(per_class)
return {
"slot_precision_macro": macro_p,
"slot_recall_macro": macro_r,
"slot_f1_macro": macro_f1,
"slot_precision_micro": micro_p,
"slot_recall_micro": micro_r,
"slot_f1_micro": micro_f1,
"slot_precision_per_label": per_class["p"],
"slot_recal_per_label": per_class["r"],
"slot_f1_per_label": per_class["f"],
"slot_num_per_label": per_class["s"],
}
def get_intent_acc(preds: List[str], labels: List[str]) -> Dict[Any, Any]:
acc = (preds == labels).mean()
return {"intent_acc": acc}
def read_prediction_text(args: Any) -> List[str]:
return [
text.strip()
for text in open(
os.path.join(args.pred_dir, args.pred_input_file), "r", encoding="utf-8"
)
]
def get_sentence_frame_acc(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
"""For the cases that intent and all the slots are correct (in one sentence)"""
# Get the intent comparison result
intent_result = intent_preds == intent_labels
# Get the slot comparision result
slot_result = []
for preds, labels in zip(slot_preds, slot_labels):
assert len(preds) == len(labels)
one_sent_result = True
for p, l in zip(preds, labels):
if p != l:
one_sent_result = False
break
slot_result.append(one_sent_result)
slot_result = np.array(slot_result)
sementic_acc = np.multiply(intent_result, slot_result).mean()
return {"sementic_frame_acc": sementic_acc}
| en | 0.907449 | # type: ignore # type: ignore # New metrics added following Dan's request. Suppose there are N such pairs in the gold data and the system predicts M such pairs. Say a ‘partial match’ happens when the system predicts a pair <term,defn> and there is some overlap (at least one token) between the predicted and gold term spans AND there is some overlap between the predicted and gold definition spans. Let X be the number of partial matches. What are Partial match precision = P/M Partial match recall = P/N # check whether term/def exist together # N # M # P # E Conceptually, define the following new types of ‘virtual tags’ TERM = B-term OR I-Term (ie the union of those two tags) DEF = B-Def OR I-Def Now, what are the P,R & F1 numbers for TERM and DEF? (I think these matter because users may just care about accuracy of term and defn matching and the macro averaged scores conflate other things like recall on these metrics and precision on O. Likewise the current macro average treats missing the first word in a definition differently from skipping the last word. # flatten # simplify by replacing {B,I}-TERM to TERM and {B,I}-DEF to DEF # pprint(per_class) # flatten # print(per_class) For the cases that intent and all the slots are correct (in one sentence) # Get the intent comparison result # Get the slot comparision result | 2.129081 | 2 |
fire/trace.py | nvhoang55/python-fire | 0 | 886 | <reponame>nvhoang55/python-fire<gh_stars>0
# Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module has classes for tracing the execution of a Fire execution.
A FireTrace consists of a sequence of FireTraceElement objects. Each element
represents an action taken by Fire during a single Fire execution. An action may
be instantiating a class, calling a routine, or accessing a property.
Each action consumes args and results in a new component. The final component
is serialized to stdout by Fire as well as returned by the Fire method. If
a Fire usage error occurs, such as insufficient arguments being provided to call
a function, then that error will be captured in the trace and the final
component will be None.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pipes
from fire import inspectutils
INITIAL_COMPONENT = 'Initial component'
INSTANTIATED_CLASS = 'Instantiated class'
CALLED_ROUTINE = 'Called routine'
CALLED_CALLABLE = 'Called callable'
ACCESSED_PROPERTY = 'Accessed property'
COMPLETION_SCRIPT = 'Generated completion script'
INTERACTIVE_MODE = 'Entered interactive mode'
class FireTrace(object):
"""A FireTrace represents the steps taken during a single Fire execution.
A FireTrace consists of a sequence of FireTraceElement objects. Each element
represents an action taken by Fire during a single Fire execution. An action
may be instantiating a class, calling a routine, or accessing a property.
"""
def __init__(self, initial_component, name=None, separator='-', verbose=False,
show_help=False, show_trace=False):
initial_trace_element = FireTraceElement(
component=initial_component,
action=INITIAL_COMPONENT,
)
self.name = name
self.separator = separator
self.elements = [initial_trace_element]
self.verbose = verbose
self.show_help = show_help
self.show_trace = show_trace
def GetResult(self):
"""Returns the component from the last element of the trace."""
# pytype: disable=attribute-error
return self.GetLastHealthyElement().component
# pytype: enable=attribute-error
def GetLastHealthyElement(self):
"""Returns the last element of the trace that is not an error.
This element will contain the final component indicated by the trace.
Returns:
The last element of the trace that is not an error.
"""
for element in reversed(self.elements):
if not element.HasError():
return element
return None
def HasError(self):
"""Returns whether the Fire execution encountered a Fire usage error."""
return self.elements[-1].HasError()
def AddAccessedProperty(self, component, target, args, filename, lineno):
element = FireTraceElement(
component=component,
action=ACCESSED_PROPERTY,
target=target,
args=args,
filename=filename,
lineno=lineno,
)
self.elements.append(element)
def AddCalledComponent(self, component, target, args, filename, lineno,
capacity, action=CALLED_CALLABLE):
"""Adds an element to the trace indicating that a component was called.
Also applies to instantiating a class.
Args:
component: The result of calling the callable.
target: The name of the callable.
args: The args consumed in order to call this callable.
filename: The file in which the callable is defined, or None if N/A.
lineno: The line number on which the callable is defined, or None if N/A.
capacity: (bool) Whether the callable could have accepted additional args.
action: The value to include as the action in the FireTraceElement.
"""
element = FireTraceElement(
component=component,
action=action,
target=target,
args=args,
filename=filename,
lineno=lineno,
capacity=capacity,
)
self.elements.append(element)
def AddCompletionScript(self, script):
element = FireTraceElement(
component=script,
action=COMPLETION_SCRIPT,
)
self.elements.append(element)
def AddInteractiveMode(self):
element = FireTraceElement(action=INTERACTIVE_MODE)
self.elements.append(element)
def AddError(self, error, args):
element = FireTraceElement(error=error, args=args)
self.elements.append(element)
def AddSeparator(self):
"""Marks that the most recent element of the trace used a separator.
A separator is an argument you can pass to a Fire CLI to separate args left
of the separator from args right of the separator.
Here's an example to demonstrate the separator. Let's say you have a
function that takes a variable number of args, and you want to call that
function, and then upper case the result. Here's how to do it:
# in Python
def display(arg1, arg2='!'):
return arg1 + arg2
# from Bash (the default separator is the hyphen -)
display hello # hello!
display hello upper # helloupper
display hello - upper # HELLO!
Note how the separator caused the display function to be called with the
default value for arg2.
"""
self.elements[-1].AddSeparator()
def _Quote(self, arg):
if arg.startswith('--') and '=' in arg:
prefix, value = arg.split('=', 1)
return pipes.quote(prefix) + '=' + pipes.quote(value)
return pipes.quote(arg)
def GetCommand(self, include_separators=True):
"""Returns the command representing the trace up to this point.
Args:
include_separators: Whether or not to include separators in the command.
Returns:
A string representing a Fire CLI command that would produce this trace.
"""
args = []
if self.name:
args.append(self.name)
for element in self.elements:
if element.HasError():
continue
if element.args:
args.extend(element.args)
if element.HasSeparator() and include_separators:
args.append(self.separator)
if self.NeedsSeparator() and include_separators:
args.append(self.separator)
return ' '.join(self._Quote(arg) for arg in args)
def NeedsSeparator(self):
"""Returns whether a separator should be added to the command.
If the command is a function call, then adding an additional argument to the
command sometimes would add an extra arg to the function call, and sometimes
would add an arg acting on the result of the function call.
This function tells us whether we should add a separator to the command
before adding additional arguments in order to make sure the arg is applied
to the result of the function call, and not the function call itself.
Returns:
Whether a separator should be added to the command in order to keep the
component referred to by the command the same when adding additional args.
"""
element = self.GetLastHealthyElement()
return element.HasCapacity() and not element.HasSeparator()
def __str__(self):
lines = []
for index, element in enumerate(self.elements):
line = '{index}. {trace_string}'.format(
index=index + 1,
trace_string=element,
)
lines.append(line)
return '\n'.join(lines)
def NeedsSeparatingHyphenHyphen(self, flag='help'):
"""Returns whether a the trace need '--' before '--help'.
'--' is needed when the component takes keyword arguments, when the value of
flag matches one of the argument of the component, or the component takes in
keyword-only arguments(e.g. argument with default value).
Args:
flag: the flag available for the trace
Returns:
True for needed '--', False otherwise.
"""
element = self.GetLastHealthyElement()
component = element.component
spec = inspectutils.GetFullArgSpec(component)
return (spec.varkw is not None
or flag in spec.args
or flag in spec.kwonlyargs)
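# Illustrative sketch (not part of the original module): a component that
# accepts **kwargs could swallow '--help' as a keyword argument, so the trace
# reports that a separating '--' is needed. The function below is made up.
#
#   def run(cmd, **options):
#       return cmd, options
#
#   FireTrace(initial_component=run).NeedsSeparatingHyphenHyphen()  # -> True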
class FireTraceElement(object):
"""A FireTraceElement represents a single step taken by a Fire execution.
Examples of a FireTraceElement are the instantiation of a class or the
accessing of an object member.
"""
def __init__(self,
component=None,
action=None,
target=None,
args=None,
filename=None,
lineno=None,
error=None,
capacity=None):
"""Instantiates a FireTraceElement.
Args:
component: The result of this element of the trace.
action: The type of action (e.g. instantiating a class) taking place.
target: (string) The name of the component being acted upon.
args: The args consumed by the represented action.
filename: The file in which the action is defined, or None if N/A.
lineno: The line number on which the action is defined, or None if N/A.
error: The error represented by the action, or None if N/A.
capacity: (bool) Whether the action could have accepted additional args.
"""
self.component = component
self._action = action
self._target = target
self.args = args
self._filename = filename
self._lineno = lineno
self._error = error
self._separator = False
self._capacity = capacity
def HasError(self):
return self._error is not None
def HasCapacity(self):
return self._capacity
def HasSeparator(self):
return self._separator
def AddSeparator(self):
self._separator = True
def ErrorAsStr(self):
return ' '.join(str(arg) for arg in self._error.args)
def __str__(self):
if self.HasError():
return self.ErrorAsStr()
else:
# Format is: {action} "{target}" ({filename}:{lineno})
string = self._action
if self._target is not None:
string += ' "{target}"'.format(target=self._target)
if self._filename is not None:
path = self._filename
if self._lineno is not None:
path += ':{lineno}'.format(lineno=self._lineno)
string += ' ({path})'.format(path=path)
return string
test/unit/__init__.py | thiagodasilva/swift | 0 | 887 | <filename>test/unit/__init__.py
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Swift tests """
from __future__ import print_function
import os
import copy
import logging
import errno
from six.moves import range
import sys
from contextlib import contextmanager, closing
from collections import defaultdict, Iterable
import itertools
from numbers import Number
from tempfile import NamedTemporaryFile
import time
import eventlet
from eventlet.green import socket
from tempfile import mkdtemp
from shutil import rmtree
from swift.common.utils import Timestamp, NOTICE
from test import get_config
from swift.common import swob, utils
from swift.common.ring import Ring, RingData
from hashlib import md5
import logging.handlers
from six.moves.http_client import HTTPException
from swift.common import storage_policy
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
VALID_EC_TYPES)
import functools
import six.moves.cPickle as pickle
from gzip import GzipFile
import mock as mocklib
import inspect
EMPTY_ETAG = md5().hexdigest()
# try not to import this module from swift
if not os.path.basename(sys.argv[0]).startswith('swift'):
# never patch HASH_PATH_SUFFIX AGAIN!
utils.HASH_PATH_SUFFIX = 'endcap'
EC_TYPE_PREFERENCE = [
'liberasurecode_rs_vand',
'jerasure_rs_vand',
]
for eclib_name in EC_TYPE_PREFERENCE:
if eclib_name in VALID_EC_TYPES:
break
else:
raise SystemExit('ERROR: unable to find suitable PyECLib type'
' (none of %r found in %r)' % (
EC_TYPE_PREFERENCE,
VALID_EC_TYPES,
))
DEFAULT_TEST_EC_TYPE = eclib_name
def patch_policies(thing_or_policies=None, legacy_only=False,
with_ec_default=False, fake_ring_args=None):
if isinstance(thing_or_policies, (
Iterable, storage_policy.StoragePolicyCollection)):
return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)
if legacy_only:
default_policies = [
StoragePolicy(0, name='legacy', is_default=True),
]
default_ring_args = [{}]
elif with_ec_default:
default_policies = [
ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
ec_nparity=4, ec_segment_size=4096),
StoragePolicy(1, name='unu'),
]
default_ring_args = [{'replicas': 14}, {}]
else:
default_policies = [
StoragePolicy(0, name='nulo', is_default=True),
StoragePolicy(1, name='unu'),
]
default_ring_args = [{}, {}]
fake_ring_args = fake_ring_args or default_ring_args
decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args)
if not thing_or_policies:
return decorator
else:
# it's a thing, we return the wrapped thing instead of the decorator
return decorator(thing_or_policies)
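# Illustrative sketch (not part of the original module): typical ways the
# patch_policies helper above might be applied in a test module. The class and
# method names here are made up.
#
#   @patch_policies  # patch in the default two-policy setup
#   class TestController(unittest.TestCase):
#       ...
#
#   @patch_policies([StoragePolicy(0, 'zero', is_default=True),
#                    StoragePolicy(1, 'one')])
#   def test_with_custom_policies(self):
#       ...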
class PatchPolicies(object):
"""
Why not mock.patch? When used as a decorator on the class, mock.patch
seemed to patch setUp at the wrong time (i.e. in setUp the global wasn't
patched yet).
"""
def __init__(self, policies, fake_ring_args=None):
if isinstance(policies, storage_policy.StoragePolicyCollection):
self.policies = policies
else:
self.policies = storage_policy.StoragePolicyCollection(policies)
self.fake_ring_args = fake_ring_args or [None] * len(self.policies)
def _setup_rings(self):
"""
Our tests tend to use the policies rings like their own personal
playground - which can be a problem in the particular case of a
patched TestCase class where the FakeRing objects are scoped in the
call to the patch_policies wrapper outside of the TestCase instance,
which can lead to state bleeding between tests.
To help tests get better isolation without having to think about it,
here we're capturing the args required to *build* new FakeRing
instances so we can ensure each test method gets a clean ring setup.
The TestCase can always "tweak" these fresh rings in setUp - or if
they'd prefer to get the same "reset" behavior with custom FakeRing's
they can pass in their own fake_ring_args to patch_policies instead of
setting the object_ring on the policy definitions.
"""
for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
if fake_ring_arg is not None:
policy.object_ring = FakeRing(**fake_ring_arg)
def __call__(self, thing):
if isinstance(thing, type):
return self._patch_class(thing)
else:
return self._patch_method(thing)
def _patch_class(self, cls):
"""
Creating a new class that inherits from the decorated class is the more
common way I've seen class decorators done - but it seems to cause
infinite recursion when super is called from inside methods in the
decorated class.
"""
orig_setUp = cls.setUp
orig_tearDown = cls.tearDown
def setUp(cls_self):
self._orig_POLICIES = storage_policy._POLICIES
if not getattr(cls_self, '_policies_patched', False):
storage_policy._POLICIES = self.policies
self._setup_rings()
cls_self._policies_patched = True
orig_setUp(cls_self)
def tearDown(cls_self):
orig_tearDown(cls_self)
storage_policy._POLICIES = self._orig_POLICIES
cls.setUp = setUp
cls.tearDown = tearDown
return cls
def _patch_method(self, f):
@functools.wraps(f)
def mywrapper(*args, **kwargs):
self._orig_POLICIES = storage_policy._POLICIES
try:
storage_policy._POLICIES = self.policies
self._setup_rings()
return f(*args, **kwargs)
finally:
storage_policy._POLICIES = self._orig_POLICIES
return mywrapper
def __enter__(self):
self._orig_POLICIES = storage_policy._POLICIES
storage_policy._POLICIES = self.policies
def __exit__(self, *args):
storage_policy._POLICIES = self._orig_POLICIES
class FakeRing(Ring):
def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
base_port=1000):
"""
:param part_power: make part calculation based on the path.
If you set a part_power when you set up your FakeRing, the parts you get
out of ring methods will actually be based on the path - otherwise we
exercise the real ring code, but ignore the result and return 1.
"""
self._base_port = base_port
self.max_more_nodes = max_more_nodes
self._part_shift = 32 - part_power
# 9 total nodes (6 more past the initial 3) is the cap, no matter if
# this is set higher, or R^2 for R replicas
self.set_replicas(replicas)
self._reload()
def _reload(self):
self._rtime = time.time()
def set_replicas(self, replicas):
self.replicas = replicas
self._devs = []
for x in range(self.replicas):
ip = '10.0.0.%s' % x
port = self._base_port + x
self._devs.append({
'ip': ip,
'replication_ip': ip,
'port': port,
'replication_port': port,
'device': 'sd' + (chr(ord('a') + x)),
'zone': x % 3,
'region': x % 2,
'id': x,
})
@property
def replica_count(self):
return self.replicas
def _get_part_nodes(self, part):
return [dict(node, index=i) for i, node in enumerate(list(self._devs))]
def get_more_nodes(self, part):
for x in range(self.replicas, (self.replicas + self.max_more_nodes)):
yield {'ip': '10.0.0.%s' % x,
'replication_ip': '10.0.0.%s' % x,
'port': self._base_port + x,
'replication_port': self._base_port + x,
'device': 'sda',
'zone': x % 3,
'region': x % 2,
'id': x}
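# Illustrative sketch (not part of the original module): what a small FakeRing
# looks like from a test's point of view.
#
#   ring = FakeRing(replicas=3, max_more_nodes=2)
#   [d['device'] for d in ring.devs]       # -> ['sda', 'sdb', 'sdc']
#   len(ring.get_part_nodes(0))            # -> 3 primaries, indexed 0..2
#   len(list(ring.get_more_nodes(0)))      # -> 2 extra handoff nodes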
def write_fake_ring(path, *devs):
"""
Pretty much just a two node, two replica, 2 part power ring...
"""
dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': 6000}
dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': 6000}
dev1_updates, dev2_updates = devs or ({}, {})
dev1.update(dev1_updates)
dev2.update(dev2_updates)
replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
devs = [dev1, dev2]
part_shift = 30
with closing(GzipFile(path, 'wb')) as f:
pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)
class FabricatedRing(Ring):
"""
When a FakeRing just won't do - you can fabricate one to meet
your test's needs.
"""
def __init__(self, replicas=6, devices=8, nodes=4, port=6000,
part_power=4):
self.devices = devices
self.nodes = nodes
self.port = port
self.replicas = replicas  # honor the argument instead of hard-coding 6
self.part_power = part_power
self._part_shift = 32 - self.part_power
self._reload()
def _reload(self, *args, **kwargs):
self._rtime = time.time() * 2
if hasattr(self, '_replica2part2dev_id'):
return
self._devs = [{
'region': 1,
'zone': 1,
'weight': 1.0,
'id': i,
'device': 'sda%d' % i,
'ip': '10.0.0.%d' % (i % self.nodes),
'replication_ip': '10.0.0.%d' % (i % self.nodes),
'port': self.port,
'replication_port': self.port,
} for i in range(self.devices)]
self._replica2part2dev_id = [
[None] * 2 ** self.part_power
for i in range(self.replicas)
]
dev_ids = itertools.cycle(range(self.devices))
for p in range(2 ** self.part_power):
for r in range(self.replicas):
self._replica2part2dev_id[r][p] = next(dev_ids)
class FakeMemcache(object):
def __init__(self):
self.store = {}
def get(self, key):
return self.store.get(key)
def keys(self):
return self.store.keys()
def set(self, key, value, time=0):
self.store[key] = value
return True
def incr(self, key, time=0):
self.store[key] = self.store.setdefault(key, 0) + 1
return self.store[key]
@contextmanager
def soft_lock(self, key, timeout=0, retries=5):
yield True
def delete(self, key):
try:
del self.store[key]
except Exception:
pass
return True
def readuntil2crlfs(fd):
rv = ''
lc = ''
crlfs = 0
while crlfs < 2:
c = fd.read(1)
if not c:
raise ValueError("didn't get two CRLFs; just got %r" % rv)
rv = rv + c
if c == '\r' and lc != '\n':
crlfs = 0
if lc == '\r' and c == '\n':
crlfs += 1
lc = c
return rv
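# Illustrative sketch (not part of the original module): readuntil2crlfs is
# handy for pulling a raw HTTP header block off a file-like object.
#
#   import six
#   fd = six.StringIO('HTTP/1.1 204 No Content\r\n\r\nleftover body')
#   readuntil2crlfs(fd)  # -> 'HTTP/1.1 204 No Content\r\n\r\n'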
def connect_tcp(hostport):
rv = socket.socket()
rv.connect(hostport)
return rv
@contextmanager
def tmpfile(content):
with NamedTemporaryFile('w', delete=False) as f:
file_name = f.name
f.write(str(content))
try:
yield file_name
finally:
os.unlink(file_name)
xattr_data = {}
def _get_inode(fd):
if not isinstance(fd, int):
try:
fd = fd.fileno()
except AttributeError:
return os.stat(fd).st_ino
return os.fstat(fd).st_ino
def _setxattr(fd, k, v):
inode = _get_inode(fd)
data = xattr_data.get(inode, {})
data[k] = v
xattr_data[inode] = data
def _getxattr(fd, k):
inode = _get_inode(fd)
data = xattr_data.get(inode, {}).get(k)
if not data:
raise IOError(errno.ENODATA, "Fake IOError")
return data
import xattr
xattr.setxattr = _setxattr
xattr.getxattr = _getxattr
@contextmanager
def temptree(files, contents=''):
# generate enough contents to fill the files
c = len(files)
contents = (list(contents) + [''] * c)[:c]
tempdir = mkdtemp()
for path, content in zip(files, contents):
if os.path.isabs(path):
path = '.' + path
new_path = os.path.join(tempdir, path)
subdir = os.path.dirname(new_path)
if not os.path.exists(subdir):
os.makedirs(subdir)
with open(new_path, 'w') as f:
f.write(str(content))
try:
yield tempdir
finally:
rmtree(tempdir)
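# Illustrative sketch (not part of the original module): temptree creates a
# throw-away directory tree and removes it when the block exits.
#
#   with temptree(['a.conf', 'sub/b.conf'], contents=['[DEFAULT]']) as t:
#       os.path.exists(os.path.join(t, 'sub', 'b.conf'))  # -> True
#   # the whole tree is gone again here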
def with_tempdir(f):
"""
Decorator that gives a single test a tempdir as an extra argument to the
test method.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
tempdir = mkdtemp()
args = list(args)
args.append(tempdir)
try:
return f(*args, **kwargs)
finally:
rmtree(tempdir)
return wrapped
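# Illustrative sketch (not part of the original module): with_tempdir appends
# the temporary directory as the last positional argument of the test. The
# test name below is made up.
#
#   @with_tempdir
#   def test_creates_datadir(self, tempdir):
#       os.makedirs(os.path.join(tempdir, 'objects'))
#       ...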
class NullLoggingHandler(logging.Handler):
def emit(self, record):
pass
class UnmockTimeModule(object):
"""
Even if a test mocks time.time - you can restore unmolested behavior in
another module that imports time directly by monkey patching its imported
reference to the time module with an instance of this class.
"""
_orig_time = time.time
def __getattribute__(self, name):
if name == 'time':
return UnmockTimeModule._orig_time
return getattr(time, name)
# logging.LogRecord.__init__ calls time.time
logging.time = UnmockTimeModule()
class FakeLogger(logging.Logger, object):
# a thread safe fake logger
def __init__(self, *args, **kwargs):
self._clear()
self.name = 'swift.unit.fake_logger'
self.level = logging.NOTSET
if 'facility' in kwargs:
self.facility = kwargs['facility']
self.statsd_client = None
self.thread_locals = None
self.parent = None
store_in = {
logging.ERROR: 'error',
logging.WARNING: 'warning',
logging.INFO: 'info',
logging.DEBUG: 'debug',
logging.CRITICAL: 'critical',
NOTICE: 'notice',
}
def notice(self, msg, *args, **kwargs):
"""
Convenience function for syslog priority LOG_NOTICE. The python
logging level is set to 25, just above info. SysLogHandler is
monkey patched to map this log level to the LOG_NOTICE syslog
priority.
"""
self.log(NOTICE, msg, *args, **kwargs)
def _log(self, level, msg, *args, **kwargs):
store_name = self.store_in[level]
cargs = [msg]
if any(args):
cargs.extend(args)
captured = dict(kwargs)
if 'exc_info' in kwargs and \
not isinstance(kwargs['exc_info'], tuple):
captured['exc_info'] = sys.exc_info()
self.log_dict[store_name].append((tuple(cargs), captured))
super(FakeLogger, self)._log(level, msg, *args, **kwargs)
def _clear(self):
self.log_dict = defaultdict(list)
self.lines_dict = {'critical': [], 'error': [], 'info': [],
'warning': [], 'debug': [], 'notice': []}
clear = _clear # this is a public interface
def get_lines_for_level(self, level):
if level not in self.lines_dict:
raise KeyError(
"Invalid log level '%s'; valid levels are %s" %
(level,
', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
return self.lines_dict[level]
def all_log_lines(self):
return dict((level, msgs) for level, msgs in self.lines_dict.items()
if len(msgs) > 0)
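# Illustrative sketch (not part of the original module): tests usually read
# captured output back out of the fake logger by level.
#
#   logger = FakeLogger()
#   logger.warning('disk %s is full', 'sda1')
#   logger.get_lines_for_level('warning')  # -> ['disk sda1 is full']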
def _store_in(store_name):
def stub_fn(self, *args, **kwargs):
self.log_dict[store_name].append((args, kwargs))
return stub_fn
# mock out the StatsD logging methods:
update_stats = _store_in('update_stats')
increment = _store_in('increment')
decrement = _store_in('decrement')
timing = _store_in('timing')
timing_since = _store_in('timing_since')
transfer_rate = _store_in('transfer_rate')
set_statsd_prefix = _store_in('set_statsd_prefix')
def get_increments(self):
return [call[0][0] for call in self.log_dict['increment']]
def get_increment_counts(self):
counts = {}
for metric in self.get_increments():
if metric not in counts:
counts[metric] = 0
counts[metric] += 1
return counts
def setFormatter(self, obj):
self.formatter = obj
def close(self):
self._clear()
def set_name(self, name):
# don't touch _handlers
self._name = name
def acquire(self):
pass
def release(self):
pass
def createLock(self):
pass
def emit(self, record):
pass
def _handle(self, record):
try:
line = record.getMessage()
except TypeError:
print('WARNING: unable to format log message %r %% %r' % (
record.msg, record.args))
raise
self.lines_dict[record.levelname.lower()].append(line)
def handle(self, record):
self._handle(record)
def flush(self):
pass
def handleError(self, record):
pass
class DebugLogger(FakeLogger):
"""A simple stdout logging version of FakeLogger"""
def __init__(self, *args, **kwargs):
FakeLogger.__init__(self, *args, **kwargs)
self.formatter = logging.Formatter(
"%(server)s %(levelname)s: %(message)s")
def handle(self, record):
self._handle(record)
print(self.formatter.format(record))
class DebugLogAdapter(utils.LogAdapter):
def _send_to_logger(name):
def stub_fn(self, *args, **kwargs):
return getattr(self.logger, name)(*args, **kwargs)
return stub_fn
# delegate to FakeLogger's mocks
update_stats = _send_to_logger('update_stats')
increment = _send_to_logger('increment')
decrement = _send_to_logger('decrement')
timing = _send_to_logger('timing')
timing_since = _send_to_logger('timing_since')
transfer_rate = _send_to_logger('transfer_rate')
set_statsd_prefix = _send_to_logger('set_statsd_prefix')
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.__dict__['logger'], name)
def debug_logger(name='test'):
"""get a named adapted debug logger"""
return DebugLogAdapter(DebugLogger(), name)
original_syslog_handler = logging.handlers.SysLogHandler
def fake_syslog_handler():
for attr in dir(original_syslog_handler):
if attr.startswith('LOG'):
setattr(FakeLogger, attr,
copy.copy(getattr(logging.handlers.SysLogHandler, attr)))
FakeLogger.priority_map = \
copy.deepcopy(logging.handlers.SysLogHandler.priority_map)
logging.handlers.SysLogHandler = FakeLogger
if utils.config_true_value(
get_config('unit_test').get('fake_syslog', 'False')):
fake_syslog_handler()
class MockTrue(object):
"""
Instances of MockTrue evaluate like True
Any attr accessed on an instance of MockTrue will return a MockTrue
instance. Any method called on an instance of MockTrue will return
a MockTrue instance.
>>> thing = MockTrue()
>>> thing
True
>>> thing == True # True == True
True
>>> thing == False # True == False
False
>>> thing != True # True != True
False
>>> thing != False # True != False
True
>>> thing.attribute
True
>>> thing.method()
True
>>> thing.attribute.method()
True
>>> thing.method().attribute
True
"""
def __getattribute__(self, *args, **kwargs):
return self
def __call__(self, *args, **kwargs):
return self
def __repr__(*args, **kwargs):
return repr(True)
def __eq__(self, other):
return other is True
def __ne__(self, other):
return other is not True
@contextmanager
def mock(update):
returns = []
deletes = []
for key, value in update.items():
imports = key.split('.')
attr = imports.pop(-1)
module = __import__(imports[0], fromlist=imports[1:])
for modname in imports[1:]:
module = getattr(module, modname)
if hasattr(module, attr):
returns.append((module, attr, getattr(module, attr)))
else:
deletes.append((module, attr))
setattr(module, attr, value)
try:
yield True
finally:
for module, attr, value in returns:
setattr(module, attr, value)
for module, attr in deletes:
delattr(module, attr)
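# Illustrative sketch (not part of the original module): mock() temporarily
# rebinds dotted attribute paths and restores (or deletes) them on exit. The
# patched value below is made up.
#
#   with mock({'swift.common.utils.HASH_PATH_PREFIX': 'startcap'}):
#       pass  # code under test sees the patched value here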
class FakeStatus(object):
"""
This will work with our fake_http_connect: if you hand in one of these
instead of a status int or status int tuple to the "codes" iter, you can
add some eventlet sleep to the expect and response stages of the
connection.
"""
def __init__(self, status, expect_sleep=None, response_sleep=None):
"""
:param status: the response status int, or a tuple of
([expect_status, ...], response_status)
:param expect_sleep: float, time to eventlet sleep during expect, can
be a iter of floats
:param response_sleep: float, time to eventlet sleep during response
"""
# connect exception
if isinstance(status, (Exception, eventlet.Timeout)):
raise status
if isinstance(status, tuple):
self.expect_status = list(status[:-1])
self.status = status[-1]
self.explicit_expect_list = True
else:
self.expect_status, self.status = ([], status)
self.explicit_expect_list = False
if not self.expect_status:
# when a swift backend service returns a status before reading
# from the body (mostly an error response) eventlet.wsgi will
# respond with that status line immediately instead of 100
# Continue, even if the client sent the Expect 100 header.
# BufferedHttp and the proxy both see these error statuses
# when they call getexpect, so our FakeConn tries to act like
# our backend services and return certain types of responses
# as expect statuses just like a real backend server would do.
if self.status in (507, 412, 409):
self.expect_status = [status]
else:
self.expect_status = [100, 100]
# setup sleep attributes
if not isinstance(expect_sleep, (list, tuple)):
expect_sleep = [expect_sleep] * len(self.expect_status)
self.expect_sleep_list = list(expect_sleep)
while len(self.expect_sleep_list) < len(self.expect_status):
self.expect_sleep_list.append(None)
self.response_sleep = response_sleep
def get_response_status(self):
if self.response_sleep is not None:
eventlet.sleep(self.response_sleep)
if self.expect_status and self.explicit_expect_list:
raise Exception('Test did not consume all fake '
'expect status: %r' % (self.expect_status,))
if isinstance(self.status, (Exception, eventlet.Timeout)):
raise self.status
return self.status
def get_expect_status(self):
expect_sleep = self.expect_sleep_list.pop(0)
if expect_sleep is not None:
eventlet.sleep(expect_sleep)
expect_status = self.expect_status.pop(0)
if isinstance(expect_status, (Exception, eventlet.Timeout)):
raise expect_status
return expect_status
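# Illustrative sketch (not in the original module): FakeStatus instances can
# be handed to fake_http_connect (defined further down) in place of plain
# status ints to inject eventlet sleeps; the timings here are arbitrary.
def _example_fake_status_usage():
    slow_ok = FakeStatus(201, expect_sleep=0.1, response_sleep=0.5)
    # the first simulated backend is slow, the other two respond normally
    return fake_http_connect(slow_ok, 201, 201)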
class SlowBody(object):
"""
    This works with our fake_http_connect: hand these in instead of strings
    and reads take longer by the given amount. It should be a little easier
    to extend than the current slow kwarg, which inserts whitespace into the
    response. It also makes it easy to detect whether FakeConn's body is one
    of these (or a subclass), should we ever want something smarter than
    duck-typing just enough of the str/buffer API to get by.
"""
def __init__(self, body, slowness):
self.body = body
self.slowness = slowness
def slowdown(self):
eventlet.sleep(self.slowness)
def __getitem__(self, s):
return SlowBody(self.body[s], self.slowness)
def __len__(self):
return len(self.body)
def __radd__(self, other):
self.slowdown()
return other + self.body
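# Illustrative sketch (not in the original module): wrapping a response body
# in SlowBody is meant to make reads of it take longer by the given amount
# (the sleep fires in __radd__ when a chunk is concatenated onto a str).
def _example_slow_body_usage():
    body = SlowBody('slow response body', slowness=0.01)
    return fake_http_connect(200, body=body)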
def fake_http_connect(*code_iter, **kwargs):
class FakeConn(object):
def __init__(self, status, etag=None, body='', timestamp='1',
headers=None, expect_headers=None, connection_id=None,
give_send=None):
if not isinstance(status, FakeStatus):
status = FakeStatus(status)
self._status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.sent = 0
self.received = 0
self.etag = etag
self.body = body
self.headers = headers or {}
self.expect_headers = expect_headers or {}
self.timestamp = timestamp
self.connection_id = connection_id
self.give_send = give_send
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
try:
self._next_sleep = kwargs['slow'].pop(0)
except IndexError:
self._next_sleep = None
# be nice to trixy bits with node_iter's
eventlet.sleep()
def getresponse(self):
exc = kwargs.get('raise_exc')
if exc:
if isinstance(exc, (Exception, eventlet.Timeout)):
raise exc
raise Exception('test')
if kwargs.get('raise_timeout_exc'):
raise eventlet.Timeout()
self.status = self._status.get_response_status()
return self
def getexpect(self):
expect_status = self._status.get_expect_status()
headers = dict(self.expect_headers)
if expect_status == 409:
headers['X-Backend-Timestamp'] = self.timestamp
response = FakeConn(expect_status,
timestamp=self.timestamp,
headers=headers)
response.status = expect_status
return response
def getheaders(self):
etag = self.etag
if not etag:
if isinstance(self.body, str):
etag = '"' + md5(self.body).hexdigest() + '"'
else:
etag = '"68b329da9893e34099c7d8ad5cb9c940"'
headers = swob.HeaderKeyDict({
'content-length': len(self.body),
'content-type': 'x-application/test',
'x-timestamp': self.timestamp,
'x-backend-timestamp': self.timestamp,
'last-modified': self.timestamp,
'x-object-meta-test': 'testing',
'x-delete-at': '9876543210',
'etag': etag,
'x-works': 'yes',
})
if self.status // 100 == 2:
headers['x-account-container-count'] = \
kwargs.get('count', 12345)
if not self.timestamp:
# when timestamp is None, HeaderKeyDict raises KeyError
headers.pop('x-timestamp', None)
try:
if next(container_ts_iter) is False:
headers['x-container-timestamp'] = '1'
except StopIteration:
pass
am_slow, value = self.get_slow()
if am_slow:
headers['content-length'] = '4'
headers.update(self.headers)
return headers.items()
def get_slow(self):
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
if self._next_sleep is not None:
return True, self._next_sleep
else:
return False, 0.01
if kwargs.get('slow') and isinstance(kwargs['slow'], Number):
return True, kwargs['slow']
return bool(kwargs.get('slow')), 0.1
def read(self, amt=None):
am_slow, value = self.get_slow()
if am_slow:
if self.sent < 4:
self.sent += 1
eventlet.sleep(value)
return ' '
rv = self.body[:amt]
self.body = self.body[amt:]
return rv
def send(self, amt=None):
if self.give_send:
self.give_send(self.connection_id, amt)
am_slow, value = self.get_slow()
if am_slow:
if self.received < 4:
self.received += 1
eventlet.sleep(value)
def getheader(self, name, default=None):
return swob.HeaderKeyDict(self.getheaders()).get(name, default)
def close(self):
pass
timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
if isinstance(kwargs.get('headers'), (list, tuple)):
headers_iter = iter(kwargs['headers'])
else:
headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
if isinstance(kwargs.get('expect_headers'), (list, tuple)):
expect_headers_iter = iter(kwargs['expect_headers'])
else:
expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
len(code_iter))
x = kwargs.get('missing_container', [False] * len(code_iter))
if not isinstance(x, (tuple, list)):
x = [x] * len(code_iter)
container_ts_iter = iter(x)
code_iter = iter(code_iter)
conn_id_and_code_iter = enumerate(code_iter)
static_body = kwargs.get('body', None)
body_iter = kwargs.get('body_iter', None)
if body_iter:
body_iter = iter(body_iter)
def connect(*args, **ckwargs):
if kwargs.get('slow_connect', False):
eventlet.sleep(0.1)
if 'give_content_type' in kwargs:
if len(args) >= 7 and 'Content-Type' in args[6]:
kwargs['give_content_type'](args[6]['Content-Type'])
else:
kwargs['give_content_type']('')
i, status = next(conn_id_and_code_iter)
if 'give_connect' in kwargs:
give_conn_fn = kwargs['give_connect']
argspec = inspect.getargspec(give_conn_fn)
if argspec.keywords or 'connection_id' in argspec.args:
ckwargs['connection_id'] = i
give_conn_fn(*args, **ckwargs)
etag = next(etag_iter)
headers = next(headers_iter)
expect_headers = next(expect_headers_iter)
timestamp = next(timestamps_iter)
if status <= 0:
raise HTTPException()
if body_iter is None:
body = static_body or ''
else:
body = next(body_iter)
return FakeConn(status, etag, body=body, timestamp=timestamp,
headers=headers, expect_headers=expect_headers,
connection_id=i, give_send=kwargs.get('give_send'))
connect.code_iter = code_iter
return connect
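# Illustrative sketch (not in the original module): a typical test builds a
# connect stub that answers successive backend requests with the programmed
# statuses; the kwargs shown (timestamps, body) are ones fake_http_connect
# already understands.
def _example_fake_http_connect_usage():
    connect = fake_http_connect(200, 200, 503,
                                timestamps=['1', '1', '1'],
                                body='fake object body')
    conn = connect('1.2.3.4', '1234', 'sda', 0, 'GET', '/a/c/o')
    return conn.getresponse().status  # 200 from the first simulated backend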
@contextmanager
def mocked_http_conn(*args, **kwargs):
requests = []
def capture_requests(ip, port, method, path, headers, qs, ssl):
req = {
'ip': ip,
'port': port,
'method': method,
'path': path,
'headers': headers,
'qs': qs,
'ssl': ssl,
}
requests.append(req)
kwargs.setdefault('give_connect', capture_requests)
fake_conn = fake_http_connect(*args, **kwargs)
fake_conn.requests = requests
with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
new=fake_conn):
yield fake_conn
left_over_status = list(fake_conn.code_iter)
if left_over_status:
raise AssertionError('left over status %r' % left_over_status)
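# Illustrative sketch (not in the original module): mocked_http_conn patches
# swift.common.bufferedhttp.http_connect_raw, records every request made
# through it, and asserts on exit that all programmed statuses were consumed.
# Calling the stub directly, as below, just demonstrates the bookkeeping.
def _example_mocked_http_conn_usage():
    with mocked_http_conn(204) as fake_conn:
        conn = fake_conn('10.0.0.1', 6000, 'PUT', '/sda/0/a/c/o',
                         {'X-Timestamp': '1'}, '', False)
        conn.getresponse()
    return fake_conn.requests  # one captured request dict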
def make_timestamp_iter():
return iter(Timestamp(t) for t in itertools.count(int(time.time())))
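# Illustrative sketch (not in the original module): each next() on the
# iterator yields a strictly increasing Timestamp seeded from the wall clock.
def _example_make_timestamp_iter_usage():
    ts_iter = make_timestamp_iter()
    first, second = next(ts_iter), next(ts_iter)
    assert second > first
    return first, second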
| <filename>test/unit/__init__.py
| en | 0.876938 | 1.778024 | 2 |
fairseq/models/bart/model.py | samsontmr/fairseq | 172 | 888 | <reponame>samsontmr/fairseq<gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BART: Denoising Sequence-to-Sequence Pre-training for
Natural Language Generation, Translation, and Comprehension
"""
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import BARTHubInterface
@register_model('bart')
class BARTModel(TransformerModel):
@classmethod
def hub_models(cls):
return {
'bart.large': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz',
'bart.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz',
}
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
super(BARTModel, BARTModel).add_args(parser)
parser.add_argument(
'--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence'
)
parser.add_argument(
'--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence'
)
parser.add_argument(
'--pooler-dropout', type=float, metavar='D',
help='dropout probability in the masked_lm pooler layers'
)
parser.add_argument(
'--pooler-activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use for pooler layer'
)
@property
def supported_targets(self):
return {'self'}
def forward(
self, src_tokens, src_lengths, prev_output_tokens,
features_only=False, classification_head_name=None, **kwargs
):
if classification_head_name is not None:
features_only = True
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
**kwargs,
)
x, extra = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
**kwargs,
)
if classification_head_name is not None:
sentence_representation = x[
src_tokens.eq(self.encoder.dictionary.eos()), :
].view(x.size(0), -1, x.size(-1))[:, -1, :]
x = self.classification_heads[classification_head_name](
sentence_representation
)
return x, extra
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
bpe='gpt2',
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return BARTHubInterface(x['args'], x['task'], x['models'][0])
def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):
"""Register a classification head."""
print("Registering classification head: {0}".format(name))
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
print(
'WARNING: re-registering head "{}" with num_classes {} (prev: {}) '
'and inner_dim {} (prev: {})'.format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = BARTClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + '.' if name != '' else ''
current_head_names = [] if not hasattr(self, 'classification_heads') else \
self.classification_heads.keys()
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + 'classification_heads.'):
continue
head_name = k[len(prefix + 'classification_heads.'):].split('.')[0]
num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0)
inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0)
if getattr(self.args, 'load_checkpoint_heads', False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
print(
'WARNING: deleting classification head ({}) from checkpoint '
'not present in current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes != self.classification_heads[head_name].out_proj.out_features
or inner_dim != self.classification_heads[head_name].dense.out_features
):
print(
'WARNING: deleting classification head ({}) from checkpoint '
'with different dimensions than current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, 'classification_heads'):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + 'classification_heads.' + k not in state_dict:
print('Overwriting', prefix + 'classification_heads.' + k)
state_dict[prefix + 'classification_heads.' + k] = v
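# Illustrative sketch (not part of this file): loading a released checkpoint
# through the hub interface and running an MNLI prediction. The paths are
# assumptions; 'bart.large.mnli' is one of the archives listed in hub_models().
def _example_bart_mnli_usage():
    bart = BARTModel.from_pretrained(
        'checkpoints/', checkpoint_file='model.pt',
        data_name_or_path='MNLI-bin')
    bart.eval()
    tokens = bart.encode('BART is denoising.', 'BART is not denoising.')
    return bart.predict('mnli', tokens).argmax()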
class BARTClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
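# Illustrative sketch (not part of this file): the head maps a pooled <eos>
# representation to class logits; the sizes below are arbitrary.
def _example_classification_head():
    import torch
    head = BARTClassificationHead(
        input_dim=1024, inner_dim=1024, num_classes=3,
        activation_fn='tanh', pooler_dropout=0.0)
    sentence_representation = torch.randn(8, 1024)  # batch of 8 pooled states
    return head(sentence_representation)  # -> (8, 3) logits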
@register_model_architecture('bart', 'bart_large')
def bart_large_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*1024)
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 12)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.relu_dropout = getattr(args, 'relu_dropout', 0.)
args.dropout = getattr(args, 'dropout', 0.1)
args.max_target_positions = getattr(args, 'max_target_positions', 1024)
args.max_source_positions = getattr(args, 'max_source_positions', 1024)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', True)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, 'no_scale_embedding', True)
args.layernorm_embedding = getattr(args, 'layernorm_embedding', True)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
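# Illustrative sketch (not part of this file): bart_large_architecture only
# fills in defaults for attributes that are not already set, so its effect
# can be inspected with a bare Namespace.
def _example_architecture_defaults():
    from argparse import Namespace
    args = Namespace(dropout=0.3)  # an explicit value is preserved
    bart_large_architecture(args)
    return args.encoder_layers, args.decoder_layers, args.dropout  # (12, 12, 0.3)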
| en | 0.890949 | 2.1471 | 2 |
tracker/view/error.py | cmm1107/arch-security-tracker | 0 | 889 | <gh_stars>0
from binascii import hexlify
from functools import wraps
from logging import error
from os import urandom
from random import randint
from flask import make_response
from flask import render_template
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import Gone
from werkzeug.exceptions import InternalServerError
from werkzeug.exceptions import MethodNotAllowed
from werkzeug.exceptions import NotFound
from config import get_debug_flag
from tracker import tracker
from tracker.symbol import smileys_sad
error_handlers = []
def errorhandler(code_or_exception):
def decorator(func):
error_handlers.append({'func': func, 'code_or_exception': code_or_exception})
@wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
return decorator
def handle_error(e, code, json=False):
if json:
return {'message': e}, code
return make_response(render_template('error.html',
smiley=smileys_sad[randint(0, len(smileys_sad) - 1)],
text=e,
title='{}'.format(code)), code)
@errorhandler(NotFound.code)
def not_found(e='404: Not Found', json=False):
return handle_error(e if 'check your spelling' not in '{}'.format(e) else '404: Not Found', NotFound.code, json)
@errorhandler(Forbidden.code)
def forbidden(e='403: Forbidden', json=False):
return handle_error(e, Forbidden.code, json)
@errorhandler(MethodNotAllowed.code)
def method_not_allowed(e='405: Method Not Allowed', json=False):
return handle_error(e, MethodNotAllowed.code, json)
@errorhandler(Gone.code)
def gone(e='410: Gone', json=False):
return handle_error(e, Gone.code, json)
@errorhandler(BadRequest.code)
def bad_request(e='400: Bad Request', json=False):
return handle_error(e, BadRequest.code, json)
@errorhandler(Exception)
@errorhandler(InternalServerError.code)
def internal_error(e):
if get_debug_flag():
raise e
code = hexlify(urandom(4)).decode()
error(Exception("Code: {}".format(code), e), exc_info=True)
text = '500: Deep Shit\n{}'.format(code)
return handle_error(text, InternalServerError.code)
| from binascii import hexlify
from functools import wraps
from logging import error
from os import urandom
from random import randint
from flask import make_response
from flask import render_template
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import Gone
from werkzeug.exceptions import InternalServerError
from werkzeug.exceptions import MethodNotAllowed
from werkzeug.exceptions import NotFound
from config import get_debug_flag
from tracker import tracker
from tracker.symbol import smileys_sad
error_handlers = []
def errorhandler(code_or_exception):
def decorator(func):
error_handlers.append({'func': func, 'code_or_exception': code_or_exception})
@wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
return decorator
def handle_error(e, code, json=False):
if json:
return {'message': e}, code
return make_response(render_template('error.html',
smiley=smileys_sad[randint(0, len(smileys_sad) - 1)],
text=e,
title='{}'.format(code)), code)
@errorhandler(NotFound.code)
def not_found(e='404: Not Found', json=False):
return handle_error(e if 'check your spelling' not in '{}'.format(e) else '404: Not Found', NotFound.code, json)
@errorhandler(Forbidden.code)
def forbidden(e='403: Forbidden', json=False):
return handle_error(e, Forbidden.code, json)
@errorhandler(MethodNotAllowed.code)
def method_not_allowed(e='405: Method Not Allowed', json=False):
return handle_error(e, MethodNotAllowed.code, json)
@errorhandler(Gone.code)
def gone(e='410: Gone', json=False):
return handle_error(e, Gone.code, json)
@errorhandler(BadRequest.code)
def bad_request(e='400: Bad Request', json=False):
return handle_error(e, BadRequest.code, json)
@errorhandler(Exception)
@errorhandler(InternalServerError.code)
def internal_error(e):
if get_debug_flag():
raise e
code = hexlify(urandom(4)).decode()
error(Exception("Code: {}".format(code), e), exc_info=True)
text = '500: Deep Shit\n{}'.format(code)
return handle_error(text, InternalServerError.code) | none | 1 | 2.016932 | 2 |
|
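# Minimal usage sketch for the error_handlers registry defined above. It assumes a
# Flask application object is available; the app name here is hypothetical and not
# part of the tracker code shown above.
from flask import Flask

app = Flask(__name__)

# Every @errorhandler call above stored a dict pairing the handler function with the
# status code or exception class it handles; bind each pair onto the application.
for entry in error_handlers:
    app.register_error_handler(entry['code_or_exception'], entry['func'])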
tb_plugin/torch_tb_profiler/profiler/trace.py | azhou-determined/kineto | 0 | 890 | <gh_stars>0
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from enum import IntEnum
from .. import utils
__all__ = ["EventTypes", "create_event"]
logger = utils.get_logger()
DeviceType = IntEnum('DeviceType', ['CPU', 'CUDA'], start=0)
class EventTypes(object):
TRACE = "Trace"
OPERATOR = "Operator"
PROFILER_STEP = "ProfilerStep"
RUNTIME = "Runtime"
KERNEL = "Kernel"
MEMCPY = "Memcpy"
MEMSET = "Memset"
PYTHON = "Python"
MEMORY = "Memory"
Supported_EventTypes = [v for k, v in vars(EventTypes).items() if not k.startswith("_") and v != EventTypes.PROFILER_STEP]
class BaseEvent(object):
def __init__(self, type, data):
self.type = type
self.name = data.get("name")
self.ts = data.get("ts")
self.pid = data.get("pid")
self.tid = data.get("tid")
self.args = data.get("args", {})
class TraceEvent(BaseEvent):
def __init__(self, type, data):
super().__init__(type, data)
self.category = data.get("cat", "")
self.duration = data.get("dur")
@property
def external_id(self):
extern_id = self.args.get("external id")
if extern_id is None:
extern_id = self.args.get("External id")
return extern_id
@property
def callstack(self):
return self.args.get("Call stack", "")
@property
def input_shape(self):
shape = self.args.get("Input Dims")
if shape is None:
shape = self.args.get("Input dims")
return shape
@property
def input_type(self):
return self.args.get("Input type")
class ProfilerStepEvent(TraceEvent):
def __init__(self, data):
super().__init__(EventTypes.PROFILER_STEP, data)
# torch.profiler.profile.step will invoke record_function with name like "ProfilerStep#5"
self.step = int(self.name.split("#")[1])
class MemoryEvent(BaseEvent):
def __init__(self, type, data):
super().__init__(type, data)
self.scope = data.get("s", "")
@property
def device_type(self):
dtype = self.args.get("Device Type")
if dtype is None:
return None
try:
return DeviceType(dtype)
except ValueError:
return None
@property
def device_id(self):
return self.args.get("Device Id")
@property
def bytes(self):
return self.args.get("Bytes", 0)
def create_event(event):
try:
type = event.get("ph")
if type == "X":
return create_trace_event(event)
elif type == "i" and event.get('s') == 't':
return MemoryEvent(EventTypes.MEMORY, event)
else:
return None
except Exception as ex:
logger.warning("Failed to parse profile event. Exception=%s. Event=%s", ex, event, exc_info=True)
raise
def create_trace_event(event):
category = event.get("cat")
if category == "Operator":
name = event.get("name")
if name and name.startswith("ProfilerStep#"):
return ProfilerStepEvent(event)
if category in Supported_EventTypes:
return TraceEvent(category, event)
else:
return None
| # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from enum import IntEnum
from .. import utils
__all__ = ["EventTypes", "create_event"]
logger = utils.get_logger()
DeviceType = IntEnum('DeviceType', ['CPU', 'CUDA'], start=0)
class EventTypes(object):
TRACE = "Trace"
OPERATOR = "Operator"
PROFILER_STEP = "ProfilerStep"
RUNTIME = "Runtime"
KERNEL = "Kernel"
MEMCPY = "Memcpy"
MEMSET = "Memset"
PYTHON = "Python"
MEMORY = "Memory"
Supported_EventTypes = [v for k, v in vars(EventTypes).items() if not k.startswith("_") and v != EventTypes.PROFILER_STEP]
class BaseEvent(object):
def __init__(self, type, data):
self.type = type
self.name = data.get("name")
self.ts = data.get("ts")
self.pid = data.get("pid")
self.tid = data.get("tid")
self.args = data.get("args", {})
class TraceEvent(BaseEvent):
def __init__(self, type, data):
super().__init__(type, data)
self.category = data.get("cat", "")
self.duration = data.get("dur")
@property
def external_id(self):
extern_id = self.args.get("external id")
if extern_id is None:
extern_id = self.args.get("External id")
return extern_id
@property
def callstack(self):
return self.args.get("Call stack", "")
@property
def input_shape(self):
shape = self.args.get("Input Dims")
if shape is None:
shape = self.args.get("Input dims")
return shape
@property
def input_type(self):
return self.args.get("Input type")
class ProfilerStepEvent(TraceEvent):
def __init__(self, data):
super().__init__(EventTypes.PROFILER_STEP, data)
# torch.profiler.profile.step will invoke record_function with name like "ProfilerStep#5"
self.step = int(self.name.split("#")[1])
class MemoryEvent(BaseEvent):
def __init__(self, type, data):
super().__init__(type, data)
self.scope = data.get("s", "")
@property
def device_type(self):
dtype = self.args.get("Device Type")
if dtype is None:
return None
try:
return DeviceType(dtype)
except ValueError:
return None
@property
def device_id(self):
return self.args.get("Device Id")
@property
def bytes(self):
return self.args.get("Bytes", 0)
def create_event(event):
try:
type = event.get("ph")
if type == "X":
return create_trace_event(event)
elif type == "i" and event.get('s') == 't':
return MemoryEvent(EventTypes.MEMORY, event)
else:
return None
except Exception as ex:
logger.warning("Failed to parse profile event. Exception=%s. Event=%s", ex, event, exc_info=True)
raise
def create_trace_event(event):
category = event.get("cat")
if category == "Operator":
name = event.get("name")
if name and name.startswith("ProfilerStep#"):
return ProfilerStepEvent(event)
if category in Supported_EventTypes:
return TraceEvent(category, event)
else:
return None | en | 0.332603 | # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # -------------------------------------------------------------------------- # torch.profiler.profile.step will invoke record_function with name like "ProfilerStep#5" #"): | 2.079454 | 2 |
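# Illustrative sketch (not part of the plugin module above): running one Chrome-trace
# record, written as a plain dict, through create_event. All field values below are
# made up for demonstration.
sample = {
    "ph": "X",                      # complete events ("X") become TraceEvent objects
    "cat": "Kernel",
    "name": "gemm_kernel",
    "ts": 1000,
    "dur": 250,
    "pid": 0,
    "tid": 7,
    "args": {"External id": 42},
}

event = create_event(sample)
assert event.type == EventTypes.KERNEL      # category string maps onto EventTypes
assert event.duration == 250                # "dur" is exposed as duration
assert event.external_id == 42              # falls back to the "External id" key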
base/base.gyp | eval1749/elang | 1 | 891 | <reponame>eval1749/elang
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../build/win_precompile.gypi',
'base.gypi',
],
'targets': [
{
'target_name': 'base',
'type': '<(component)',
'toolsets': ['host', 'target'],
'variables': {
'base_target': 1,
'enable_wexit_time_destructors': 1,
'optimize': 'max',
},
'dependencies': [
'base_static',
'allocator/allocator.gyp:allocator_extension_thunks',
'../testing/gtest.gyp:gtest_prod',
'../third_party/modp_b64/modp_b64.gyp:modp_b64',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
],
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 64-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'conditions': [
['desktop_linux == 1 or chromeos == 1', {
'conditions': [
['chromeos==1', {
'sources/': [ ['include', '_chromeos\\.cc$'] ]
}],
],
'dependencies': [
'symbolize',
'xdg_mime',
],
'defines': [
'USE_SYMBOLIZE',
],
}, { # desktop_linux == 0 and chromeos == 0
'sources/': [
['exclude', '/xdg_user_dirs/'],
['exclude', '_nss\\.cc$'],
],
}],
['use_glib==1', {
'dependencies': [
'../build/linux/system.gyp:glib',
],
'export_dependent_settings': [
'../build/linux/system.gyp:glib',
],
}],
['OS == "android" and _toolset == "host"', {
# Always build base as a static_library for host toolset, even if
# we're doing a component build. Specifically, we only care about the
# target toolset using components since that's what developers are
# focusing on. In theory we should do this more generally for all
# targets when building for host, but getting the gyp magic
# per-toolset for the "component" variable is hard, and we really only
# need base on host.
'type': 'static_library',
# Base for host support is the minimum required to run the
# ssl false start blacklist tool. It requires further changes
# to generically support host builds (and tests).
# Note: when building for host, gyp has OS == "android",
# hence the *_android.cc files are included but the actual code
# doesn't have OS_ANDROID / ANDROID defined.
'conditions': [
['host_os == "mac"', {
'sources/': [
['exclude', '^native_library_linux\\.cc$'],
['exclude', '^process_util_linux\\.cc$'],
['exclude', '^sys_info_linux\\.cc$'],
['exclude', '^sys_string_conversions_linux\\.cc$'],
['exclude', '^worker_pool_linux\\.cc$'],
],
}],
],
}],
['OS == "android" and _toolset == "target"', {
'dependencies': [
'base_java',
'base_jni_headers',
'../build/android/ndk.gyp:cpu_features',
'../third_party/ashmem/ashmem.gyp:ashmem',
],
'link_settings': {
'libraries': [
'-llog',
],
},
'sources!': [
'debug/stack_trace_posix.cc',
],
}],
['os_bsd==1', {
'include_dirs': [
'/usr/local/include',
],
'link_settings': {
'libraries': [
'-L/usr/local/lib -lexecinfo',
],
},
}],
['OS == "linux"', {
'link_settings': {
'libraries': [
# We need rt for clock_gettime().
'-lrt',
# For 'native_library_linux.cc'
'-ldl',
],
},
'conditions': [
['use_allocator!="tcmalloc"', {
'defines': [
'NO_TCMALLOC',
],
'direct_dependent_settings': {
'defines': [
'NO_TCMALLOC',
],
},
}],
],
}],
['OS == "win"', {
# Specify delayload for base.dll.
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
# Specify delayload for components that link with base.lib.
'all_dependent_settings': {
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
},
'copies': [
{
'destination': '<(PRODUCT_DIR)/',
'files': [
'../build/win/dbghelp_xp/dbghelp.dll',
],
},
],
'dependencies': [
'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
],
}],
['OS == "mac" or (OS == "ios" and _toolset == "host")', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
'$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework',
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
],
},
}],
['OS == "ios" and _toolset != "host"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreGraphics.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreText.framework',
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
],
},
}],
['OS != "win" and (OS != "ios" or _toolset == "host")', {
'dependencies': ['../third_party/libevent/libevent.gyp:libevent'],
},],
['component=="shared_library"', {
'conditions': [
['OS=="win"', {
'sources!': [
'debug/debug_on_start_win.cc',
],
}],
],
}],
['OS=="ios"', {
'sources!': [
'sync_socket.h',
'sync_socket_posix.cc',
]
}],
],
'sources': [
'auto_reset.h',
'linux_util.cc',
'linux_util.h',
'message_loop/message_pump_android.cc',
'message_loop/message_pump_android.h',
'message_loop/message_pump_glib.cc',
'message_loop/message_pump_glib.h',
'message_loop/message_pump_io_ios.cc',
'message_loop/message_pump_io_ios.h',
'message_loop/message_pump_libevent.cc',
'message_loop/message_pump_libevent.h',
'message_loop/message_pump_mac.h',
'message_loop/message_pump_mac.mm',
'metrics/field_trial.cc',
'metrics/field_trial.h',
'posix/file_descriptor_shuffle.cc',
'posix/file_descriptor_shuffle.h',
'sync_socket.h',
'sync_socket_posix.cc',
'sync_socket_win.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'base_i18n',
'type': '<(component)',
'variables': {
'enable_wexit_time_destructors': 1,
'optimize': 'max',
'base_i18n_target': 1,
},
'dependencies': [
'base',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
],
'conditions': [
['OS == "win"', {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [
4267,
],
}],
['icu_use_data_file_flag==1', {
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
}, { # else icu_use_data_file_flag !=1
'conditions': [
['OS=="win"', {
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
}, {
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
}],
],
}],
['OS == "ios"', {
'toolsets': ['host', 'target'],
}],
],
'export_dependent_settings': [
'base',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/icu/icu.gyp:icui18n',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'base_message_loop_tests',
'type': 'static_library',
'dependencies': [
'base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'message_loop/message_loop_test.cc',
'message_loop/message_loop_test.h',
],
},
{
'target_name': 'base_prefs',
'type': '<(component)',
'variables': {
'enable_wexit_time_destructors': 1,
'optimize': 'max',
},
'dependencies': [
'base',
],
'export_dependent_settings': [
'base',
],
'defines': [
'BASE_PREFS_IMPLEMENTATION',
],
'sources': [
'prefs/base_prefs_export.h',
'prefs/default_pref_store.cc',
'prefs/default_pref_store.h',
'prefs/json_pref_store.cc',
'prefs/json_pref_store.h',
'prefs/overlay_user_pref_store.cc',
'prefs/overlay_user_pref_store.h',
'prefs/persistent_pref_store.h',
'prefs/pref_change_registrar.cc',
'prefs/pref_change_registrar.h',
'prefs/pref_filter.h',
'prefs/pref_member.cc',
'prefs/pref_member.h',
'prefs/pref_notifier.h',
'prefs/pref_notifier_impl.cc',
'prefs/pref_notifier_impl.h',
'prefs/pref_observer.h',
'prefs/pref_registry.cc',
'prefs/pref_registry.h',
'prefs/pref_registry_simple.cc',
'prefs/pref_registry_simple.h',
'prefs/pref_service.cc',
'prefs/pref_service.h',
'prefs/pref_service_factory.cc',
'prefs/pref_service_factory.h',
'prefs/pref_store.cc',
'prefs/pref_store.h',
'prefs/pref_value_map.cc',
'prefs/pref_value_map.h',
'prefs/pref_value_store.cc',
'prefs/pref_value_store.h',
'prefs/scoped_user_pref_update.cc',
'prefs/scoped_user_pref_update.h',
'prefs/value_map_pref_store.cc',
'prefs/value_map_pref_store.h',
'prefs/writeable_pref_store.h',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'base_prefs_test_support',
'type': 'static_library',
'dependencies': [
'base',
'base_prefs',
'../testing/gmock.gyp:gmock',
],
'sources': [
'prefs/mock_pref_change_callback.cc',
'prefs/pref_store_observer_mock.cc',
'prefs/pref_store_observer_mock.h',
'prefs/testing_pref_service.cc',
'prefs/testing_pref_service.h',
'prefs/testing_pref_store.cc',
'prefs/testing_pref_store.h',
],
},
{
# This is the subset of files from base that should not be used with a
# dynamic library. Note that this library cannot depend on base because
# base depends on base_static.
'target_name': 'base_static',
'type': 'static_library',
'variables': {
'enable_wexit_time_destructors': 1,
'optimize': 'max',
},
'toolsets': ['host', 'target'],
'sources': [
'base_switches.cc',
'base_switches.h',
'win/pe_image.cc',
'win/pe_image.h',
],
'include_dirs': [
'..',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
# Include this target for a main() function that simply instantiates
# and runs a base::TestSuite.
{
'target_name': 'run_all_unittests',
'type': 'static_library',
'dependencies': [
'test_support_base',
],
'sources': [
'test/run_all_unittests.cc',
],
},
{
'target_name': 'base_unittests',
'type': '<(gtest_target_type)',
'sources': [
'android/application_status_listener_unittest.cc',
'android/content_uri_utils_unittest.cc',
'android/jni_android_unittest.cc',
'android/jni_array_unittest.cc',
'android/jni_string_unittest.cc',
'android/library_loader/library_prefetcher_unittest.cc',
'android/path_utils_unittest.cc',
'android/scoped_java_ref_unittest.cc',
'android/sys_utils_unittest.cc',
'at_exit_unittest.cc',
'atomicops_unittest.cc',
'barrier_closure_unittest.cc',
'base64_unittest.cc',
'base64url_unittest.cc',
'big_endian_unittest.cc',
'bind_unittest.cc',
'bind_unittest.nc',
'bits_unittest.cc',
'build_time_unittest.cc',
'callback_helpers_unittest.cc',
'callback_list_unittest.cc',
'callback_list_unittest.nc',
'callback_unittest.cc',
'callback_unittest.nc',
'cancelable_callback_unittest.cc',
'command_line_unittest.cc',
'containers/adapters_unittest.cc',
'containers/hash_tables_unittest.cc',
'containers/linked_list_unittest.cc',
'containers/mru_cache_unittest.cc',
'containers/scoped_ptr_hash_map_unittest.cc',
'containers/small_map_unittest.cc',
'containers/stack_container_unittest.cc',
'cpu_unittest.cc',
'debug/crash_logging_unittest.cc',
'debug/debugger_unittest.cc',
'debug/leak_tracker_unittest.cc',
'debug/proc_maps_linux_unittest.cc',
'debug/stack_trace_unittest.cc',
'debug/task_annotator_unittest.cc',
'deferred_sequenced_task_runner_unittest.cc',
'environment_unittest.cc',
'feature_list_unittest.cc',
'file_version_info_unittest.cc',
'files/dir_reader_posix_unittest.cc',
'files/file_path_unittest.cc',
'files/file_path_watcher_unittest.cc',
'files/file_proxy_unittest.cc',
'files/file_unittest.cc',
'files/file_util_proxy_unittest.cc',
'files/file_util_unittest.cc',
'files/important_file_writer_unittest.cc',
'files/memory_mapped_file_unittest.cc',
'files/scoped_temp_dir_unittest.cc',
'gmock_unittest.cc',
'guid_unittest.cc',
'hash_unittest.cc',
'i18n/break_iterator_unittest.cc',
'i18n/case_conversion_unittest.cc',
'i18n/char_iterator_unittest.cc',
'i18n/file_util_icu_unittest.cc',
'i18n/icu_string_conversions_unittest.cc',
'i18n/message_formatter_unittest.cc',
'i18n/number_formatting_unittest.cc',
'i18n/rtl_unittest.cc',
'i18n/streaming_utf8_validator_unittest.cc',
'i18n/string_search_unittest.cc',
'i18n/time_formatting_unittest.cc',
'i18n/timezone_unittest.cc',
'id_map_unittest.cc',
'ios/crb_protocol_observers_unittest.mm',
'ios/device_util_unittest.mm',
'ios/weak_nsobject_unittest.mm',
'json/json_parser_unittest.cc',
'json/json_reader_unittest.cc',
'json/json_value_converter_unittest.cc',
'json/json_value_serializer_unittest.cc',
'json/json_writer_unittest.cc',
'json/string_escape_unittest.cc',
'lazy_instance_unittest.cc',
'logging_unittest.cc',
'mac/bind_objc_block_unittest.mm',
'mac/call_with_eh_frame_unittest.mm',
'mac/dispatch_source_mach_unittest.cc',
'mac/foundation_util_unittest.mm',
'mac/libdispatch_task_runner_unittest.cc',
'mac/mac_util_unittest.mm',
'mac/objc_property_releaser_unittest.mm',
'mac/scoped_nsobject_unittest.mm',
'mac/scoped_objc_class_swizzler_unittest.mm',
'mac/scoped_sending_event_unittest.mm',
'md5_unittest.cc',
'memory/aligned_memory_unittest.cc',
'memory/discardable_shared_memory_unittest.cc',
'memory/linked_ptr_unittest.cc',
'memory/memory_pressure_listener_unittest.cc',
'memory/memory_pressure_monitor_chromeos_unittest.cc',
'memory/memory_pressure_monitor_mac_unittest.cc',
'memory/memory_pressure_monitor_win_unittest.cc',
'memory/ref_counted_memory_unittest.cc',
'memory/ref_counted_unittest.cc',
'memory/scoped_ptr_unittest.cc',
'memory/scoped_ptr_unittest.nc',
'memory/scoped_vector_unittest.cc',
'memory/shared_memory_unittest.cc',
'memory/shared_memory_mac_unittest.cc',
'memory/singleton_unittest.cc',
'memory/weak_ptr_unittest.cc',
'memory/weak_ptr_unittest.nc',
'message_loop/message_loop_task_runner_unittest.cc',
'message_loop/message_loop_unittest.cc',
'message_loop/message_pump_glib_unittest.cc',
'message_loop/message_pump_io_ios_unittest.cc',
'message_loop/message_pump_libevent_unittest.cc',
'metrics/bucket_ranges_unittest.cc',
'metrics/field_trial_unittest.cc',
'metrics/histogram_base_unittest.cc',
'metrics/histogram_delta_serialization_unittest.cc',
'metrics/histogram_macros_unittest.cc',
'metrics/histogram_snapshot_manager_unittest.cc',
'metrics/histogram_unittest.cc',
'metrics/metrics_hashes_unittest.cc',
'metrics/sample_map_unittest.cc',
'metrics/sample_vector_unittest.cc',
'metrics/sparse_histogram_unittest.cc',
'metrics/statistics_recorder_unittest.cc',
'native_library_unittest.cc',
'numerics/safe_numerics_unittest.cc',
'observer_list_unittest.cc',
'os_compat_android_unittest.cc',
'path_service_unittest.cc',
'pickle_unittest.cc',
'posix/file_descriptor_shuffle_unittest.cc',
'posix/unix_domain_socket_linux_unittest.cc',
'power_monitor/power_monitor_unittest.cc',
'prefs/default_pref_store_unittest.cc',
'prefs/json_pref_store_unittest.cc',
'prefs/mock_pref_change_callback.h',
'prefs/overlay_user_pref_store_unittest.cc',
'prefs/pref_change_registrar_unittest.cc',
'prefs/pref_member_unittest.cc',
'prefs/pref_notifier_impl_unittest.cc',
'prefs/pref_service_unittest.cc',
'prefs/pref_value_map_unittest.cc',
'prefs/pref_value_store_unittest.cc',
'prefs/scoped_user_pref_update_unittest.cc',
'process/memory_unittest.cc',
'process/memory_unittest_mac.h',
'process/memory_unittest_mac.mm',
'process/process_metrics_unittest.cc',
'process/process_metrics_unittest_ios.cc',
'process/process_unittest.cc',
'process/process_util_unittest.cc',
'profiler/stack_sampling_profiler_unittest.cc',
'profiler/tracked_time_unittest.cc',
'rand_util_unittest.cc',
'scoped_clear_errno_unittest.cc',
'scoped_generic_unittest.cc',
'scoped_native_library_unittest.cc',
'security_unittest.cc',
'sequence_checker_unittest.cc',
'sha1_unittest.cc',
'stl_util_unittest.cc',
'strings/nullable_string16_unittest.cc',
'strings/pattern_unittest.cc',
'strings/safe_sprintf_unittest.cc',
'strings/string16_unittest.cc',
'strings/string_number_conversions_unittest.cc',
'strings/string_piece_unittest.cc',
'strings/string_split_unittest.cc',
'strings/string_tokenizer_unittest.cc',
'strings/string_util_unittest.cc',
'strings/stringize_macros_unittest.cc',
'strings/stringprintf_unittest.cc',
'strings/sys_string_conversions_mac_unittest.mm',
'strings/sys_string_conversions_unittest.cc',
'strings/utf_offset_string_conversions_unittest.cc',
'strings/utf_string_conversions_unittest.cc',
'supports_user_data_unittest.cc',
'sync_socket_unittest.cc',
'synchronization/cancellation_flag_unittest.cc',
'synchronization/condition_variable_unittest.cc',
'synchronization/lock_unittest.cc',
'synchronization/waitable_event_unittest.cc',
'synchronization/waitable_event_watcher_unittest.cc',
'sys_info_unittest.cc',
'system_monitor/system_monitor_unittest.cc',
'task/cancelable_task_tracker_unittest.cc',
'task_runner_util_unittest.cc',
'template_util_unittest.cc',
'test/histogram_tester_unittest.cc',
'test/test_pending_task_unittest.cc',
'test/test_reg_util_win_unittest.cc',
'test/trace_event_analyzer_unittest.cc',
'test/user_action_tester_unittest.cc',
'threading/non_thread_safe_unittest.cc',
'threading/platform_thread_unittest.cc',
'threading/sequenced_worker_pool_unittest.cc',
'threading/sequenced_task_runner_handle_unittest.cc',
'threading/simple_thread_unittest.cc',
'threading/thread_checker_unittest.cc',
'threading/thread_collision_warner_unittest.cc',
'threading/thread_id_name_manager_unittest.cc',
'threading/thread_local_storage_unittest.cc',
'threading/thread_local_unittest.cc',
'threading/thread_unittest.cc',
'threading/watchdog_unittest.cc',
'threading/worker_pool_posix_unittest.cc',
'threading/worker_pool_unittest.cc',
'time/pr_time_unittest.cc',
'time/time_unittest.cc',
'time/time_win_unittest.cc',
'timer/hi_res_timer_manager_unittest.cc',
'timer/mock_timer_unittest.cc',
'timer/timer_unittest.cc',
'tools_sanity_unittest.cc',
'tracked_objects_unittest.cc',
'tuple_unittest.cc',
'values_unittest.cc',
'version_unittest.cc',
'vlog_unittest.cc',
'win/dllmain.cc',
'win/enum_variant_unittest.cc',
'win/event_trace_consumer_unittest.cc',
'win/event_trace_controller_unittest.cc',
'win/event_trace_provider_unittest.cc',
'win/i18n_unittest.cc',
'win/iunknown_impl_unittest.cc',
'win/message_window_unittest.cc',
'win/object_watcher_unittest.cc',
'win/pe_image_unittest.cc',
'win/registry_unittest.cc',
'win/scoped_bstr_unittest.cc',
'win/scoped_comptr_unittest.cc',
'win/scoped_handle_unittest.cc',
'win/scoped_process_information_unittest.cc',
'win/scoped_variant_unittest.cc',
'win/shortcut_unittest.cc',
'win/startup_information_unittest.cc',
'win/win_util_unittest.cc',
'win/wrapped_window_proc_unittest.cc',
'<@(trace_event_test_sources)',
],
'dependencies': [
'base',
'base_i18n',
'base_message_loop_tests',
'base_prefs',
'base_prefs_test_support',
'base_static',
'run_all_unittests',
'test_support_base',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
],
'includes': ['../build/nocompile.gypi'],
'variables': {
# TODO(ajwong): Is there a way to autodetect this?
'module_dir': 'base'
},
'conditions': [
['OS == "android"', {
'dependencies': [
'android/jni_generator/jni_generator.gyp:jni_generator_tests',
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
['OS == "ios" and _toolset != "host"', {
'sources/': [
# iOS does not support FilePathWatcher.
['exclude', '^files/file_path_watcher_unittest\\.cc$'],
# Only test the iOS-meaningful portion of memory and process_utils.
['exclude', '^memory/discardable_shared_memory_unittest\\.cc$'],
['exclude', '^memory/shared_memory_unittest\\.cc$'],
['exclude', '^process/memory_unittest'],
['exclude', '^process/process_unittest\\.cc$'],
['exclude', '^process/process_util_unittest\\.cc$'],
['include', '^process/process_util_unittest_ios\\.cc$'],
# iOS does not use message_pump_libevent.
['exclude', '^message_loop/message_pump_libevent_unittest\\.cc$'],
],
'actions': [
{
'action_name': 'copy_test_data',
'variables': {
'test_data_files': [
'test/data',
],
'test_data_prefix': 'base',
},
'includes': [ '../build/copy_test_data_ios.gypi' ],
},
],
}],
['desktop_linux == 1 or chromeos == 1', {
'defines': [
'USE_SYMBOLIZE',
],
'sources!': [
'file_version_info_unittest.cc',
],
'conditions': [
[ 'desktop_linux==1', {
'sources': [
'nix/xdg_util_unittest.cc',
],
}],
],
}],
['use_glib == 1', {
'dependencies': [
'../build/linux/system.gyp:glib',
],
}, { # use_glib == 0
'sources!': [
'message_loop/message_pump_glib_unittest.cc',
]
}],
['use_ozone == 1', {
'sources!': [
'message_loop/message_pump_glib_unittest.cc',
]
}],
['OS == "linux"', {
'dependencies': [
'malloc_wrapper',
],
'conditions': [
['use_allocator!="none"', {
'dependencies': [
'allocator/allocator.gyp:allocator',
],
}],
]},
],
[ 'OS == "win" and target_arch == "x64"', {
'sources': [
'profiler/win32_stack_frame_unwinder_unittest.cc',
],
'dependencies': [
'base_profiler_test_support_library',
],
}],
['OS == "win"', {
'sources!': [
'file_descriptor_shuffle_unittest.cc',
'files/dir_reader_posix_unittest.cc',
'message_loop/message_pump_libevent_unittest.cc',
'threading/worker_pool_posix_unittest.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [
4267,
],
'conditions': [
# This is needed so base_unittests uses the allocator shim, as
# SecurityTest.MemoryAllocationRestriction* tests are dependent
# on tcmalloc.
# TODO(wfh): crbug.com/246278 Move tcmalloc specific tests into
# their own test suite.
['win_use_allocator_shim==1', {
'dependencies': [
'allocator/allocator.gyp:allocator',
],
}],
['icu_use_data_file_flag==0', {
# This is needed to trigger the dll copy step on windows.
# TODO(mark): This should not be necessary.
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
],
}],
],
}, { # OS != "win"
'dependencies': [
'../third_party/libevent/libevent.gyp:libevent'
],
}],
], # conditions
'target_conditions': [
['OS == "ios" and _toolset != "host"', {
'sources/': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
['include', '^mac/bind_objc_block_unittest\\.mm$'],
['include', '^mac/foundation_util_unittest\\.mm$',],
['include', '^mac/objc_property_releaser_unittest\\.mm$'],
['include', '^mac/scoped_nsobject_unittest\\.mm$'],
['include', '^sys_string_conversions_mac_unittest\\.mm$'],
],
}],
['OS == "android"', {
'sources/': [
['include', '^debug/proc_maps_linux_unittest\\.cc$'],
],
}],
# Enable more direct string conversions on platforms with native utf8
# strings
['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
'defines': ['SYSTEM_NATIVE_UTF8'],
}],
# SyncSocket isn't used on iOS
['OS=="ios"', {
'sources!': [
'sync_socket_unittest.cc',
],
}],
], # target_conditions
},
{
# GN: //base:base_perftests
'target_name': 'base_perftests',
'type': '<(gtest_target_type)',
'dependencies': [
'base',
'test_support_base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'message_loop/message_pump_perftest.cc',
'test/run_all_unittests.cc',
'threading/thread_perftest.cc',
'../testing/perf/perf_test.cc'
],
'conditions': [
['OS == "android"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
],
},
{
# GN: //base:base_i18n_perftests
'target_name': 'base_i18n_perftests',
'type': '<(gtest_target_type)',
'dependencies': [
'test_support_base',
'test_support_perf',
'../testing/gtest.gyp:gtest',
'base_i18n',
'base',
],
'sources': [
'i18n/streaming_utf8_validator_perftest.cc',
],
},
{
# GN: //base/test:test_support
'target_name': 'test_support_base',
'type': 'static_library',
'dependencies': [
'base',
'base_static',
'base_i18n',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/libxml/libxml.gyp:libxml',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
],
'export_dependent_settings': [
'base',
],
'conditions': [
['os_posix==0', {
'sources!': [
'test/scoped_locale.cc',
'test/scoped_locale.h',
],
}],
['os_bsd==1', {
'sources!': [
'test/test_file_util_linux.cc',
],
}],
['OS == "android"', {
'dependencies': [
'base_unittests_jni_headers',
'base_java_unittest_support',
],
}],
['OS == "ios"', {
'toolsets': ['host', 'target'],
}],
],
'sources': [
'test/gtest_util.cc',
'test/gtest_util.h',
'test/gtest_xml_unittest_result_printer.cc',
'test/gtest_xml_unittest_result_printer.h',
'test/gtest_xml_util.cc',
'test/gtest_xml_util.h',
'test/histogram_tester.cc',
'test/histogram_tester.h',
'test/icu_test_util.cc',
'test/icu_test_util.h',
'test/ios/wait_util.h',
'test/ios/wait_util.mm',
'test/launcher/test_launcher.cc',
'test/launcher/test_launcher.h',
'test/launcher/test_result.cc',
'test/launcher/test_result.h',
'test/launcher/test_results_tracker.cc',
'test/launcher/test_results_tracker.h',
'test/launcher/unit_test_launcher.cc',
'test/launcher/unit_test_launcher.h',
'test/launcher/unit_test_launcher_ios.cc',
'test/mock_chrome_application_mac.h',
'test/mock_chrome_application_mac.mm',
'test/mock_devices_changed_observer.cc',
'test/mock_devices_changed_observer.h',
'test/mock_entropy_provider.cc',
'test/mock_entropy_provider.h',
'test/mock_log.cc',
'test/mock_log.h',
'test/multiprocess_test.cc',
'test/multiprocess_test.h',
'test/multiprocess_test_android.cc',
'test/null_task_runner.cc',
'test/null_task_runner.h',
'test/opaque_ref_counted.cc',
'test/opaque_ref_counted.h',
'test/perf_log.cc',
'test/perf_log.h',
'test/perf_test_suite.cc',
'test/perf_test_suite.h',
'test/perf_time_logger.cc',
'test/perf_time_logger.h',
'test/power_monitor_test_base.cc',
'test/power_monitor_test_base.h',
'test/scoped_locale.cc',
'test/scoped_locale.h',
'test/scoped_path_override.cc',
'test/scoped_path_override.h',
'test/sequenced_task_runner_test_template.cc',
'test/sequenced_task_runner_test_template.h',
'test/sequenced_worker_pool_owner.cc',
'test/sequenced_worker_pool_owner.h',
'test/simple_test_clock.cc',
'test/simple_test_clock.h',
'test/simple_test_tick_clock.cc',
'test/simple_test_tick_clock.h',
'test/task_runner_test_template.cc',
'test/task_runner_test_template.h',
'test/test_discardable_memory_allocator.cc',
'test/test_discardable_memory_allocator.h',
'test/test_file_util.cc',
'test/test_file_util.h',
'test/test_file_util_android.cc',
'test/test_file_util_linux.cc',
'test/test_file_util_mac.cc',
'test/test_file_util_posix.cc',
'test/test_file_util_win.cc',
'test/test_io_thread.cc',
'test/test_io_thread.h',
'test/test_listener_ios.h',
'test/test_listener_ios.mm',
'test/test_mock_time_task_runner.cc',
'test/test_mock_time_task_runner.h',
'test/test_pending_task.cc',
'test/test_pending_task.h',
'test/test_reg_util_win.cc',
'test/test_reg_util_win.h',
'test/test_shortcut_win.cc',
'test/test_shortcut_win.h',
'test/test_simple_task_runner.cc',
'test/test_simple_task_runner.h',
'test/test_suite.cc',
'test/test_suite.h',
'test/test_support_android.cc',
'test/test_support_android.h',
'test/test_support_ios.h',
'test/test_support_ios.mm',
'test/test_switches.cc',
'test/test_switches.h',
'test/test_timeouts.cc',
'test/test_timeouts.h',
'test/test_ui_thread_android.cc',
'test/test_ui_thread_android.h',
'test/thread_test_helper.cc',
'test/thread_test_helper.h',
'test/trace_event_analyzer.cc',
'test/trace_event_analyzer.h',
'test/trace_to_file.cc',
'test/trace_to_file.h',
'test/user_action_tester.cc',
'test/user_action_tester.h',
'test/values_test_util.cc',
'test/values_test_util.h',
],
'target_conditions': [
['OS == "ios"', {
'sources/': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
['include', '^test/test_file_util_mac\\.cc$'],
],
}],
['OS == "ios" and _toolset == "target"', {
'sources!': [
# iOS uses its own unit test launcher.
'test/launcher/unit_test_launcher.cc',
],
}],
['OS == "ios" and _toolset == "host"', {
'sources!': [
'test/launcher/unit_test_launcher_ios.cc',
'test/test_support_ios.h',
'test/test_support_ios.mm',
],
}],
], # target_conditions
},
{
'target_name': 'test_support_perf',
'type': 'static_library',
'dependencies': [
'base',
'test_support_base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'test/run_all_perftests.cc',
],
'direct_dependent_settings': {
'defines': [
'PERF_TEST',
],
},
},
{
'target_name': 'test_launcher_nacl_nonsfi',
'conditions': [
['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', {
'type': 'static_library',
'sources': [
'test/launcher/test_launcher_nacl_nonsfi.cc',
],
'dependencies': [
'test_support_base',
],
}, {
'type': 'none',
}],
],
},
],
'conditions': [
['OS=="ios" and "<(GENERATOR)"=="ninja"', {
'targets': [
{
'target_name': 'test_launcher',
'toolsets': ['host'],
'type': 'executable',
'dependencies': [
'test_support_base',
],
'sources': [
'test/launcher/test_launcher_ios.cc',
],
},
],
}],
['OS!="ios"', {
'targets': [
{
# GN: //base:check_example
'target_name': 'check_example',
'type': 'executable',
'sources': [
'check_example.cc',
],
'dependencies': [
'base',
],
},
{
'target_name': 'build_utf8_validator_tables',
'type': 'executable',
'toolsets': ['host'],
'dependencies': [
'base',
'../third_party/icu/icu.gyp:icuuc',
],
'sources': [
'i18n/build_utf8_validator_tables.cc'
],
},
],
}],
['OS == "win" and target_arch=="ia32"', {
'targets': [
# The base_win64 target here allows us to use base for Win64 targets
# (the normal build is 32 bits).
{
'target_name': 'base_win64',
'type': '<(component)',
'variables': {
'base_target': 1,
},
'dependencies': [
'base_static_win64',
'allocator/allocator.gyp:allocator_extension_thunks_win64',
'../third_party/modp_b64/modp_b64.gyp:modp_b64_win64',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
],
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 32-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'defines': [
'BASE_WIN64',
'<@(nacl_win64_defines)',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
'conditions': [
['component == "shared_library"', {
'sources!': [
'debug/debug_on_start_win.cc',
],
}],
],
# Specify delayload for base_win64.dll.
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
# Specify delayload for components that link with base_win64.lib.
'all_dependent_settings': {
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
},
# TODO(rvargas): Bug 78117. Remove this.
'msvs_disabled_warnings': [
4244,
4996,
4267,
],
'sources': [
'auto_reset.h',
'linux_util.cc',
'linux_util.h',
'md5.cc',
'md5.h',
'message_loop/message_pump_libevent.cc',
'message_loop/message_pump_libevent.h',
'metrics/field_trial.cc',
'metrics/field_trial.h',
'posix/file_descriptor_shuffle.cc',
'posix/file_descriptor_shuffle.h',
'sync_socket.h',
'sync_socket_posix.cc',
'sync_socket_win.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
],
},
{
'target_name': 'base_i18n_nacl_win64',
'type': '<(component)',
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 32-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'defines': [
'<@(nacl_win64_defines)',
'BASE_I18N_IMPLEMENTATION',
],
'include_dirs': [
'..',
],
'sources': [
'i18n/icu_util_nacl_win64.cc',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
},
{
# TODO(rvargas): Remove this when gyp finally supports a clean model.
# See bug 36232.
'target_name': 'base_static_win64',
'type': 'static_library',
'sources': [
'base_switches.cc',
'base_switches.h',
'win/pe_image.cc',
'win/pe_image.h',
],
'sources!': [
# base64.cc depends on modp_b64.
'base64.cc',
],
'include_dirs': [
'..',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
'defines': [
'<@(nacl_win64_defines)',
],
# TODO(rvargas): Bug 78117. Remove this.
'msvs_disabled_warnings': [
4244,
],
},
],
}],
['OS == "win" and target_arch=="x64"', {
'targets': [
{
'target_name': 'base_profiler_test_support_library',
# Must be a shared library so that it can be unloaded during testing.
'type': 'shared_library',
'include_dirs': [
'..',
],
'sources': [
'profiler/test_support_library.cc',
],
},
]
}],
['os_posix==1 and OS!="mac" and OS!="ios"', {
'targets': [
{
'target_name': 'symbolize',
'type': 'static_library',
'toolsets': ['host', 'target'],
'variables': {
'chromium_code': 0,
},
'conditions': [
['OS == "solaris"', {
'include_dirs': [
'/usr/gnu/include',
'/usr/gnu/include/libelf',
],
},],
],
'cflags': [
'-Wno-sign-compare',
],
'cflags!': [
'-Wextra',
],
'defines': [
'GLOG_BUILD_CONFIG_INCLUDE="build/build_config.h"',
],
'sources': [
'third_party/symbolize/config.h',
'third_party/symbolize/demangle.cc',
'third_party/symbolize/demangle.h',
'third_party/symbolize/glog/logging.h',
'third_party/symbolize/glog/raw_logging.h',
'third_party/symbolize/symbolize.cc',
'third_party/symbolize/symbolize.h',
'third_party/symbolize/utilities.h',
],
'include_dirs': [
'..',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'xdg_mime',
'type': 'static_library',
'toolsets': ['host', 'target'],
'variables': {
'chromium_code': 0,
},
'cflags!': [
'-Wextra',
],
'sources': [
'third_party/xdg_mime/xdgmime.c',
'third_party/xdg_mime/xdgmime.h',
'third_party/xdg_mime/xdgmimealias.c',
'third_party/xdg_mime/xdgmimealias.h',
'third_party/xdg_mime/xdgmimecache.c',
'third_party/xdg_mime/xdgmimecache.h',
'third_party/xdg_mime/xdgmimeglob.c',
'third_party/xdg_mime/xdgmimeglob.h',
'third_party/xdg_mime/xdgmimeicon.c',
'third_party/xdg_mime/xdgmimeicon.h',
'third_party/xdg_mime/xdgmimeint.c',
'third_party/xdg_mime/xdgmimeint.h',
'third_party/xdg_mime/xdgmimemagic.c',
'third_party/xdg_mime/xdgmimemagic.h',
'third_party/xdg_mime/xdgmimeparent.c',
'third_party/xdg_mime/xdgmimeparent.h',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
],
}],
['OS == "linux"', {
'targets': [
{
'target_name': 'malloc_wrapper',
'type': 'shared_library',
'dependencies': [
'base',
],
'sources': [
'test/malloc_wrapper.cc',
],
}
],
}],
['OS == "android"', {
'targets': [
{
# GN: //base:base_jni_headers
'target_name': 'base_jni_headers',
'type': 'none',
'sources': [
'android/java/src/org/chromium/base/ApkAssets.java',
'android/java/src/org/chromium/base/ApplicationStatus.java',
'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java',
'android/java/src/org/chromium/base/BuildInfo.java',
'android/java/src/org/chromium/base/CommandLine.java',
'android/java/src/org/chromium/base/ContentUriUtils.java',
'android/java/src/org/chromium/base/ContextUtils.java',
'android/java/src/org/chromium/base/CpuFeatures.java',
'android/java/src/org/chromium/base/EventLog.java',
'android/java/src/org/chromium/base/FieldTrialList.java',
'android/java/src/org/chromium/base/ImportantFileWriterAndroid.java',
'android/java/src/org/chromium/base/JNIUtils.java',
'android/java/src/org/chromium/base/JavaHandlerThread.java',
'android/java/src/org/chromium/base/LocaleUtils.java',
'android/java/src/org/chromium/base/MemoryPressureListener.java',
'android/java/src/org/chromium/base/PathService.java',
'android/java/src/org/chromium/base/PathUtils.java',
'android/java/src/org/chromium/base/PowerMonitor.java',
'android/java/src/org/chromium/base/SysUtils.java',
'android/java/src/org/chromium/base/SystemMessageHandler.java',
'android/java/src/org/chromium/base/ThreadUtils.java',
'android/java/src/org/chromium/base/TraceEvent.java',
'android/java/src/org/chromium/base/library_loader/LibraryLoader.java',
'android/java/src/org/chromium/base/metrics/RecordHistogram.java',
'android/java/src/org/chromium/base/metrics/RecordUserAction.java',
],
'variables': {
'jni_gen_package': 'base',
},
'dependencies': [
'android_runtime_jni_headers',
],
'includes': [ '../build/jni_generator.gypi' ],
},
{
# GN: //base:android_runtime_jni_headers
'target_name': 'android_runtime_jni_headers',
'type': 'none',
'variables': {
'jni_gen_package': 'base',
'input_java_class': 'java/lang/Runtime.class',
},
'includes': [ '../build/jar_file_jni_generator.gypi' ],
},
{
# GN: //base:base_unittests_jni_headers
'target_name': 'base_unittests_jni_headers',
'type': 'none',
'sources': [
'test/android/java/src/org/chromium/base/ContentUriTestUtils.java',
'test/android/java/src/org/chromium/base/TestUiThread.java',
],
'variables': {
'jni_gen_package': 'base',
},
'includes': [ '../build/jni_generator.gypi' ],
},
{
# GN: //base:base_native_libraries_gen
'target_name': 'base_native_libraries_gen',
'type': 'none',
'sources': [
'android/java/templates/NativeLibraries.template',
],
'variables': {
'package_name': 'org/chromium/base/library_loader',
'template_deps': [],
},
'includes': [ '../build/android/java_cpp_template.gypi' ],
},
{
# GN: //base:base_multidex_gen
'target_name': 'base_multidex_gen',
'type': 'none',
'sources': [
'android/java/templates/ChromiumMultiDex.template',
],
'variables': {
'package_name': 'org/chromium/base/multidex',
'template_deps': [],
'additional_gcc_preprocess_options': [
'--defines', 'MULTIDEX_CONFIGURATION_<(CONFIGURATION_NAME)',
],
},
'includes': ['../build/android/java_cpp_template.gypi'],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_library_process_type',
'type': 'none',
'variables': {
'source_file': 'android/library_loader/library_loader_hooks.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_java
'target_name': 'base_java',
'type': 'none',
'variables': {
'java_in_dir': 'android/java',
'jar_excluded_classes': [ '*/NativeLibraries.class' ],
},
'dependencies': [
'base_java_application_state',
'base_java_library_load_from_apk_status_codes',
'base_java_library_process_type',
'base_java_memory_pressure_level',
'base_multidex_gen',
'base_native_libraries_gen',
'../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
'../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib',
],
'includes': [ '../build/java.gypi' ],
},
{
# GN: //base:base_java_unittest_support
'target_name': 'base_java_unittest_support',
'type': 'none',
'dependencies': [
'base_java',
],
'variables': {
'java_in_dir': '../base/test/android/java',
},
'includes': [ '../build/java.gypi' ],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_application_state',
'type': 'none',
'variables': {
'source_file': 'android/application_status_listener.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_library_load_from_apk_status_codes',
'type': 'none',
'variables': {
'source_file': 'android/library_loader/library_load_from_apk_status_codes.h'
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_memory_pressure_level',
'type': 'none',
'variables': {
'source_file': 'memory/memory_pressure_listener.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_java_test_support
'target_name': 'base_java_test_support',
'type': 'none',
'dependencies': [
'base_java',
'../testing/android/on_device_instrumentation.gyp:reporter_java',
],
'variables': {
'java_in_dir': '../base/test/android/javatests',
},
'includes': [ '../build/java.gypi' ],
},
{
# TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
# in the multidex shadow library. crbug.com/522043
# GN: //base:base_junit_test_support
'target_name': 'base_junit_test_support',
'type': 'none',
'dependencies': [
'../testing/android/junit/junit_test.gyp:junit_test_support',
'../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
],
'variables': {
'src_paths': [
'../base/test/android/junit/',
],
},
'includes': [ '../build/host_jar.gypi' ]
},
{
# GN: //base:base_junit_tests
'target_name': 'base_junit_tests',
'type': 'none',
'dependencies': [
'base_java',
'base_java_test_support',
'base_junit_test_support',
'../testing/android/junit/junit_test.gyp:junit_test_support',
],
'variables': {
'main_class': 'org.chromium.testing.local.JunitTestMain',
'src_paths': [
'../base/android/junit/',
],
},
'includes': [ '../build/host_jar.gypi' ],
},
{
# GN: //base:base_javatests
'target_name': 'base_javatests',
'type': 'none',
'dependencies': [
'base_java',
'base_java_test_support',
],
'variables': {
'java_in_dir': '../base/android/javatests',
},
'includes': [ '../build/java.gypi' ],
},
{
# GN: //base/android/linker:chromium_android_linker
'target_name': 'chromium_android_linker',
'type': 'shared_library',
'sources': [
'android/linker/android_dlext.h',
'android/linker/legacy_linker_jni.cc',
'android/linker/legacy_linker_jni.h',
'android/linker/linker_jni.cc',
'android/linker/linker_jni.h',
'android/linker/modern_linker_jni.cc',
'android/linker/modern_linker_jni.h',
],
# The crazy linker is never instrumented.
'cflags!': [
'-finstrument-functions',
],
'dependencies': [
# The NDK contains the crazy_linker here:
# '<(android_ndk_root)/crazy_linker.gyp:crazy_linker'
# However, we use our own fork. See bug 384700.
'../third_party/android_crazy_linker/crazy_linker.gyp:crazy_linker',
],
},
{
# GN: //base:base_perftests_apk
'target_name': 'base_perftests_apk',
'type': 'none',
'dependencies': [
'base_perftests',
],
'variables': {
'test_suite_name': 'base_perftests',
},
'includes': [ '../build/apk_test.gypi' ],
},
{
# GN: //base:base_unittests_apk
'target_name': 'base_unittests_apk',
'type': 'none',
'dependencies': [
'base_java',
'base_unittests',
],
'variables': {
'test_suite_name': 'base_unittests',
'isolate_file': 'base_unittests.isolate',
},
'includes': [ '../build/apk_test.gypi' ],
},
],
'conditions': [
['test_isolation_mode != "noop"',
{
'targets': [
{
'target_name': 'base_unittests_apk_run',
'type': 'none',
'dependencies': [
'base_unittests_apk',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'base_unittests_apk.isolate',
],
},
]
}
],
],
}],
['OS == "win"', {
'targets': [
{
# Target to manually rebuild pe_image_test.dll which is checked into
# base/test/data/pe_image.
'target_name': 'pe_image_test',
'type': 'shared_library',
'sources': [
'win/pe_image_test.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
'DelayLoadDLLs': [
'cfgmgr32.dll',
'shell32.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'shell32.lib',
],
},
},
},
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'base_unittests_run',
'type': 'none',
'dependencies': [
'base_unittests',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'base_unittests.isolate',
],
},
],
}],
],
}
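# Illustrative sketch (not part of base.gyp and not the real gyp engine): the
# 'conditions' lists used throughout the file above pair a predicate string with
# settings that are merged in only when the predicate evaluates true for the current
# build variables. The tiny resolver below is a toy stand-in for that mechanism.
target = {
    'sources': ['sync_socket.h'],
    'conditions': [
        ['OS == "win"', {'sources': ['sync_socket_win.cc']}],
        ['OS != "win"', {'sources': ['sync_socket_posix.cc']}],
    ],
}

def resolve_sources(target, variables):
    # gyp evaluates condition strings as Python expressions against its variables;
    # this toy version does the same for the 'sources' key only.
    sources = list(target['sources'])
    for predicate, settings in target['conditions']:
        if eval(predicate, {}, variables):
            sources += settings.get('sources', [])
    return sources

assert resolve_sources(target, {'OS': 'win'}) == ['sync_socket.h', 'sync_socket_win.cc']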
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../build/win_precompile.gypi',
'base.gypi',
],
'targets': [
{
'target_name': 'base',
'type': '<(component)',
'toolsets': ['host', 'target'],
'variables': {
'base_target': 1,
'enable_wexit_time_destructors': 1,
'optimize': 'max',
},
'dependencies': [
'base_static',
'allocator/allocator.gyp:allocator_extension_thunks',
'../testing/gtest.gyp:gtest_prod',
'../third_party/modp_b64/modp_b64.gyp:modp_b64',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
],
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 64-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'conditions': [
['desktop_linux == 1 or chromeos == 1', {
'conditions': [
['chromeos==1', {
'sources/': [ ['include', '_chromeos\\.cc$'] ]
}],
],
'dependencies': [
'symbolize',
'xdg_mime',
],
'defines': [
'USE_SYMBOLIZE',
],
}, { # desktop_linux == 0 and chromeos == 0
'sources/': [
['exclude', '/xdg_user_dirs/'],
['exclude', '_nss\\.cc$'],
],
}],
['use_glib==1', {
'dependencies': [
'../build/linux/system.gyp:glib',
],
'export_dependent_settings': [
'../build/linux/system.gyp:glib',
],
}],
['OS == "android" and _toolset == "host"', {
# Always build base as a static_library for host toolset, even if
# we're doing a component build. Specifically, we only care about the
# target toolset using components since that's what developers are
# focusing on. In theory we should do this more generally for all
# targets when building for host, but getting the gyp magic
# per-toolset for the "component" variable is hard, and we really only
# need base on host.
'type': 'static_library',
# Base for host support is the minimum required to run the
# ssl false start blacklist tool. It requires further changes
# to generically support host builds (and tests).
# Note: when building for host, gyp has OS == "android",
# hence the *_android.cc files are included but the actual code
# doesn't have OS_ANDROID / ANDROID defined.
'conditions': [
['host_os == "mac"', {
'sources/': [
['exclude', '^native_library_linux\\.cc$'],
['exclude', '^process_util_linux\\.cc$'],
['exclude', '^sys_info_linux\\.cc$'],
['exclude', '^sys_string_conversions_linux\\.cc$'],
['exclude', '^worker_pool_linux\\.cc$'],
],
}],
],
}],
['OS == "android" and _toolset == "target"', {
'dependencies': [
'base_java',
'base_jni_headers',
'../build/android/ndk.gyp:cpu_features',
'../third_party/ashmem/ashmem.gyp:ashmem',
],
'link_settings': {
'libraries': [
'-llog',
],
},
'sources!': [
'debug/stack_trace_posix.cc',
],
}],
['os_bsd==1', {
'include_dirs': [
'/usr/local/include',
],
'link_settings': {
'libraries': [
'-L/usr/local/lib -lexecinfo',
],
},
}],
['OS == "linux"', {
'link_settings': {
'libraries': [
# We need rt for clock_gettime().
'-lrt',
# For 'native_library_linux.cc'
'-ldl',
],
},
'conditions': [
['use_allocator!="tcmalloc"', {
'defines': [
'NO_TCMALLOC',
],
'direct_dependent_settings': {
'defines': [
'NO_TCMALLOC',
],
},
}],
],
}],
['OS == "win"', {
# Specify delayload for base.dll.
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
# Specify delayload for components that link with base.lib.
'all_dependent_settings': {
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
},
'copies': [
{
'destination': '<(PRODUCT_DIR)/',
'files': [
'../build/win/dbghelp_xp/dbghelp.dll',
],
},
],
'dependencies': [
'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
],
}],
['OS == "mac" or (OS == "ios" and _toolset == "host")', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
'$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework',
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
],
},
}],
['OS == "ios" and _toolset != "host"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreGraphics.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreText.framework',
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
],
},
}],
['OS != "win" and (OS != "ios" or _toolset == "host")', {
'dependencies': ['../third_party/libevent/libevent.gyp:libevent'],
},],
['component=="shared_library"', {
'conditions': [
['OS=="win"', {
'sources!': [
'debug/debug_on_start_win.cc',
],
}],
],
}],
['OS=="ios"', {
'sources!': [
'sync_socket.h',
'sync_socket_posix.cc',
]
}],
],
'sources': [
'auto_reset.h',
'linux_util.cc',
'linux_util.h',
'message_loop/message_pump_android.cc',
'message_loop/message_pump_android.h',
'message_loop/message_pump_glib.cc',
'message_loop/message_pump_glib.h',
'message_loop/message_pump_io_ios.cc',
'message_loop/message_pump_io_ios.h',
'message_loop/message_pump_libevent.cc',
'message_loop/message_pump_libevent.h',
'message_loop/message_pump_mac.h',
'message_loop/message_pump_mac.mm',
'metrics/field_trial.cc',
'metrics/field_trial.h',
'posix/file_descriptor_shuffle.cc',
'posix/file_descriptor_shuffle.h',
'sync_socket.h',
'sync_socket_posix.cc',
'sync_socket_win.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'base_i18n',
'type': '<(component)',
'variables': {
'enable_wexit_time_destructors': 1,
'optimize': 'max',
'base_i18n_target': 1,
},
'dependencies': [
'base',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
],
'conditions': [
['OS == "win"', {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [
4267,
],
}],
['icu_use_data_file_flag==1', {
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
}, { # else icu_use_data_file_flag !=1
'conditions': [
['OS=="win"', {
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
}, {
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
}],
],
}],
['OS == "ios"', {
'toolsets': ['host', 'target'],
}],
],
'export_dependent_settings': [
'base',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/icu/icu.gyp:icui18n',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'base_message_loop_tests',
'type': 'static_library',
'dependencies': [
'base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'message_loop/message_loop_test.cc',
'message_loop/message_loop_test.h',
],
},
{
'target_name': 'base_prefs',
'type': '<(component)',
'variables': {
'enable_wexit_time_destructors': 1,
'optimize': 'max',
},
'dependencies': [
'base',
],
'export_dependent_settings': [
'base',
],
'defines': [
'BASE_PREFS_IMPLEMENTATION',
],
'sources': [
'prefs/base_prefs_export.h',
'prefs/default_pref_store.cc',
'prefs/default_pref_store.h',
'prefs/json_pref_store.cc',
'prefs/json_pref_store.h',
'prefs/overlay_user_pref_store.cc',
'prefs/overlay_user_pref_store.h',
'prefs/persistent_pref_store.h',
'prefs/pref_change_registrar.cc',
'prefs/pref_change_registrar.h',
'prefs/pref_filter.h',
'prefs/pref_member.cc',
'prefs/pref_member.h',
'prefs/pref_notifier.h',
'prefs/pref_notifier_impl.cc',
'prefs/pref_notifier_impl.h',
'prefs/pref_observer.h',
'prefs/pref_registry.cc',
'prefs/pref_registry.h',
'prefs/pref_registry_simple.cc',
'prefs/pref_registry_simple.h',
'prefs/pref_service.cc',
'prefs/pref_service.h',
'prefs/pref_service_factory.cc',
'prefs/pref_service_factory.h',
'prefs/pref_store.cc',
'prefs/pref_store.h',
'prefs/pref_value_map.cc',
'prefs/pref_value_map.h',
'prefs/pref_value_store.cc',
'prefs/pref_value_store.h',
'prefs/scoped_user_pref_update.cc',
'prefs/scoped_user_pref_update.h',
'prefs/value_map_pref_store.cc',
'prefs/value_map_pref_store.h',
'prefs/writeable_pref_store.h',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'base_prefs_test_support',
'type': 'static_library',
'dependencies': [
'base',
'base_prefs',
'../testing/gmock.gyp:gmock',
],
'sources': [
'prefs/mock_pref_change_callback.cc',
'prefs/pref_store_observer_mock.cc',
'prefs/pref_store_observer_mock.h',
'prefs/testing_pref_service.cc',
'prefs/testing_pref_service.h',
'prefs/testing_pref_store.cc',
'prefs/testing_pref_store.h',
],
},
{
# This is the subset of files from base that should not be used with a
# dynamic library. Note that this library cannot depend on base because
# base depends on base_static.
'target_name': 'base_static',
'type': 'static_library',
'variables': {
'enable_wexit_time_destructors': 1,
'optimize': 'max',
},
'toolsets': ['host', 'target'],
'sources': [
'base_switches.cc',
'base_switches.h',
'win/pe_image.cc',
'win/pe_image.h',
],
'include_dirs': [
'..',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
# Include this target for a main() function that simply instantiates
# and runs a base::TestSuite.
{
'target_name': 'run_all_unittests',
'type': 'static_library',
'dependencies': [
'test_support_base',
],
'sources': [
'test/run_all_unittests.cc',
],
},
{
'target_name': 'base_unittests',
'type': '<(gtest_target_type)',
'sources': [
'android/application_status_listener_unittest.cc',
'android/content_uri_utils_unittest.cc',
'android/jni_android_unittest.cc',
'android/jni_array_unittest.cc',
'android/jni_string_unittest.cc',
'android/library_loader/library_prefetcher_unittest.cc',
'android/path_utils_unittest.cc',
'android/scoped_java_ref_unittest.cc',
'android/sys_utils_unittest.cc',
'at_exit_unittest.cc',
'atomicops_unittest.cc',
'barrier_closure_unittest.cc',
'base64_unittest.cc',
'base64url_unittest.cc',
'big_endian_unittest.cc',
'bind_unittest.cc',
'bind_unittest.nc',
'bits_unittest.cc',
'build_time_unittest.cc',
'callback_helpers_unittest.cc',
'callback_list_unittest.cc',
'callback_list_unittest.nc',
'callback_unittest.cc',
'callback_unittest.nc',
'cancelable_callback_unittest.cc',
'command_line_unittest.cc',
'containers/adapters_unittest.cc',
'containers/hash_tables_unittest.cc',
'containers/linked_list_unittest.cc',
'containers/mru_cache_unittest.cc',
'containers/scoped_ptr_hash_map_unittest.cc',
'containers/small_map_unittest.cc',
'containers/stack_container_unittest.cc',
'cpu_unittest.cc',
'debug/crash_logging_unittest.cc',
'debug/debugger_unittest.cc',
'debug/leak_tracker_unittest.cc',
'debug/proc_maps_linux_unittest.cc',
'debug/stack_trace_unittest.cc',
'debug/task_annotator_unittest.cc',
'deferred_sequenced_task_runner_unittest.cc',
'environment_unittest.cc',
'feature_list_unittest.cc',
'file_version_info_unittest.cc',
'files/dir_reader_posix_unittest.cc',
'files/file_path_unittest.cc',
'files/file_path_watcher_unittest.cc',
'files/file_proxy_unittest.cc',
'files/file_unittest.cc',
'files/file_util_proxy_unittest.cc',
'files/file_util_unittest.cc',
'files/important_file_writer_unittest.cc',
'files/memory_mapped_file_unittest.cc',
'files/scoped_temp_dir_unittest.cc',
'gmock_unittest.cc',
'guid_unittest.cc',
'hash_unittest.cc',
'i18n/break_iterator_unittest.cc',
'i18n/case_conversion_unittest.cc',
'i18n/char_iterator_unittest.cc',
'i18n/file_util_icu_unittest.cc',
'i18n/icu_string_conversions_unittest.cc',
'i18n/message_formatter_unittest.cc',
'i18n/number_formatting_unittest.cc',
'i18n/rtl_unittest.cc',
'i18n/streaming_utf8_validator_unittest.cc',
'i18n/string_search_unittest.cc',
'i18n/time_formatting_unittest.cc',
'i18n/timezone_unittest.cc',
'id_map_unittest.cc',
'ios/crb_protocol_observers_unittest.mm',
'ios/device_util_unittest.mm',
'ios/weak_nsobject_unittest.mm',
'json/json_parser_unittest.cc',
'json/json_reader_unittest.cc',
'json/json_value_converter_unittest.cc',
'json/json_value_serializer_unittest.cc',
'json/json_writer_unittest.cc',
'json/string_escape_unittest.cc',
'lazy_instance_unittest.cc',
'logging_unittest.cc',
'mac/bind_objc_block_unittest.mm',
'mac/call_with_eh_frame_unittest.mm',
'mac/dispatch_source_mach_unittest.cc',
'mac/foundation_util_unittest.mm',
'mac/libdispatch_task_runner_unittest.cc',
'mac/mac_util_unittest.mm',
'mac/objc_property_releaser_unittest.mm',
'mac/scoped_nsobject_unittest.mm',
'mac/scoped_objc_class_swizzler_unittest.mm',
'mac/scoped_sending_event_unittest.mm',
'md5_unittest.cc',
'memory/aligned_memory_unittest.cc',
'memory/discardable_shared_memory_unittest.cc',
'memory/linked_ptr_unittest.cc',
'memory/memory_pressure_listener_unittest.cc',
'memory/memory_pressure_monitor_chromeos_unittest.cc',
'memory/memory_pressure_monitor_mac_unittest.cc',
'memory/memory_pressure_monitor_win_unittest.cc',
'memory/ref_counted_memory_unittest.cc',
'memory/ref_counted_unittest.cc',
'memory/scoped_ptr_unittest.cc',
'memory/scoped_ptr_unittest.nc',
'memory/scoped_vector_unittest.cc',
'memory/shared_memory_unittest.cc',
'memory/shared_memory_mac_unittest.cc',
'memory/singleton_unittest.cc',
'memory/weak_ptr_unittest.cc',
'memory/weak_ptr_unittest.nc',
'message_loop/message_loop_task_runner_unittest.cc',
'message_loop/message_loop_unittest.cc',
'message_loop/message_pump_glib_unittest.cc',
'message_loop/message_pump_io_ios_unittest.cc',
'message_loop/message_pump_libevent_unittest.cc',
'metrics/bucket_ranges_unittest.cc',
'metrics/field_trial_unittest.cc',
'metrics/histogram_base_unittest.cc',
'metrics/histogram_delta_serialization_unittest.cc',
'metrics/histogram_macros_unittest.cc',
'metrics/histogram_snapshot_manager_unittest.cc',
'metrics/histogram_unittest.cc',
'metrics/metrics_hashes_unittest.cc',
'metrics/sample_map_unittest.cc',
'metrics/sample_vector_unittest.cc',
'metrics/sparse_histogram_unittest.cc',
'metrics/statistics_recorder_unittest.cc',
'native_library_unittest.cc',
'numerics/safe_numerics_unittest.cc',
'observer_list_unittest.cc',
'os_compat_android_unittest.cc',
'path_service_unittest.cc',
'pickle_unittest.cc',
'posix/file_descriptor_shuffle_unittest.cc',
'posix/unix_domain_socket_linux_unittest.cc',
'power_monitor/power_monitor_unittest.cc',
'prefs/default_pref_store_unittest.cc',
'prefs/json_pref_store_unittest.cc',
'prefs/mock_pref_change_callback.h',
'prefs/overlay_user_pref_store_unittest.cc',
'prefs/pref_change_registrar_unittest.cc',
'prefs/pref_member_unittest.cc',
'prefs/pref_notifier_impl_unittest.cc',
'prefs/pref_service_unittest.cc',
'prefs/pref_value_map_unittest.cc',
'prefs/pref_value_store_unittest.cc',
'prefs/scoped_user_pref_update_unittest.cc',
'process/memory_unittest.cc',
'process/memory_unittest_mac.h',
'process/memory_unittest_mac.mm',
'process/process_metrics_unittest.cc',
'process/process_metrics_unittest_ios.cc',
'process/process_unittest.cc',
'process/process_util_unittest.cc',
'profiler/stack_sampling_profiler_unittest.cc',
'profiler/tracked_time_unittest.cc',
'rand_util_unittest.cc',
'scoped_clear_errno_unittest.cc',
'scoped_generic_unittest.cc',
'scoped_native_library_unittest.cc',
'security_unittest.cc',
'sequence_checker_unittest.cc',
'sha1_unittest.cc',
'stl_util_unittest.cc',
'strings/nullable_string16_unittest.cc',
'strings/pattern_unittest.cc',
'strings/safe_sprintf_unittest.cc',
'strings/string16_unittest.cc',
'strings/string_number_conversions_unittest.cc',
'strings/string_piece_unittest.cc',
'strings/string_split_unittest.cc',
'strings/string_tokenizer_unittest.cc',
'strings/string_util_unittest.cc',
'strings/stringize_macros_unittest.cc',
'strings/stringprintf_unittest.cc',
'strings/sys_string_conversions_mac_unittest.mm',
'strings/sys_string_conversions_unittest.cc',
'strings/utf_offset_string_conversions_unittest.cc',
'strings/utf_string_conversions_unittest.cc',
'supports_user_data_unittest.cc',
'sync_socket_unittest.cc',
'synchronization/cancellation_flag_unittest.cc',
'synchronization/condition_variable_unittest.cc',
'synchronization/lock_unittest.cc',
'synchronization/waitable_event_unittest.cc',
'synchronization/waitable_event_watcher_unittest.cc',
'sys_info_unittest.cc',
'system_monitor/system_monitor_unittest.cc',
'task/cancelable_task_tracker_unittest.cc',
'task_runner_util_unittest.cc',
'template_util_unittest.cc',
'test/histogram_tester_unittest.cc',
'test/test_pending_task_unittest.cc',
'test/test_reg_util_win_unittest.cc',
'test/trace_event_analyzer_unittest.cc',
'test/user_action_tester_unittest.cc',
'threading/non_thread_safe_unittest.cc',
'threading/platform_thread_unittest.cc',
'threading/sequenced_worker_pool_unittest.cc',
'threading/sequenced_task_runner_handle_unittest.cc',
'threading/simple_thread_unittest.cc',
'threading/thread_checker_unittest.cc',
'threading/thread_collision_warner_unittest.cc',
'threading/thread_id_name_manager_unittest.cc',
'threading/thread_local_storage_unittest.cc',
'threading/thread_local_unittest.cc',
'threading/thread_unittest.cc',
'threading/watchdog_unittest.cc',
'threading/worker_pool_posix_unittest.cc',
'threading/worker_pool_unittest.cc',
'time/pr_time_unittest.cc',
'time/time_unittest.cc',
'time/time_win_unittest.cc',
'timer/hi_res_timer_manager_unittest.cc',
'timer/mock_timer_unittest.cc',
'timer/timer_unittest.cc',
'tools_sanity_unittest.cc',
'tracked_objects_unittest.cc',
'tuple_unittest.cc',
'values_unittest.cc',
'version_unittest.cc',
'vlog_unittest.cc',
'win/dllmain.cc',
'win/enum_variant_unittest.cc',
'win/event_trace_consumer_unittest.cc',
'win/event_trace_controller_unittest.cc',
'win/event_trace_provider_unittest.cc',
'win/i18n_unittest.cc',
'win/iunknown_impl_unittest.cc',
'win/message_window_unittest.cc',
'win/object_watcher_unittest.cc',
'win/pe_image_unittest.cc',
'win/registry_unittest.cc',
'win/scoped_bstr_unittest.cc',
'win/scoped_comptr_unittest.cc',
'win/scoped_handle_unittest.cc',
'win/scoped_process_information_unittest.cc',
'win/scoped_variant_unittest.cc',
'win/shortcut_unittest.cc',
'win/startup_information_unittest.cc',
'win/win_util_unittest.cc',
'win/wrapped_window_proc_unittest.cc',
'<@(trace_event_test_sources)',
],
'dependencies': [
'base',
'base_i18n',
'base_message_loop_tests',
'base_prefs',
'base_prefs_test_support',
'base_static',
'run_all_unittests',
'test_support_base',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
],
'includes': ['../build/nocompile.gypi'],
'variables': {
# TODO(ajwong): Is there a way to autodetect this?
'module_dir': 'base'
},
'conditions': [
['OS == "android"', {
'dependencies': [
'android/jni_generator/jni_generator.gyp:jni_generator_tests',
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
['OS == "ios" and _toolset != "host"', {
'sources/': [
# iOS does not support FilePathWatcher.
['exclude', '^files/file_path_watcher_unittest\\.cc$'],
# Only test the iOS-meaningful portion of memory and process_utils.
['exclude', '^memory/discardable_shared_memory_unittest\\.cc$'],
['exclude', '^memory/shared_memory_unittest\\.cc$'],
['exclude', '^process/memory_unittest'],
['exclude', '^process/process_unittest\\.cc$'],
['exclude', '^process/process_util_unittest\\.cc$'],
['include', '^process/process_util_unittest_ios\\.cc$'],
# iOS does not use message_pump_libevent.
['exclude', '^message_loop/message_pump_libevent_unittest\\.cc$'],
],
'actions': [
{
'action_name': 'copy_test_data',
'variables': {
'test_data_files': [
'test/data',
],
'test_data_prefix': 'base',
},
'includes': [ '../build/copy_test_data_ios.gypi' ],
},
],
}],
['desktop_linux == 1 or chromeos == 1', {
'defines': [
'USE_SYMBOLIZE',
],
'sources!': [
'file_version_info_unittest.cc',
],
'conditions': [
[ 'desktop_linux==1', {
'sources': [
'nix/xdg_util_unittest.cc',
],
}],
],
}],
['use_glib == 1', {
'dependencies': [
'../build/linux/system.gyp:glib',
],
}, { # use_glib == 0
'sources!': [
'message_loop/message_pump_glib_unittest.cc',
]
}],
['use_ozone == 1', {
'sources!': [
'message_loop/message_pump_glib_unittest.cc',
]
}],
['OS == "linux"', {
'dependencies': [
'malloc_wrapper',
],
'conditions': [
['use_allocator!="none"', {
'dependencies': [
'allocator/allocator.gyp:allocator',
],
}],
]},
],
[ 'OS == "win" and target_arch == "x64"', {
'sources': [
'profiler/win32_stack_frame_unwinder_unittest.cc',
],
'dependencies': [
'base_profiler_test_support_library',
],
}],
['OS == "win"', {
'sources!': [
'file_descriptor_shuffle_unittest.cc',
'files/dir_reader_posix_unittest.cc',
'message_loop/message_pump_libevent_unittest.cc',
'threading/worker_pool_posix_unittest.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [
4267,
],
'conditions': [
# This is needed so base_unittests uses the allocator shim, as
# SecurityTest.MemoryAllocationRestriction* tests are dependent
# on tcmalloc.
# TODO(wfh): crbug.com/246278 Move tcmalloc specific tests into
# their own test suite.
['win_use_allocator_shim==1', {
'dependencies': [
'allocator/allocator.gyp:allocator',
],
}],
['icu_use_data_file_flag==0', {
# This is needed to trigger the dll copy step on windows.
# TODO(mark): This should not be necessary.
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
],
}],
],
}, { # OS != "win"
'dependencies': [
'../third_party/libevent/libevent.gyp:libevent'
],
}],
], # conditions
'target_conditions': [
['OS == "ios" and _toolset != "host"', {
'sources/': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
['include', '^mac/bind_objc_block_unittest\\.mm$'],
['include', '^mac/foundation_util_unittest\\.mm$',],
['include', '^mac/objc_property_releaser_unittest\\.mm$'],
['include', '^mac/scoped_nsobject_unittest\\.mm$'],
['include', '^sys_string_conversions_mac_unittest\\.mm$'],
],
}],
['OS == "android"', {
'sources/': [
['include', '^debug/proc_maps_linux_unittest\\.cc$'],
],
}],
# Enable more direct string conversions on platforms with native utf8
# strings
['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
'defines': ['SYSTEM_NATIVE_UTF8'],
}],
# SyncSocket isn't used on iOS
['OS=="ios"', {
'sources!': [
'sync_socket_unittest.cc',
],
}],
], # target_conditions
},
{
# GN: //base:base_perftests
'target_name': 'base_perftests',
'type': '<(gtest_target_type)',
'dependencies': [
'base',
'test_support_base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'message_loop/message_pump_perftest.cc',
'test/run_all_unittests.cc',
'threading/thread_perftest.cc',
'../testing/perf/perf_test.cc'
],
'conditions': [
['OS == "android"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
],
},
{
# GN: //base:base_i18n_perftests
'target_name': 'base_i18n_perftests',
'type': '<(gtest_target_type)',
'dependencies': [
'test_support_base',
'test_support_perf',
'../testing/gtest.gyp:gtest',
'base_i18n',
'base',
],
'sources': [
'i18n/streaming_utf8_validator_perftest.cc',
],
},
{
# GN: //base/test:test_support
'target_name': 'test_support_base',
'type': 'static_library',
'dependencies': [
'base',
'base_static',
'base_i18n',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/libxml/libxml.gyp:libxml',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
],
'export_dependent_settings': [
'base',
],
'conditions': [
['os_posix==0', {
'sources!': [
'test/scoped_locale.cc',
'test/scoped_locale.h',
],
}],
['os_bsd==1', {
'sources!': [
'test/test_file_util_linux.cc',
],
}],
['OS == "android"', {
'dependencies': [
'base_unittests_jni_headers',
'base_java_unittest_support',
],
}],
['OS == "ios"', {
'toolsets': ['host', 'target'],
}],
],
'sources': [
'test/gtest_util.cc',
'test/gtest_util.h',
'test/gtest_xml_unittest_result_printer.cc',
'test/gtest_xml_unittest_result_printer.h',
'test/gtest_xml_util.cc',
'test/gtest_xml_util.h',
'test/histogram_tester.cc',
'test/histogram_tester.h',
'test/icu_test_util.cc',
'test/icu_test_util.h',
'test/ios/wait_util.h',
'test/ios/wait_util.mm',
'test/launcher/test_launcher.cc',
'test/launcher/test_launcher.h',
'test/launcher/test_result.cc',
'test/launcher/test_result.h',
'test/launcher/test_results_tracker.cc',
'test/launcher/test_results_tracker.h',
'test/launcher/unit_test_launcher.cc',
'test/launcher/unit_test_launcher.h',
'test/launcher/unit_test_launcher_ios.cc',
'test/mock_chrome_application_mac.h',
'test/mock_chrome_application_mac.mm',
'test/mock_devices_changed_observer.cc',
'test/mock_devices_changed_observer.h',
'test/mock_entropy_provider.cc',
'test/mock_entropy_provider.h',
'test/mock_log.cc',
'test/mock_log.h',
'test/multiprocess_test.cc',
'test/multiprocess_test.h',
'test/multiprocess_test_android.cc',
'test/null_task_runner.cc',
'test/null_task_runner.h',
'test/opaque_ref_counted.cc',
'test/opaque_ref_counted.h',
'test/perf_log.cc',
'test/perf_log.h',
'test/perf_test_suite.cc',
'test/perf_test_suite.h',
'test/perf_time_logger.cc',
'test/perf_time_logger.h',
'test/power_monitor_test_base.cc',
'test/power_monitor_test_base.h',
'test/scoped_locale.cc',
'test/scoped_locale.h',
'test/scoped_path_override.cc',
'test/scoped_path_override.h',
'test/sequenced_task_runner_test_template.cc',
'test/sequenced_task_runner_test_template.h',
'test/sequenced_worker_pool_owner.cc',
'test/sequenced_worker_pool_owner.h',
'test/simple_test_clock.cc',
'test/simple_test_clock.h',
'test/simple_test_tick_clock.cc',
'test/simple_test_tick_clock.h',
'test/task_runner_test_template.cc',
'test/task_runner_test_template.h',
'test/test_discardable_memory_allocator.cc',
'test/test_discardable_memory_allocator.h',
'test/test_file_util.cc',
'test/test_file_util.h',
'test/test_file_util_android.cc',
'test/test_file_util_linux.cc',
'test/test_file_util_mac.cc',
'test/test_file_util_posix.cc',
'test/test_file_util_win.cc',
'test/test_io_thread.cc',
'test/test_io_thread.h',
'test/test_listener_ios.h',
'test/test_listener_ios.mm',
'test/test_mock_time_task_runner.cc',
'test/test_mock_time_task_runner.h',
'test/test_pending_task.cc',
'test/test_pending_task.h',
'test/test_reg_util_win.cc',
'test/test_reg_util_win.h',
'test/test_shortcut_win.cc',
'test/test_shortcut_win.h',
'test/test_simple_task_runner.cc',
'test/test_simple_task_runner.h',
'test/test_suite.cc',
'test/test_suite.h',
'test/test_support_android.cc',
'test/test_support_android.h',
'test/test_support_ios.h',
'test/test_support_ios.mm',
'test/test_switches.cc',
'test/test_switches.h',
'test/test_timeouts.cc',
'test/test_timeouts.h',
'test/test_ui_thread_android.cc',
'test/test_ui_thread_android.h',
'test/thread_test_helper.cc',
'test/thread_test_helper.h',
'test/trace_event_analyzer.cc',
'test/trace_event_analyzer.h',
'test/trace_to_file.cc',
'test/trace_to_file.h',
'test/user_action_tester.cc',
'test/user_action_tester.h',
'test/values_test_util.cc',
'test/values_test_util.h',
],
'target_conditions': [
['OS == "ios"', {
'sources/': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
['include', '^test/test_file_util_mac\\.cc$'],
],
}],
['OS == "ios" and _toolset == "target"', {
'sources!': [
# iOS uses its own unit test launcher.
'test/launcher/unit_test_launcher.cc',
],
}],
['OS == "ios" and _toolset == "host"', {
'sources!': [
'test/launcher/unit_test_launcher_ios.cc',
'test/test_support_ios.h',
'test/test_support_ios.mm',
],
}],
], # target_conditions
},
{
'target_name': 'test_support_perf',
'type': 'static_library',
'dependencies': [
'base',
'test_support_base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'test/run_all_perftests.cc',
],
'direct_dependent_settings': {
'defines': [
'PERF_TEST',
],
},
},
{
'target_name': 'test_launcher_nacl_nonsfi',
'conditions': [
['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', {
'type': 'static_library',
'sources': [
'test/launcher/test_launcher_nacl_nonsfi.cc',
],
'dependencies': [
'test_support_base',
],
}, {
'type': 'none',
}],
],
},
],
'conditions': [
['OS=="ios" and "<(GENERATOR)"=="ninja"', {
'targets': [
{
'target_name': 'test_launcher',
'toolsets': ['host'],
'type': 'executable',
'dependencies': [
'test_support_base',
],
'sources': [
'test/launcher/test_launcher_ios.cc',
],
},
],
}],
['OS!="ios"', {
'targets': [
{
# GN: //base:check_example
'target_name': 'check_example',
'type': 'executable',
'sources': [
'check_example.cc',
],
'dependencies': [
'base',
],
},
{
'target_name': 'build_utf8_validator_tables',
'type': 'executable',
'toolsets': ['host'],
'dependencies': [
'base',
'../third_party/icu/icu.gyp:icuuc',
],
'sources': [
'i18n/build_utf8_validator_tables.cc'
],
},
],
}],
['OS == "win" and target_arch=="ia32"', {
'targets': [
# The base_win64 target here allows us to use base for Win64 targets
# (the normal build is 32 bits).
{
'target_name': 'base_win64',
'type': '<(component)',
'variables': {
'base_target': 1,
},
'dependencies': [
'base_static_win64',
'allocator/allocator.gyp:allocator_extension_thunks_win64',
'../third_party/modp_b64/modp_b64.gyp:modp_b64_win64',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
],
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 32-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'defines': [
'BASE_WIN64',
'<@(nacl_win64_defines)',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
'conditions': [
['component == "shared_library"', {
'sources!': [
'debug/debug_on_start_win.cc',
],
}],
],
# Specify delayload for base_win64.dll.
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
# Specify delayload for components that link with base_win64.lib.
'all_dependent_settings': {
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
},
# TODO(rvargas): Bug 78117. Remove this.
'msvs_disabled_warnings': [
4244,
4996,
4267,
],
'sources': [
'auto_reset.h',
'linux_util.cc',
'linux_util.h',
'md5.cc',
'md5.h',
'message_loop/message_pump_libevent.cc',
'message_loop/message_pump_libevent.h',
'metrics/field_trial.cc',
'metrics/field_trial.h',
'posix/file_descriptor_shuffle.cc',
'posix/file_descriptor_shuffle.h',
'sync_socket.h',
'sync_socket_posix.cc',
'sync_socket_win.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
],
},
{
'target_name': 'base_i18n_nacl_win64',
'type': '<(component)',
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 32-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'defines': [
'<@(nacl_win64_defines)',
'BASE_I18N_IMPLEMENTATION',
],
'include_dirs': [
'..',
],
'sources': [
'i18n/icu_util_nacl_win64.cc',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
},
{
# TODO(rvargas): Remove this when gyp finally supports a clean model.
# See bug 36232.
'target_name': 'base_static_win64',
'type': 'static_library',
'sources': [
'base_switches.cc',
'base_switches.h',
'win/pe_image.cc',
'win/pe_image.h',
],
'sources!': [
# base64.cc depends on modp_b64.
'base64.cc',
],
'include_dirs': [
'..',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
'defines': [
'<@(nacl_win64_defines)',
],
# TODO(rvargas): Bug 78117. Remove this.
'msvs_disabled_warnings': [
4244,
],
},
],
}],
['OS == "win" and target_arch=="x64"', {
'targets': [
{
'target_name': 'base_profiler_test_support_library',
# Must be a shared library so that it can be unloaded during testing.
'type': 'shared_library',
'include_dirs': [
'..',
],
'sources': [
'profiler/test_support_library.cc',
],
},
]
}],
['os_posix==1 and OS!="mac" and OS!="ios"', {
'targets': [
{
'target_name': 'symbolize',
'type': 'static_library',
'toolsets': ['host', 'target'],
'variables': {
'chromium_code': 0,
},
'conditions': [
['OS == "solaris"', {
'include_dirs': [
'/usr/gnu/include',
'/usr/gnu/include/libelf',
],
},],
],
'cflags': [
'-Wno-sign-compare',
],
'cflags!': [
'-Wextra',
],
'defines': [
'GLOG_BUILD_CONFIG_INCLUDE="build/build_config.h"',
],
'sources': [
'third_party/symbolize/config.h',
'third_party/symbolize/demangle.cc',
'third_party/symbolize/demangle.h',
'third_party/symbolize/glog/logging.h',
'third_party/symbolize/glog/raw_logging.h',
'third_party/symbolize/symbolize.cc',
'third_party/symbolize/symbolize.h',
'third_party/symbolize/utilities.h',
],
'include_dirs': [
'..',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'xdg_mime',
'type': 'static_library',
'toolsets': ['host', 'target'],
'variables': {
'chromium_code': 0,
},
'cflags!': [
'-Wextra',
],
'sources': [
'third_party/xdg_mime/xdgmime.c',
'third_party/xdg_mime/xdgmime.h',
'third_party/xdg_mime/xdgmimealias.c',
'third_party/xdg_mime/xdgmimealias.h',
'third_party/xdg_mime/xdgmimecache.c',
'third_party/xdg_mime/xdgmimecache.h',
'third_party/xdg_mime/xdgmimeglob.c',
'third_party/xdg_mime/xdgmimeglob.h',
'third_party/xdg_mime/xdgmimeicon.c',
'third_party/xdg_mime/xdgmimeicon.h',
'third_party/xdg_mime/xdgmimeint.c',
'third_party/xdg_mime/xdgmimeint.h',
'third_party/xdg_mime/xdgmimemagic.c',
'third_party/xdg_mime/xdgmimemagic.h',
'third_party/xdg_mime/xdgmimeparent.c',
'third_party/xdg_mime/xdgmimeparent.h',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
],
}],
['OS == "linux"', {
'targets': [
{
'target_name': 'malloc_wrapper',
'type': 'shared_library',
'dependencies': [
'base',
],
'sources': [
'test/malloc_wrapper.cc',
],
}
],
}],
['OS == "android"', {
'targets': [
{
# GN: //base:base_jni_headers
'target_name': 'base_jni_headers',
'type': 'none',
'sources': [
'android/java/src/org/chromium/base/ApkAssets.java',
'android/java/src/org/chromium/base/ApplicationStatus.java',
'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java',
'android/java/src/org/chromium/base/BuildInfo.java',
'android/java/src/org/chromium/base/CommandLine.java',
'android/java/src/org/chromium/base/ContentUriUtils.java',
'android/java/src/org/chromium/base/ContextUtils.java',
'android/java/src/org/chromium/base/CpuFeatures.java',
'android/java/src/org/chromium/base/EventLog.java',
'android/java/src/org/chromium/base/FieldTrialList.java',
'android/java/src/org/chromium/base/ImportantFileWriterAndroid.java',
'android/java/src/org/chromium/base/JNIUtils.java',
'android/java/src/org/chromium/base/JavaHandlerThread.java',
'android/java/src/org/chromium/base/LocaleUtils.java',
'android/java/src/org/chromium/base/MemoryPressureListener.java',
'android/java/src/org/chromium/base/PathService.java',
'android/java/src/org/chromium/base/PathUtils.java',
'android/java/src/org/chromium/base/PowerMonitor.java',
'android/java/src/org/chromium/base/SysUtils.java',
'android/java/src/org/chromium/base/SystemMessageHandler.java',
'android/java/src/org/chromium/base/ThreadUtils.java',
'android/java/src/org/chromium/base/TraceEvent.java',
'android/java/src/org/chromium/base/library_loader/LibraryLoader.java',
'android/java/src/org/chromium/base/metrics/RecordHistogram.java',
'android/java/src/org/chromium/base/metrics/RecordUserAction.java',
],
'variables': {
'jni_gen_package': 'base',
},
'dependencies': [
'android_runtime_jni_headers',
],
'includes': [ '../build/jni_generator.gypi' ],
},
{
# GN: //base:android_runtime_jni_headers
'target_name': 'android_runtime_jni_headers',
'type': 'none',
'variables': {
'jni_gen_package': 'base',
'input_java_class': 'java/lang/Runtime.class',
},
'includes': [ '../build/jar_file_jni_generator.gypi' ],
},
{
# GN: //base:base_unittests_jni_headers
'target_name': 'base_unittests_jni_headers',
'type': 'none',
'sources': [
'test/android/java/src/org/chromium/base/ContentUriTestUtils.java',
'test/android/java/src/org/chromium/base/TestUiThread.java',
],
'variables': {
'jni_gen_package': 'base',
},
'includes': [ '../build/jni_generator.gypi' ],
},
{
# GN: //base:base_native_libraries_gen
'target_name': 'base_native_libraries_gen',
'type': 'none',
'sources': [
'android/java/templates/NativeLibraries.template',
],
'variables': {
'package_name': 'org/chromium/base/library_loader',
'template_deps': [],
},
'includes': [ '../build/android/java_cpp_template.gypi' ],
},
{
# GN: //base:base_multidex_gen
'target_name': 'base_multidex_gen',
'type': 'none',
'sources': [
'android/java/templates/ChromiumMultiDex.template',
],
'variables': {
'package_name': 'org/chromium/base/multidex',
'template_deps': [],
'additional_gcc_preprocess_options': [
'--defines', 'MULTIDEX_CONFIGURATION_<(CONFIGURATION_NAME)',
],
},
'includes': ['../build/android/java_cpp_template.gypi'],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_library_process_type',
'type': 'none',
'variables': {
'source_file': 'android/library_loader/library_loader_hooks.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_java
'target_name': 'base_java',
'type': 'none',
'variables': {
'java_in_dir': 'android/java',
'jar_excluded_classes': [ '*/NativeLibraries.class' ],
},
'dependencies': [
'base_java_application_state',
'base_java_library_load_from_apk_status_codes',
'base_java_library_process_type',
'base_java_memory_pressure_level',
'base_multidex_gen',
'base_native_libraries_gen',
'../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
'../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib',
],
'includes': [ '../build/java.gypi' ],
},
{
# GN: //base:base_java_unittest_support
'target_name': 'base_java_unittest_support',
'type': 'none',
'dependencies': [
'base_java',
],
'variables': {
'java_in_dir': '../base/test/android/java',
},
'includes': [ '../build/java.gypi' ],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_application_state',
'type': 'none',
'variables': {
'source_file': 'android/application_status_listener.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_library_load_from_apk_status_codes',
'type': 'none',
'variables': {
'source_file': 'android/library_loader/library_load_from_apk_status_codes.h'
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_memory_pressure_level',
'type': 'none',
'variables': {
'source_file': 'memory/memory_pressure_listener.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_java_test_support
'target_name': 'base_java_test_support',
'type': 'none',
'dependencies': [
'base_java',
'../testing/android/on_device_instrumentation.gyp:reporter_java',
],
'variables': {
'java_in_dir': '../base/test/android/javatests',
},
'includes': [ '../build/java.gypi' ],
},
{
# TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
# in the multidex shadow library. crbug.com/522043
# GN: //base:base_junit_test_support
'target_name': 'base_junit_test_support',
'type': 'none',
'dependencies': [
'../testing/android/junit/junit_test.gyp:junit_test_support',
'../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
],
'variables': {
'src_paths': [
'../base/test/android/junit/',
],
},
'includes': [ '../build/host_jar.gypi' ]
},
{
# GN: //base:base_junit_tests
'target_name': 'base_junit_tests',
'type': 'none',
'dependencies': [
'base_java',
'base_java_test_support',
'base_junit_test_support',
'../testing/android/junit/junit_test.gyp:junit_test_support',
],
'variables': {
'main_class': 'org.chromium.testing.local.JunitTestMain',
'src_paths': [
'../base/android/junit/',
],
},
'includes': [ '../build/host_jar.gypi' ],
},
{
# GN: //base:base_javatests
'target_name': 'base_javatests',
'type': 'none',
'dependencies': [
'base_java',
'base_java_test_support',
],
'variables': {
'java_in_dir': '../base/android/javatests',
},
'includes': [ '../build/java.gypi' ],
},
{
# GN: //base/android/linker:chromium_android_linker
'target_name': 'chromium_android_linker',
'type': 'shared_library',
'sources': [
'android/linker/android_dlext.h',
'android/linker/legacy_linker_jni.cc',
'android/linker/legacy_linker_jni.h',
'android/linker/linker_jni.cc',
'android/linker/linker_jni.h',
'android/linker/modern_linker_jni.cc',
'android/linker/modern_linker_jni.h',
],
# The crazy linker is never instrumented.
'cflags!': [
'-finstrument-functions',
],
'dependencies': [
# The NDK contains the crazy_linker here:
# '<(android_ndk_root)/crazy_linker.gyp:crazy_linker'
# However, we use our own fork. See bug 384700.
'../third_party/android_crazy_linker/crazy_linker.gyp:crazy_linker',
],
},
{
# GN: //base:base_perftests_apk
'target_name': 'base_perftests_apk',
'type': 'none',
'dependencies': [
'base_perftests',
],
'variables': {
'test_suite_name': 'base_perftests',
},
'includes': [ '../build/apk_test.gypi' ],
},
{
# GN: //base:base_unittests_apk
'target_name': 'base_unittests_apk',
'type': 'none',
'dependencies': [
'base_java',
'base_unittests',
],
'variables': {
'test_suite_name': 'base_unittests',
'isolate_file': 'base_unittests.isolate',
},
'includes': [ '../build/apk_test.gypi' ],
},
],
'conditions': [
['test_isolation_mode != "noop"',
{
'targets': [
{
'target_name': 'base_unittests_apk_run',
'type': 'none',
'dependencies': [
'base_unittests_apk',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'base_unittests_apk.isolate',
],
},
]
}
],
],
}],
['OS == "win"', {
'targets': [
{
# Target to manually rebuild pe_image_test.dll which is checked into
# base/test/data/pe_image.
'target_name': 'pe_image_test',
'type': 'shared_library',
'sources': [
'win/pe_image_test.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
'DelayLoadDLLs': [
'cfgmgr32.dll',
'shell32.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'shell32.lib',
],
},
},
},
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'base_unittests_run',
'type': 'none',
'dependencies': [
'base_unittests',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'base_unittests.isolate',
],
},
],
}],
],
} | en | 0.806921 | # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # TODO(gregoryd): direct_dependent_settings should be shared with the # 64-bit target, but it doesn't work due to a bug in gyp # desktop_linux == 0 and chromeos == 0 # Always build base as a static_library for host toolset, even if # we're doing a component build. Specifically, we only care about the # target toolset using components since that's what developers are # focusing on. In theory we should do this more generally for all # targets when building for host, but getting the gyp magic # per-toolset for the "component" variable is hard, and we really only # need base on host. # Base for host support is the minimum required to run the # ssl false start blacklist tool. It requires further changes # to generically support host builds (and tests). # Note: when building for host, gyp has OS == "android", # hence the *_android.cc files are included but the actual code # doesn't have OS_ANDROID / ANDROID defined. # We need rt for clock_gettime(). # For 'native_library_linux.cc' # Specify delayload for base.dll. # Specify delayload for components that link with base.lib. # TODO(jschuh): crbug.com/167187 fix size_t to int truncations. # else icu_use_data_file_flag !=1 # This is the subset of files from base that should not be used with a # dynamic library. Note that this library cannot depend on base because # base depends on base_static. # Include this target for a main() function that simply instantiates # and runs a base::TestSuite. # TODO(ajwong): Is there a way to autodetect this? # iOS does not support FilePathWatcher. # Only test the iOS-meaningful portion of memory and process_utils. # iOS does not use message_pump_libevent. # use_glib == 0 # TODO(jschuh): crbug.com/167187 fix size_t to int truncations. # This is needed so base_unittests uses the allocator shim, as # SecurityTest.MemoryAllocationRestriction* tests are dependent # on tcmalloc. # TODO(wfh): crbug.com/246278 Move tcmalloc specific tests into # their own test suite. # This is needed to trigger the dll copy step on windows. # TODO(mark): This should not be necessary. # OS != "win" # conditions # Pull in specific Mac files for iOS (which have been filtered out # by file name rules). # Enable more direct string conversions on platforms with native utf8 # strings # SyncSocket isn't used on iOS # target_conditions # GN: //base:base_perftests # GN: //base:base_i18n_perftests # GN: //base/test:test_support # Pull in specific Mac files for iOS (which have been filtered out # by file name rules). # iOS uses its own unit test launcher. # target_conditions # GN: //base:check_example # The base_win64 target here allows us to use base for Win64 targets # (the normal build is 32 bits). # TODO(gregoryd): direct_dependent_settings should be shared with the # 32-bit target, but it doesn't work due to a bug in gyp # Specify delayload for base_win64.dll. # Specify delayload for components that link with base_win64.lib. # TODO(rvargas): Bug 78117. Remove this. # TODO(gregoryd): direct_dependent_settings should be shared with the # 32-bit target, but it doesn't work due to a bug in gyp # TODO(rvargas): Remove this when gyp finally supports a clean model. # See bug 36232. # base64.cc depends on modp_b64. # TODO(rvargas): Bug 78117. Remove this. # Must be a shared library so that it can be unloaded during testing. 
# GN: //base:base_jni_headers # GN: //base:android_runtime_jni_headers # GN: //base:base_unittests_jni_headers # GN: //base:base_native_libraries_gen # GN: //base:base_multidex_gen # GN: //base:base_android_java_enums_srcjar # GN: //base:base_java # GN: //base:base_java_unittest_support # GN: //base:base_android_java_enums_srcjar # GN: //base:base_android_java_enums_srcjar # GN: //base:base_android_java_enums_srcjar # GN: //base:base_java_test_support # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull # in the multidex shadow library. crbug.com/522043 # GN: //base:base_junit_test_support # GN: //base:base_junit_tests # GN: //base:base_javatests # GN: //base/android/linker:chromium_android_linker # The crazy linker is never instrumented. # The NDK contains the crazy_linker here: # '<(android_ndk_root)/crazy_linker.gyp:crazy_linker' # However, we use our own fork. See bug 384700. # GN: //base:base_perftests_apk # GN: //base:base_unittests_apk # Target to manually rebuild pe_image_test.dll which is checked into # base/test/data/pe_image. # Set /SUBSYSTEM:WINDOWS | 1.450447 | 1 |
solutions/PE4.py | KerimovEmil/ProjectEuler | 1 | 892 | """
PROBLEM
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers
is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
ANSWER:
906609
Solve time ~ 0.760 seconds
"""
from itertools import product
import unittest
from util.utils import timeit
class Problem4:
    def __init__(self, num_digits):
        # half-open bounds so range() covers every num_digits-digit factor (100..999 for 3 digits)
        self.lower = 10 ** (num_digits - 1)
        self.upper = 10 ** num_digits
    @staticmethod
    def is_palindrome(num):
        return str(num) == str(num)[::-1]
    @timeit
    def solve(self):
        pds = []
        for i, j in product(range(self.lower, self.upper), repeat=2):
            if self.is_palindrome(i * j):
                pds.append(i * j)
        return max(pds)
class Solution4(unittest.TestCase):
    def setUp(self):
        self.problem = Problem4(3)
    def test_solution(self):
        self.assertEqual(906609, self.problem.solve())
if __name__ == '__main__':
    unittest.main()
| """
PROBLEM
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers
is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
ANSWER:
906609
Solve time ~ 0.760 seconds
"""
from itertools import product
import unittest
from util.utils import timeit
class Problem4:
    def __init__(self, num_digits):
        # half-open bounds so range() covers every num_digits-digit factor (100..999 for 3 digits)
        self.lower = 10 ** (num_digits - 1)
        self.upper = 10 ** num_digits
    @staticmethod
    def is_palindrome(num):
        return str(num) == str(num)[::-1]
    @timeit
    def solve(self):
        pds = []
        for i, j in product(range(self.lower, self.upper), repeat=2):
            if self.is_palindrome(i * j):
                pds.append(i * j)
        return max(pds)
class Solution4(unittest.TestCase):
    def setUp(self):
        self.problem = Problem4(3)
    def test_solution(self):
        self.assertEqual(906609, self.problem.solve())
if __name__ == '__main__':
    unittest.main()
| en | 0.888968 | PROBLEM A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99. Find the largest palindrome made from the product of two 3-digit numbers. ANSWER: 906609 Solve time ~ 0.760 seconds | 4.111977 | 4 |
indexclient/parsers/info.py | uc-cdis/indexclient | 2 | 893 | import sys
import json
import logging
import argparse
import warnings
import requests
from indexclient import errors
# DEPRECATED 11/2019 -- interacts with old `/alias/` endpoint.
# For creating aliases for indexd records, prefer using
# the `add_alias` function, which interacts with the new
# `/index/{GUID}/aliases` endpoint.
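# Rough sketch of the newer alias flow this note points to -- for orientation
# only; the request payload shape and helper signature below are assumptions,
# not defined in this module:
#
#   url = "http://{host}:{port}/index/{guid}/aliases".format(
#       host=host, port=port, guid=guid
#   )
#   requests.post(url, json={"aliases": [{"value": "my-alias"}]})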
def info(host, port, name, **kwargs):
"""
Retrieve info by name.
"""
warnings.warn(
(
"This function is deprecated. For creating aliases for indexd "
"records, prefer using the `add_alias_for_did` function, which "
"interacts with the new `/index/{GUID}/aliases` endpoint."
),
DeprecationWarning,
)
resource = "http://{host}:{port}/alias/{name}".format(
host=host, port=port, name=name
)
res = requests.get(resource)
try:
res.raise_for_status()
except Exception as err:
raise errors.BaseIndexError(res.status_code, res.text)
try:
doc = res.json()
except ValueError as err:
reason = json.dumps({"error": "invalid json payload returned"})
raise errors.BaseIndexError(res.status_code, reason)
sys.stdout.write(json.dumps(doc))
def config(parser):
"""
Configure the info command.
"""
parser.set_defaults(func=info)
parser.add_argument("name", help="name of information to retrieve")
| import sys
import json
import logging
import argparse
import warnings
import requests
from indexclient import errors
# DEPRECATED 11/2019 -- interacts with old `/alias/` endpoint.
# For creating aliases for indexd records, prefer using
# the `add_alias` function, which interacts with the new
# `/index/{GUID}/aliases` endpoint.
def info(host, port, name, **kwargs):
"""
Retrieve info by name.
"""
warnings.warn(
(
"This function is deprecated. For creating aliases for indexd "
"records, prefer using the `add_alias_for_did` function, which "
"interacts with the new `/index/{GUID}/aliases` endpoint."
),
DeprecationWarning,
)
resource = "http://{host}:{port}/alias/{name}".format(
host=host, port=port, name=name
)
res = requests.get(resource)
try:
res.raise_for_status()
except Exception as err:
raise errors.BaseIndexError(res.status_code, res.text)
try:
doc = res.json()
except ValueError as err:
reason = json.dumps({"error": "invalid json payload returned"})
raise errors.BaseIndexError(res.status_code, reason)
sys.stdout.write(json.dumps(doc))
def config(parser):
"""
Configure the info command.
"""
parser.set_defaults(func=info)
parser.add_argument("name", help="name of information to retrieve")
| en | 0.661565 | # DEPRECATED 11/2019 -- interacts with old `/alias/` endpoint. # For creating aliases for indexd records, prefer using # the `add_alias` function, which interacts with the new # `/index/{GUID}/aliases` endpoint. Retrieve info by name. Configure the info command. | 2.574187 | 3 |
email-worker-compose/app/sender.py | guilhermebc/docker-playground | 1 | 894 | <reponame>guilhermebc/docker-playground<filename>email-worker-compose/app/sender.py
import psycopg2
import redis
import json
from bottle import Bottle, request
class Sender(Bottle):
    def __init__(self):
        super().__init__()
        self.route('/', method='POST', callback=self.send)
        self.fila = redis.StrictRedis(host='queue', port=6379, db=0)
        DSN = 'dbname=email_sender user=postgress host=db'
        self.conn = psycopg2.connect(DSN)
    def register_message(self, assunto, mensagem):
        # Persist the message in Postgres first...
        SQL = 'INSERT INTO emails (assunto, mensagem) VALUES (%s, %s)'
        cur = self.conn.cursor()
        cur.execute(SQL, (assunto, mensagem))
        self.conn.commit()
        cur.close()
        # ...then push it onto the Redis queue ("fila") for the worker to send.
        msg = {'assunto': assunto, 'mensagem': mensagem}
        self.fila.rpush('sender', json.dumps(msg))
        print('Message registered!')
    def send(self):
        assunto = request.forms.get('assunto')
        mensagem = request.forms.get('mensagem')
        self.register_message(assunto, mensagem)
        return 'Message queued! Assunto: {} Mensagem: {}'.format(
            assunto, mensagem
        )
if __name__ == '__main__':
    sender = Sender()
    sender.run(host='0.0.0.0', port=8080, debug=True) | import psycopg2
import redis
import json
from bottle import Bottle, request
class Sender(Bottle):
    def __init__(self):
        super().__init__()
        self.route('/', method='POST', callback=self.send)
        self.fila = redis.StrictRedis(host='queue', port=6379, db=0)
        DSN = 'dbname=email_sender user=postgress host=db'
        self.conn = psycopg2.connect(DSN)
    def register_message(self, assunto, mensagem):
        SQL = 'INSERT INTO emails (assunto, mensagem) VALUES (%s, %s)'
        cur = self.conn.cursor()
        cur.execute(SQL, (assunto, mensagem))
        self.conn.commit()
        cur.close()
        msg = {'assunto': assunto, 'mensagem': mensagem}
        self.fila.rpush('sender', json.dumps(msg))
        print('Message registered!')
    def send(self):
        assunto = request.forms.get('assunto')
        mensagem = request.forms.get('mensagem')
        self.register_message(assunto, mensagem)
        return 'Message queued! Assunto: {} Mensagem: {}'.format(
            assunto, mensagem
        )
if __name__ == '__main__':
    sender = Sender()
    sender.run(host='0.0.0.0', port=8080, debug=True) | none | 1 | 2.389845 | 2 |
|
tests/ximpl.py | zsimic/sandbox | 0 | 895 | <reponame>zsimic/sandbox
import click
import poyo
import ruamel.yaml
import runez
import strictyaml
import yaml as pyyaml
from zyaml import load_path, load_string, tokens_from_path, tokens_from_string
from zyaml.marshal import decode, default_marshal, represented_scalar
from . import TestSettings
class ImplementationCollection(object):
    def __init__(self, names, default="zyaml,ruamel"):
        av = [ZyamlImplementation, RuamelImplementation, PyyamlBaseImplementation, PoyoImplementation, StrictImplementation]
        self.available = dict((m.name, m()) for m in av)
        self.unknown = []
        self.selected = []
        if names.startswith("+"):
            names = "%s,%s" % (names[1:], default)
        names = [s.strip() for s in names.split(",")]
        names = [s for s in names if s]
        seen = {}
        for name in names:
            found = 0
            for i in self.available.values():
                if name == "all" or name in i.name:
                    if i.name not in seen:
                        seen[i.name] = True
                        self.selected.append(i)
                    found += 1
            if found == 0:
                self.unknown.append(name)
        self.combinations = None
    def track_result_combination(self, impl, data):
        if isinstance(data, Exception):
            value = runez.stringified(data)
        else:
            value = runez.represented_json(data, stringify=decode, keep_none=True, none_key="-null-")
        name = impl.name
        if self.combinations is None:
            self.combinations = {}
            for i1 in self.selected:
                for i2 in self.selected:
                    if i1.name < i2.name:
                        self.combinations[(i1.name, i2.name)] = set()
        for names, values in self.combinations.items():
            if name in names:
                values.add(value)
    def __repr__(self):
        return ",".join(str(i) for i in self.selected)
    def __len__(self):
        return len(self.selected)
    def __iter__(self):
        for i in self.selected:
            yield i
class Implementation(object):
"""Implementation of loading a yml file"""
name = None # type: str
def __repr__(self):
return self.name
@classmethod
def option(cls, default="zyaml,ruamel", count=None, **kwargs):
"""
Args:
default (str | None): Default implementation(s) to use
            count (int | None): Optional: exact number of implementations that have to be specified
            **kwargs: Passed-through to click
        """
        kwargs["default"] = default
        def _callback(_ctx, _param, value):
            if not value:
                return None
            impls = ImplementationCollection(value, default=default)
            if impls.unknown:
                raise click.BadParameter("Unknown implementation(s): %s" % ", ".join(impls.unknown))
            if count and len(impls) != count:
                if count == 1:
                    raise click.BadParameter("Need exactly 1 implementation")
                raise click.BadParameter("Need exactly %s" % runez.plural(count, "implementation"))
            if count == 1:
                return impls.selected[0]
            return impls
        metavar = "I1,..."
        hlp = "Implementation(s)"
        if count:
            hlp = runez.plural(count, "implementation")
            metavar = ",".join("I%s" % (i + 1) for i in range(count))
        kwargs.setdefault("help", "%s to use" % hlp)
        kwargs.setdefault("show_default", True)
        kwargs.setdefault("metavar", metavar)
        name = "implementation" if count == 1 else "implementations"
        return click.option(name, "-i", callback=_callback, **kwargs)
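    # Hypothetical usage of the decorator built above (not part of the original
    # module): stack it on a click command and the selected implementation(s)
    # arrive as the "implementation"/"implementations" argument.
    #
    #   @click.command()
    #   @Implementation.option(count=1)
    #   def show(implementation):
    #       print(implementation.name)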
    def show_result(self, data, tokens=False):
        rtype = "tokens" if tokens else data.__class__.__name__ if data is not None else "None"
        rep = data
        if not tokens or isinstance(data, Exception):
            rep = TestSettings.represented(data)
        message = "---- %s: %s" % (runez.bold(self.name), runez.dim(rtype))
        if isinstance(data, NotImplementedError):
            print("%s - %s" % (message, rep))
            return
        print(message)
        print(rep)
    def get_outcome(self, content, tokens=False):
        if tokens:
            data = self.tokens(content)
            if isinstance(data, list):
                data = "\n".join(self.represented_token(t) for t in data)
            return data
        return self.deserialized(content)
    def deserialized(self, source):
        value = TestSettings.protected_call(self._deserialized, source)
        return self._simplified(value)
    def tokens(self, source):
        return TestSettings.protected_call(self._tokenize, source)
    def represented_token(self, token):
        return str(token)
    def _deserialized(self, source):
        if hasattr(source, "path"):
            return self._deserialized_from_path(source.path)
        return self._deserialized_from_string(source)
    def _deserialized_from_path(self, path):
        with open(path) as fh:
            return self._deserialized_from_string(fh.read())
    def _deserialized_from_string(self, source):
        raise NotImplementedError()
    def _tokenize(self, source):
        if hasattr(source, "path"):
            return self._tokens_from_path(source.path)
        return self._tokens_from_string(source)
    def _tokens_from_path(self, path):
        with open(path) as fh:
            return TestSettings.unwrapped(self._tokens_from_string(fh.read()))
    def _tokens_from_string(self, source):
        raise NotImplementedError()
    def _simplified(self, value):
        if isinstance(value, list) and len(value) == 1:
            return value[0]
        return value
class ZyamlImplementation(Implementation):
    name = "zyaml"
    def _deserialized_from_path(self, path):
        return load_path(path)
    def _deserialized_from_string(self, source):
        return load_string(source)
    def _tokens_from_path(self, path):
        return tokens_from_path(path)
    def _tokens_from_string(self, source):
        return tokens_from_string(source)
    def _simplified(self, value):
        return value
def ruamel_passthrough_tags(loader, tag, node):
    name = node.__class__.__name__
    if "Seq" in name:
        result = []
        for v in node.value:
            result.append(ruamel_passthrough_tags(loader, tag, v))
        return result
    if "Map" in name:
        result = {}
        for k, v in node.value:
            k = ruamel_passthrough_tags(loader, tag, k)
            v = ruamel_passthrough_tags(loader, tag, v)
            result[k] = v
        return result
    return default_marshal(node.value)
class RuamelImplementation(Implementation):
    name = "ruamel"
    def _deserialized_from_string(self, source):
        y = ruamel.yaml.YAML(typ="safe")
        ruamel.yaml.add_multi_constructor("", ruamel_passthrough_tags, Loader=ruamel.yaml.SafeLoader)
        return y.load_all(source)
    def _tokens_from_string(self, source):
        return ruamel.yaml.main.scan(source)
class PyyamlBaseImplementation(Implementation):
    name = "pyyaml"
    def _deserialized_from_string(self, source):
        return pyyaml.load_all(source, Loader=pyyaml.BaseLoader)
    def _tokens_from_string(self, source):
        yaml_loader = pyyaml.BaseLoader(source)
        curr = yaml_loader.get_token()
        while curr is not None:
            yield curr
            curr = yaml_loader.get_token()
    def represented_token(self, token):
        linenum = token.start_mark.line + 1
        column = token.start_mark.column + 1
        result = "%s[%s,%s]" % (token.__class__.__name__, linenum, column)
        value = getattr(token, "value", None)
        if value is not None:
            if token.id == "<scalar>":
                value = represented_scalar(token.style, value)
            elif token.id == "<anchor>":
                value = "&%s" % value
            elif token.id == "<alias>":
                value = "*%s" % value
            elif token.id == "<tag>":
                assert isinstance(value, tuple)
                value = " ".join(str(s) for s in runez.flattened(value))
            elif token.id == "<directive>":
                result += " %s" % token.name
                value = " ".join(str(s) for s in runez.flattened(value))
            else:
                assert False
            result = "%s %s" % (result, value)
        return result
class PoyoImplementation(Implementation):
    name = "poyo"
    def _deserialized_from_string(self, source):
        return [poyo.parse_string(source)]
class StrictImplementation(Implementation):
    name = "strict"
    def _deserialized_from_string(self, source):
        obj = strictyaml.dirty_load(source, allow_flow_style=True)
        return obj.data
| import click
import poyo
import ruamel.yaml
import runez
import strictyaml
import yaml as pyyaml
from zyaml import load_path, load_string, tokens_from_path, tokens_from_string
from zyaml.marshal import decode, default_marshal, represented_scalar
from . import TestSettings
class ImplementationCollection(object):
    def __init__(self, names, default="zyaml,ruamel"):
        av = [ZyamlImplementation, RuamelImplementation, PyyamlBaseImplementation, PoyoImplementation, StrictImplementation]
        self.available = dict((m.name, m()) for m in av)
        self.unknown = []
        self.selected = []
        if names.startswith("+"):
            names = "%s,%s" % (names[1:], default)
        names = [s.strip() for s in names.split(",")]
        names = [s for s in names if s]
        seen = {}
        for name in names:
            found = 0
            for i in self.available.values():
                if name == "all" or name in i.name:
                    if i.name not in seen:
                        seen[i.name] = True
                        self.selected.append(i)
                    found += 1
            if found == 0:
                self.unknown.append(name)
        self.combinations = None
    def track_result_combination(self, impl, data):
        if isinstance(data, Exception):
            value = runez.stringified(data)
        else:
            value = runez.represented_json(data, stringify=decode, keep_none=True, none_key="-null-")
        name = impl.name
        if self.combinations is None:
            self.combinations = {}
            for i1 in self.selected:
                for i2 in self.selected:
                    if i1.name < i2.name:
                        self.combinations[(i1.name, i2.name)] = set()
        for names, values in self.combinations.items():
            if name in names:
                values.add(value)
    def __repr__(self):
        return ",".join(str(i) for i in self.selected)
    def __len__(self):
        return len(self.selected)
    def __iter__(self):
        for i in self.selected:
            yield i
class Implementation(object):
"""Implementation of loading a yml file"""
name = None # type: str
def __repr__(self):
return self.name
@classmethod
def option(cls, default="zyaml,ruamel", count=None, **kwargs):
"""
Args:
default (str | None): Default implementation(s) to use
count (int | None): Optional: exact number of implementations that have to be specified
**kwargs: Passed-through to click
"""
kwargs["default"] = default
def _callback(_ctx, _param, value):
if not value:
return None
impls = ImplementationCollection(value, default=default)
if impls.unknown:
raise click.BadParameter("Unknown implementation(s): %s" % ", ".join(impls.unknown))
if count and len(impls) != count:
if count == 1:
raise click.BadParameter("Need exactly 1 implementation")
raise click.BadParameter("Need exactly %s" % runez.plural(count, "implementation"))
if count == 1:
return impls.selected[0]
return impls
metavar = "I1,..."
hlp = "Implementation(s)"
if count:
hlp = runez.plural(count, "implementation")
metavar = ",".join("I%s" % (i + 1) for i in range(count))
kwargs.setdefault("help", "%s to use" % hlp)
kwargs.setdefault("show_default", True)
kwargs.setdefault("metavar", metavar)
name = "implementation" if count == 1 else "implementations"
return click.option(name, "-i", callback=_callback, **kwargs)
def show_result(self, data, tokens=False):
rtype = "tokens" if tokens else data.__class__.__name__ if data is not None else "None"
rep = data
if not tokens or isinstance(data, Exception):
rep = TestSettings.represented(data)
message = "---- %s: %s" % (runez.bold(self.name), runez.dim(rtype))
if isinstance(data, NotImplementedError):
print("%s - %s" % (message, rep))
return
print(message)
print(rep)
def get_outcome(self, content, tokens=False):
if tokens:
data = self.tokens(content)
if isinstance(data, list):
data = "\n".join(self.represented_token(t) for t in data)
return data
return self.deserialized(content)
def deserialized(self, source):
value = TestSettings.protected_call(self._deserialized, source)
return self._simplified(value)
def tokens(self, source):
return TestSettings.protected_call(self._tokenize, source)
def represented_token(self, token):
return str(token)
def _deserialized(self, source):
if hasattr(source, "path"):
return self._deserialized_from_path(source.path)
return self._deserialized_from_string(source)
def _deserialized_from_path(self, path):
with open(path) as fh:
return self._deserialized_from_string(fh.read())
def _deserialized_from_string(self, source):
raise NotImplementedError()
def _tokenize(self, source):
if hasattr(source, "path"):
return self._tokens_from_path(source.path)
return self._tokens_from_string(source)
def _tokens_from_path(self, path):
with open(path) as fh:
return TestSettings.unwrapped(self._tokens_from_string(fh.read()))
def _tokens_from_string(self, source):
raise NotImplementedError()
def _simplified(self, value):
if isinstance(value, list) and len(value) == 1:
return value[0]
return value
class ZyamlImplementation(Implementation):
name = "zyaml"
def _deserialized_from_path(self, path):
return load_path(path)
def _deserialized_from_string(self, source):
return load_string(source)
def _tokens_from_path(self, path):
return tokens_from_path(path)
def _tokens_from_string(self, source):
return tokens_from_string(source)
def _simplified(self, value):
return value
def ruamel_passthrough_tags(loader, tag, node):
name = node.__class__.__name__
if "Seq" in name:
result = []
for v in node.value:
result.append(ruamel_passthrough_tags(loader, tag, v))
return result
if "Map" in name:
result = {}
for k, v in node.value:
k = ruamel_passthrough_tags(loader, tag, k)
v = ruamel_passthrough_tags(loader, tag, v)
result[k] = v
return result
return default_marshal(node.value)
class RuamelImplementation(Implementation):
name = "ruamel"
def _deserialized_from_string(self, source):
y = ruamel.yaml.YAML(typ="safe")
ruamel.yaml.add_multi_constructor("", ruamel_passthrough_tags, Loader=ruamel.yaml.SafeLoader)
return y.load_all(source)
def _tokens_from_string(self, source):
return ruamel.yaml.main.scan(source)
class PyyamlBaseImplementation(Implementation):
name = "pyyaml"
def _deserialized_from_string(self, source):
return pyyaml.load_all(source, Loader=pyyaml.BaseLoader)
def _tokens_from_string(self, source):
yaml_loader = pyyaml.BaseLoader(source)
curr = yaml_loader.get_token()
while curr is not None:
yield curr
curr = yaml_loader.get_token()
def represented_token(self, token):
linenum = token.start_mark.line + 1
column = token.start_mark.column + 1
result = "%s[%s,%s]" % (token.__class__.__name__, linenum, column)
value = getattr(token, "value", None)
if value is not None:
if token.id == "<scalar>":
value = represented_scalar(token.style, value)
elif token.id == "<anchor>":
value = "&%s" % value
elif token.id == "<alias>":
value = "*%s" % value
elif token.id == "<tag>":
assert isinstance(value, tuple)
value = " ".join(str(s) for s in runez.flattened(value))
elif token.id == "<directive>":
result += " %s" % token.name
value = " ".join(str(s) for s in runez.flattened(value))
else:
assert False
result = "%s %s" % (result, value)
return result
class PoyoImplementation(Implementation):
name = "poyo"
def _deserialized_from_string(self, source):
return [poyo.parse_string(source)]
class StrictImplementation(Implementation):
name = "strict"
def _deserialized_from_string(self, source):
obj = strictyaml.dirty_load(source, allow_flow_style=True)
return obj.data | en | 0.676207 | Implementation of loading a yml file # type: str Args: default (str | None): Default implementation(s) to use count (int | None): Optional: exact number of implementations that have to specified **kwargs: Passed-through to click | 2.056324 | 2 |
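# ---------------------------------------------------------------------------
# Editor's sketch (not part of the sample above): how the benchmark helpers are
# typically wired together. The import path `implementations` is a placeholder
# for wherever the module lives, and its third-party dependencies (click, poyo,
# ruamel.yaml, runez, strictyaml, pyyaml, zyaml) are assumed to be installed.
# ---------------------------------------------------------------------------
import click

from implementations import Implementation, ImplementationCollection  # placeholder import path

impls = ImplementationCollection("zyaml,pyyaml")    # explicit selection; "+name" adds to the default "zyaml,ruamel"
for impl in impls:
    result = impl.deserialized("key: [1, 2, 3]")    # deserialized data on success, the caught Exception on failure
    impl.show_result(result)

@click.command()
@Implementation.option(count=2)                     # exposes "-i I1,I2" and validates that exactly 2 are given
def compare(implementations):
    """Compare two selected implementations."""
    first, second = implementations.selected
    click.echo("%s vs %s" % (first, second))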
mummi_ras/online/aa/aa_get_tiltrot_z_state.py | mummi-framework/mummi-ras | 4 | 896 | ###############################################################################
# @todo add Pilot2-splash-app disclaimer
###############################################################################
""" Get's KRAS states """
import MDAnalysis as mda
from MDAnalysis.analysis import align
from MDAnalysis.lib.mdamath import make_whole
import os
import numpy as np
import math
############## Below section needs to be uncommented ############
import mummi_core
import mummi_ras
from mummi_core.utils import Naming
# # Logger has to be initialized the first thing in the script
from logging import getLogger
LOGGER = getLogger(__name__)
# # Initialize MuMMI if it has not been done before
# MUMMI_ROOT = mummi.init(True)
# This is needed so the Naming works below
#@TODO fix this so we don't have these on import make them as an init
mummi_core.init()
dirKRASStates = Naming.dir_res('states')
dirKRASStructures = Naming.dir_res('structures')
# #RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-ONLY.microstates.txt"))
RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-states.txt"),comments='#')
# #RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-RAF.microstates.txt"))
RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-raf-states.txt"),comments='#') # Note different number of columns so index changes below
# TODO: CS, my edits to test
# RAS_ONLY_macrostate = np.loadtxt('ras-states.txt')
# RAS_RAF_macrostate = np.loadtxt('ras-raf-states.txt')
############## above section needs to be uncommented ############
# TODO: CS, my edits to test
# TODO: TSC, The reference structure has to currently be set as the 'RAS-ONLY-reference-structure.gro'
# TODO: TSC, path to the reference structure is: mummi_resources/structures/
kras_ref_universe = mda.Universe(os.path.join(dirKRASStructures, "RAS-ONLY-reference-structure.gro"))
# kras_ref_universe = mda.Universe("RAS-ONLY-reference-structure.gro")
# kras_ref_universe = mda.Universe('AA_pfpatch_000000004641_RAS_RAF2_411.gro')
# TODO: CS, not using these for x4 proteins; instead using protein_systems below to set num_res
######### Below hard codes the number of residues within RAS-only and RAS-RAF ##########
RAS_only_num_res = 184
RAS_RAF_num_res = 320
######### Above hard codes the number of residues within RAS-only and RAS-RAF ##########
####### This can be removed
# def get_kras(syst, kras_start):
# """Gets all atoms for a KRAS protein starting at 'kras_start'."""
# return syst.atoms[kras_start:kras_start+428]
####### This can be removed
def get_segids(u):
"""Identifies the list of segments within the system. Only needs to be called x1 time"""
segs = u.segments
segs = segs.segids
ras_segids = []
rasraf_segids = []
for i in range(len(segs)):
# print(segs[i])
if segs[i][-3:] == 'RAS':
ras_segids.append(segs[i])
if segs[i][-3:] == 'RAF':
rasraf_segids.append(segs[i])
return ras_segids, rasraf_segids
def get_protein_info(u,tag):
"""Uses the segments identified in get_segids to make a list of all proteins in the systems.\
Outputs a list of the first residue number of the protein, and whether it is 'RAS-ONLY', or 'RAS-RAF'.\
The 'tag' input defines what is used to identify the first residue of the protein. i.e. 'resname ACE1 and name BB'.\
Only needs to be called x1 time"""
ras_segids, rasraf_segids = get_segids(u)
if len(ras_segids) > 0:
RAS = u.select_atoms('segid '+ras_segids[0]+' and '+str(tag))
else:
RAS = []
if len(rasraf_segids) > 0:
RAF = u.select_atoms('segid '+rasraf_segids[0]+' and '+str(tag))
else:
RAF = []
protein_info = []#np.empty([len(RAS)+len(RAF),2])
for i in range(len(RAS)):
protein_info.append((RAS[i].resid,'RAS-ONLY'))
for i in range(len(RAF)):
protein_info.append((RAF[i].resid,'RAS-RAF'))
######## sort protein info
protein_info = sorted(protein_info)
######## sort protein info
return protein_info
def get_ref_kras():
"""Gets the reference KRAS struct. Only called x1 time when class is loaded"""
start_of_g_ref = kras_ref_universe.residues[0].resid
ref_selection = 'resid '+str(start_of_g_ref)+':'+str(start_of_g_ref+24)+' ' +\
str(start_of_g_ref+38)+':'+str(start_of_g_ref+54)+' ' +\
str(start_of_g_ref+67)+':'+str(start_of_g_ref+164)+' ' +\
'and (name CA or name BB)'
r2_26r40_56r69_166_ref = kras_ref_universe.select_atoms(str(ref_selection))
return kras_ref_universe.select_atoms(str(ref_selection)).positions - kras_ref_universe.select_atoms(str(ref_selection)).center_of_mass()
# Load initial ref frames (only need to do this once)
ref0 = get_ref_kras()
def getKRASstates(u,kras_indices):
"""Gets states for all KRAS proteins in path."""
# res_shift = 8
# all_glycine = u.select_atoms("resname GLY")
# kras_indices = []
# for i in range(0, len(all_glycine), 26):
# kras_indices.append(all_glycine[i].index)
########## Below is taken out of the function so it is only done once #########
# kras_indices = get_protein_info(u,'resname ACE1 and name BB')
########## Above is taken out of the function so it is only done once #########
# CS, for x4 cases:
# [{protein_x4: (protein_type, num_res)}]
protein_systems = [{'ras4a': ('RAS-ONLY', 185),
'ras4araf': ('RAS-RAF', 321),
'ras': ('RAS-ONLY', 184),
'rasraf': ('RAS-RAF', 320)}]
ALLOUT = []
for k in range(len(kras_indices)):
start_of_g = kras_indices[k][0]
protein_x4 = str(kras_indices[k][1])
try:
protein_type = [item[protein_x4] for item in protein_systems][0][0] # 'RAS-ONLY' OR 'RAS-RAF'
num_res = [item[protein_x4] for item in protein_systems][0][1]
except:
LOGGER.error('Check KRas naming between modules')
raise Exception('Error: unknown KRas name')
# TODO: CS, replacing this comment section with the above, to handle x4 protein types
# ---------------------------------------
# ALLOUT = []
# for k in range(len(kras_indices)):
# start_of_g = kras_indices[k][0]
# protein_type = str(kras_indices[k][1])
# ########## BELOW SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
# ########## POTENTIALLY REDO WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) #######
# ########## HAS BEEN REDONE WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) ########
# # if len(kras_indices) == 1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB') ####### HAS TO BE FIXED FOR BACKBONE ATOMS FOR SPECIFIC PROTEIN
# # elif len(kras_indices) > 1:
# # if k == len(kras_indices)-1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB')
# # else:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(kras_indices[k+1][0])+' and name BB')
# ########## ABOVE SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
#
# ########## Below hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# if protein_type == 'RAS-ONLY':
# num_res = RAS_only_num_res
# elif protein_type == 'RAS-RAF':
# num_res = RAS_RAF_num_res
# ########## Above hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# ---------------------------------------
# TODO: TSC, I changed the selection below, which can be used for the make_whole...
# krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res)+' and (name CA or name BB)')
krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res))
krases0_BB.guess_bonds()
r2_26r40_56r69_166 = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+24)+' ' +\
str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+\
' and (name CA or name BB)')
u_selection = \
'resid '+str(start_of_g)+':'+str(start_of_g+24)+' '+str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+' and (name CA or name BB)'
mobile0 = u.select_atoms(str(u_selection)).positions - u.select_atoms(str(u_selection)).center_of_mass()
# TODO: CS, something wrong with ref0 from get_kras_ref()
# just making ref0 = mobile0 to test for now
# ref0 = mobile0
# TSC removed this
R, RMSD_junk = align.rotation_matrix(mobile0, ref0)
######## TODO: TSC, Adjusted for AA lipid names ########
# lipids = u.select_atoms('resname POPX POPC PAPC POPE DIPE DPSM PAPS PAP6 CHOL')
lipids = u.select_atoms('resname POPC PAPC POPE DIPE SSM PAPS SAPI CHL1')
coords = ref0
RotMat = []
OS = []
r152_165 = krases0_BB.select_atoms('resid '+str(start_of_g+150)+':'+str(start_of_g+163)+' and (name CA or name BB)')
r65_74 = krases0_BB.select_atoms('resid '+str(start_of_g+63)+':'+str(start_of_g+72)+' and (name CA or name BB)')
timeframes = []
# TODO: CS, for AA need bonds to run make_whole()
# krases0_BB.guess_bonds()
# TODO: CS, turn off for now to test beyond this point
''' *** for AA, need to bring that back on once all else runs ***
'''
# @Tim and <NAME>. this was commented out - please check.
#make_whole(krases0_BB)
j, rmsd_junk = mda.analysis.align.rotation_matrix((r2_26r40_56r69_166.positions-r2_26r40_56r69_166.center_of_mass()), coords)
RotMat.append(j)
OS.append(r65_74.center_of_mass()-r152_165.center_of_mass())
timeframes.append(u.trajectory.time)
if protein_type == 'RAS-RAF':
z_pos = []
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES BELOW ####################
############### TODO: TSC, zshifting is set to -1 (instead of -2), as there are ACE caps that are separate residues in AA
#zshifting=-1
if protein_x4 == 'rasraf':
zshifting = -1
elif protein_x4 == 'ras4araf':
zshifting = 0
else:
zshifting = 0
LOGGER.error('Found unsupported protein_x4 type')
raf_loops_selection = u.select_atoms('resid '+str(start_of_g+zshifting+291)+':'+str(start_of_g+zshifting+294)+' ' +\
str(start_of_g+zshifting+278)+':'+str(start_of_g+zshifting+281)+' ' +\
' and (name CA or name BB)')
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES ABOVE ####################
diff = (lipids.center_of_mass()[2]-raf_loops_selection.center_of_mass(unwrap=True)[2])/10
if diff < 0:
diff = diff+(u.dimensions[2]/10)
z_pos.append(diff)
z_pos = np.array(z_pos)
RotMatNP = np.array(RotMat)
OS = np.array(OS)
OA = RotMatNP[:, 2, :]/(((RotMatNP[:, 2, 0]**2)+(RotMatNP[:, 2, 1]**2)+(RotMatNP[:, 2, 2]**2))**0.5)[:, None]
OWAS = np.arccos(RotMatNP[:, 2, 2])*180/math.pi
OC_temp = np.concatenate((OA, OS), axis=1)
t = ((OC_temp[:, 0]*OC_temp[:, 3])+(OC_temp[:, 1]*OC_temp[:, 4]) +
(OC_temp[:, 2]*OC_temp[:, 5]))/((OC_temp[:, 0]**2)+(OC_temp[:, 1]**2)+(OC_temp[:, 2]**2))
OC = OA*t[:, None]
ORS_tp = np.concatenate((OC, OS), axis=1)
ORS_norm = (((ORS_tp[:, 3]-ORS_tp[:, 0])**2)+((ORS_tp[:, 4]-ORS_tp[:, 1])**2)+((ORS_tp[:, 5]-ORS_tp[:, 2])**2))**0.5
ORS = (OS - OC)/ORS_norm[:, None]
OACRS = np.cross(OA, ORS)
OZCA = OA * OA[:, 2][:, None]
Z_unit = np.full([len(OZCA), 3], 1)
Z_adjust = np.array([0, 0, 1])
Z_unit = Z_unit*Z_adjust
Z_OZCA = Z_unit-OZCA
OZPACB = Z_OZCA/((Z_OZCA[:, 0]**2+Z_OZCA[:, 1]**2+Z_OZCA[:, 2]**2)**0.5)[:, None]
OROTNOTSIGNED = np.zeros([len(ORS)])
for i in range(len(ORS)):
OROTNOTSIGNED[i] = np.arccos(np.dot(OZPACB[i, :], ORS[i, :]) /
(np.sqrt(np.dot(OZPACB[i, :], OZPACB[i, :]))) *
(np.sqrt(np.dot(ORS[i, :], ORS[i, :]))))*180/math.pi
OZPACBCRS_cross = np.cross(OZPACB, ORS)
OZPACBCRS = OZPACBCRS_cross/((OZPACBCRS_cross[:, 0]**2+OZPACBCRS_cross[:, 1]**2+OZPACBCRS_cross[:, 2]**2)**0.5)[:, None]
OFORSIGN_temp = (OA - OZPACBCRS)**2
OFORSIGN = OFORSIGN_temp[:, 0]+OFORSIGN_temp[:, 1]+OFORSIGN_temp[:, 2]
OROT = OROTNOTSIGNED
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = -(OROT[i])
for i in range(len(OROT)):
if OFORSIGN[i] < 0.25:
OROT[i] = -(OROT[i])
###### Below introduces new shift to account for upper vs. lower leaflet #####
for i in range(len(OWAS)):
OWAS[i] = abs(-(OWAS[i])+180) # made this an absolute value so that the tilt remains positive
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = OROT[i]+180
elif OROT[i] > 0:
OROT[i] = OROT[i]-180
###### Above introduces new shift to account for upper vs. lower leaflet #####
###### Below might have to be updated to take into account the periodic nature of the rotation ######
if protein_type == 'RAS-ONLY':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
diff0 = []
for i in range(len(RAS_ONLY_macrostate)):
#diff0.append([((RAS_ONLY_macrostate[i,0]-OWAS[j])**2+(RAS_ONLY_macrostate[i,1]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,6]])
diff0.append([((RAS_ONLY_macrostate[i,1]-OWAS[j])**2+(RAS_ONLY_macrostate[i,0]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,5]])
diff0.sort()
states[j] = diff0[0][1]
elif protein_type == 'RAS-RAF':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
### below: adding in the requirements for the 'high-z' state ###
if (OROT[j] < -45 or OROT[j] > 140) and z_pos[j] > 4.8:
states[j] = 3
else:
### above: adding in the requirements for the 'high-z' state ###
diff0 = []
for i in range(len(RAS_RAF_macrostate)):
#diff0.append([((RAS_RAF_macrostate[i,0]-OWAS[j])**2+(RAS_RAF_macrostate[i,1]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,6]])
diff0.append([((RAS_RAF_macrostate[i,1]-OWAS[j])**2+(RAS_RAF_macrostate[i,0]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,4]])
diff0.sort()
states[j] = diff0[0][1]
###### Above might have to be updated to take into account the periodic nature of the rotation ######
###### Assume we want to remove this? Where is the code that reads this information? i.e. will there be knock-on effects? ######
###### If feedback code needs index 5 (two_states) from the output, deleting this four_states will shift that to index 4 #######
# four_states = np.zeros(len(OROT))
# for j in range(len(OROT)):
# diff0 = []
# for i in range(len(macrostate4)):
# diff0.append([((macrostate4[i,0]-OWAS[j])**2+(macrostate4[i,1]-OROT[j])**2)**0.5, macrostate4[i,6]])
# diff0.sort()
# four_states[j] = diff0[0][1]+1
###### below: old output details.... ######################################
###### Updated - RAS-only to NOT HAVE the Z-distance ######################
###### Updated - Added in the protein 'tag', i.e. RAS-ONLY or RAS-RAF #####
# OUTPUT = np.zeros([len(OROT), 6])
# for i in range(len(OROT)):
# OUTPUT[i] = timeframes[i], OWAS[i], OROT[i], z_pos[i], four_states[i], two_states[i]
###### above: old output details.... ######################################
###### below: NEW output details.... ######################################
if protein_type == 'RAS-ONLY':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], 'n/a', int(states[i])
elif protein_type == 'RAS-RAF':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], z_pos[i], int(states[i])
ALLOUT.append(OUTPUT)
return np.asarray(ALLOUT)
#np.savetxt(str(tpr)+"_tilt_rot_z_state.KRAS_"+str(k+1)+".txt", OUTPUT, fmt=['%i','%10.3f','%10.3f','%10.3f','%i','%i'], delimiter=' ')
| ###############################################################################
# @todo add Pilot2-splash-app disclaimer
###############################################################################
""" Get's KRAS states """
import MDAnalysis as mda
from MDAnalysis.analysis import align
from MDAnalysis.lib.mdamath import make_whole
import os
import numpy as np
import math
############## Below section needs to be uncommented ############
import mummi_core
import mummi_ras
from mummi_core.utils import Naming
# # Logger has to be initialized the first thing in the script
from logging import getLogger
LOGGER = getLogger(__name__)
# # Initialize MuMMI if it has not been done before
# MUMMI_ROOT = mummi.init(True)
# This is needed so the Naming works below
#@TODO fix this so we don't have these on import make them as an init
mummi_core.init()
dirKRASStates = Naming.dir_res('states')
dirKRASStructures = Naming.dir_res('structures')
# #RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-ONLY.microstates.txt"))
RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-states.txt"),comments='#')
# #RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-RAF.microstates.txt"))
RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-raf-states.txt"),comments='#') # Note different number of columns so index changes below
# TODO: CS, my edits to test
# RAS_ONLY_macrostate = np.loadtxt('ras-states.txt')
# RAS_RAF_macrostate = np.loadtxt('ras-raf-states.txt')
############## above section needs to be uncommented ############
# TODO: CS, my edits to test
# TODO: TSC, The reference structure has to currently be set as the 'RAS-ONLY-reference-structure.gro'
# TODO: TSC, path to the reference structure is: mummi_resources/structures/
kras_ref_universe = mda.Universe(os.path.join(dirKRASStructures, "RAS-ONLY-reference-structure.gro"))
# kras_ref_universe = mda.Universe("RAS-ONLY-reference-structure.gro")
# kras_ref_universe = mda.Universe('AA_pfpatch_000000004641_RAS_RAF2_411.gro')
# TODO: CS, not using these for x4 proteins; instead using protein_systems below to set num_res
######### Below hard codes the number of residues within RAS-only and RAS-RAF ##########
RAS_only_num_res = 184
RAS_RAF_num_res = 320
######### Above hard codes the number of residues within RAS-only and RAS-RAF ##########
####### This can be removed
# def get_kras(syst, kras_start):
# """Gets all atoms for a KRAS protein starting at 'kras_start'."""
# return syst.atoms[kras_start:kras_start+428]
####### This can be removed
def get_segids(u):
"""Identifies the list of segments within the system. Only needs to be called x1 time"""
segs = u.segments
segs = segs.segids
ras_segids = []
rasraf_segids = []
for i in range(len(segs)):
# print(segs[i])
if segs[i][-3:] == 'RAS':
ras_segids.append(segs[i])
if segs[i][-3:] == 'RAF':
rasraf_segids.append(segs[i])
return ras_segids, rasraf_segids
def get_protein_info(u,tag):
"""Uses the segments identified in get_segids to make a list of all proteins in the systems.\
Outputs a list of the first residue number of the protein, and whether it is 'RAS-ONLY', or 'RAS-RAF'.\
The 'tag' input defines what is used to identify the first residue of the protein. i.e. 'resname ACE1 and name BB'.\
Only needs to be called x1 time"""
ras_segids, rasraf_segids = get_segids(u)
if len(ras_segids) > 0:
RAS = u.select_atoms('segid '+ras_segids[0]+' and '+str(tag))
else:
RAS = []
if len(rasraf_segids) > 0:
RAF = u.select_atoms('segid '+rasraf_segids[0]+' and '+str(tag))
else:
RAF = []
protein_info = []#np.empty([len(RAS)+len(RAF),2])
for i in range(len(RAS)):
protein_info.append((RAS[i].resid,'RAS-ONLY'))
for i in range(len(RAF)):
protein_info.append((RAF[i].resid,'RAS-RAF'))
######## sort protein info
protein_info = sorted(protein_info)
######## sort protein info
return protein_info
def get_ref_kras():
"""Gets the reference KRAS struct. Only called x1 time when class is loaded"""
start_of_g_ref = kras_ref_universe.residues[0].resid
ref_selection = 'resid '+str(start_of_g_ref)+':'+str(start_of_g_ref+24)+' ' +\
str(start_of_g_ref+38)+':'+str(start_of_g_ref+54)+' ' +\
str(start_of_g_ref+67)+':'+str(start_of_g_ref+164)+' ' +\
'and (name CA or name BB)'
r2_26r40_56r69_166_ref = kras_ref_universe.select_atoms(str(ref_selection))
return kras_ref_universe.select_atoms(str(ref_selection)).positions - kras_ref_universe.select_atoms(str(ref_selection)).center_of_mass()
# Load initial ref frames (only need to do this once)
ref0 = get_ref_kras()
def getKRASstates(u,kras_indices):
"""Gets states for all KRAS proteins in path."""
# res_shift = 8
# all_glycine = u.select_atoms("resname GLY")
# kras_indices = []
# for i in range(0, len(all_glycine), 26):
# kras_indices.append(all_glycine[i].index)
########## Below is taken out of the function so it is only done once #########
# kras_indices = get_protein_info(u,'resname ACE1 and name BB')
########## Above is taken out of the function so it is only done once #########
# CS, for x4 cases:
# [{protein_x4: (protein_type, num_res)}]
protein_systems = [{'ras4a': ('RAS-ONLY', 185),
'ras4araf': ('RAS-RAF', 321),
'ras': ('RAS-ONLY', 184),
'rasraf': ('RAS-RAF', 320)}]
ALLOUT = []
for k in range(len(kras_indices)):
start_of_g = kras_indices[k][0]
protein_x4 = str(kras_indices[k][1])
try:
protein_type = [item[protein_x4] for item in protein_systems][0][0] # 'RAS-ONLY' OR 'RAS-RAF'
num_res = [item[protein_x4] for item in protein_systems][0][1]
except:
LOGGER.error('Check KRas naming between modules')
raise Exception('Error: unknown KRas name')
# TODO: CS, replacing this comment section with the above, to handle x4 protein types
# ---------------------------------------
# ALLOUT = []
# for k in range(len(kras_indices)):
# start_of_g = kras_indices[k][0]
# protein_type = str(kras_indices[k][1])
# ########## BELOW SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
# ########## POTENTIALLY REDO WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) #######
# ########## HAS BEEN REDONE WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) ########
# # if len(kras_indices) == 1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB') ####### HAS TO BE FIXED FOR BACKBONE ATOMS FOR SPECIFIC PROTEIN
# # elif len(kras_indices) > 1:
# # if k == len(kras_indices)-1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB')
# # else:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(kras_indices[k+1][0])+' and name BB')
# ########## ABOVE SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
#
# ########## Below hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# if protein_type == 'RAS-ONLY':
# num_res = RAS_only_num_res
# elif protein_type == 'RAS-RAF':
# num_res = RAS_RAF_num_res
# ########## Above hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# ---------------------------------------
# TODO: TSC, I changed the selection below, which can be used for the make_whole...
# krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res)+' and (name CA or name BB)')
krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res))
krases0_BB.guess_bonds()
r2_26r40_56r69_166 = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+24)+' ' +\
str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+\
' and (name CA or name BB)')
u_selection = \
'resid '+str(start_of_g)+':'+str(start_of_g+24)+' '+str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+' and (name CA or name BB)'
mobile0 = u.select_atoms(str(u_selection)).positions - u.select_atoms(str(u_selection)).center_of_mass()
# TODO: CS, something wrong with ref0 from get_kras_ref()
# just making ref0 = mobile0 to test for now
# ref0 = mobile0
# TSC removed this
R, RMSD_junk = align.rotation_matrix(mobile0, ref0)
######## TODO: TSC, Adjusted for AA lipid names ########
# lipids = u.select_atoms('resname POPX POPC PAPC POPE DIPE DPSM PAPS PAP6 CHOL')
lipids = u.select_atoms('resname POPC PAPC POPE DIPE SSM PAPS SAPI CHL1')
coords = ref0
RotMat = []
OS = []
r152_165 = krases0_BB.select_atoms('resid '+str(start_of_g+150)+':'+str(start_of_g+163)+' and (name CA or name BB)')
r65_74 = krases0_BB.select_atoms('resid '+str(start_of_g+63)+':'+str(start_of_g+72)+' and (name CA or name BB)')
timeframes = []
# TODO: CS, for AA need bonds to run make_whole()
# krases0_BB.guess_bonds()
# TODO: CS, turn off for now to test beyond this point
''' *** for AA, need to bring that back on once all else runs ***
'''
# @Tim and <NAME>. this was commented out - please check.
#make_whole(krases0_BB)
j, rmsd_junk = mda.analysis.align.rotation_matrix((r2_26r40_56r69_166.positions-r2_26r40_56r69_166.center_of_mass()), coords)
RotMat.append(j)
OS.append(r65_74.center_of_mass()-r152_165.center_of_mass())
timeframes.append(u.trajectory.time)
if protein_type == 'RAS-RAF':
z_pos = []
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES BELOW ####################
############### TODO: TSC, zshifting is set to -1 (instead of -2), as there are ACE caps that are separate residues in AA
#zshifting=-1
if protein_x4 == 'rasraf':
zshifting = -1
elif protein_x4 == 'ras4araf':
zshifting = 0
else:
zshifting = 0
LOGGER.error('Found unsupported protein_x4 type')
raf_loops_selection = u.select_atoms('resid '+str(start_of_g+zshifting+291)+':'+str(start_of_g+zshifting+294)+' ' +\
str(start_of_g+zshifting+278)+':'+str(start_of_g+zshifting+281)+' ' +\
' and (name CA or name BB)')
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES ABOVE ####################
diff = (lipids.center_of_mass()[2]-raf_loops_selection.center_of_mass(unwrap=True)[2])/10
if diff < 0:
diff = diff+(u.dimensions[2]/10)
z_pos.append(diff)
z_pos = np.array(z_pos)
RotMatNP = np.array(RotMat)
OS = np.array(OS)
OA = RotMatNP[:, 2, :]/(((RotMatNP[:, 2, 0]**2)+(RotMatNP[:, 2, 1]**2)+(RotMatNP[:, 2, 2]**2))**0.5)[:, None]
OWAS = np.arccos(RotMatNP[:, 2, 2])*180/math.pi
OC_temp = np.concatenate((OA, OS), axis=1)
t = ((OC_temp[:, 0]*OC_temp[:, 3])+(OC_temp[:, 1]*OC_temp[:, 4]) +
(OC_temp[:, 2]*OC_temp[:, 5]))/((OC_temp[:, 0]**2)+(OC_temp[:, 1]**2)+(OC_temp[:, 2]**2))
OC = OA*t[:, None]
ORS_tp = np.concatenate((OC, OS), axis=1)
ORS_norm = (((ORS_tp[:, 3]-ORS_tp[:, 0])**2)+((ORS_tp[:, 4]-ORS_tp[:, 1])**2)+((ORS_tp[:, 5]-ORS_tp[:, 2])**2))**0.5
ORS = (OS - OC)/ORS_norm[:, None]
OACRS = np.cross(OA, ORS)
OZCA = OA * OA[:, 2][:, None]
Z_unit = np.full([len(OZCA), 3], 1)
Z_adjust = np.array([0, 0, 1])
Z_unit = Z_unit*Z_adjust
Z_OZCA = Z_unit-OZCA
OZPACB = Z_OZCA/((Z_OZCA[:, 0]**2+Z_OZCA[:, 1]**2+Z_OZCA[:, 2]**2)**0.5)[:, None]
OROTNOTSIGNED = np.zeros([len(ORS)])
for i in range(len(ORS)):
OROTNOTSIGNED[i] = np.arccos(np.dot(OZPACB[i, :], ORS[i, :]) /
(np.sqrt(np.dot(OZPACB[i, :], OZPACB[i, :]))) *
(np.sqrt(np.dot(ORS[i, :], ORS[i, :]))))*180/math.pi
OZPACBCRS_cross = np.cross(OZPACB, ORS)
OZPACBCRS = OZPACBCRS_cross/((OZPACBCRS_cross[:, 0]**2+OZPACBCRS_cross[:, 1]**2+OZPACBCRS_cross[:, 2]**2)**0.5)[:, None]
OFORSIGN_temp = (OA - OZPACBCRS)**2
OFORSIGN = OFORSIGN_temp[:, 0]+OFORSIGN_temp[:, 1]+OFORSIGN_temp[:, 2]
OROT = OROTNOTSIGNED
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = -(OROT[i])
for i in range(len(OROT)):
if OFORSIGN[i] < 0.25:
OROT[i] = -(OROT[i])
###### Below introduces new shift to account for upper vs. lower leaflet #####
for i in range(len(OWAS)):
OWAS[i] = abs(-(OWAS[i])+180) # made this an absolute value so that the tilt remains positive
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = OROT[i]+180
elif OROT[i] > 0:
OROT[i] = OROT[i]-180
###### Above introduces new shift to account for upper vs. lower leaflet #####
###### Below might have to be updated to take into account the periodic nature of the rotation ######
if protein_type == 'RAS-ONLY':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
diff0 = []
for i in range(len(RAS_ONLY_macrostate)):
#diff0.append([((RAS_ONLY_macrostate[i,0]-OWAS[j])**2+(RAS_ONLY_macrostate[i,1]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,6]])
diff0.append([((RAS_ONLY_macrostate[i,1]-OWAS[j])**2+(RAS_ONLY_macrostate[i,0]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,5]])
diff0.sort()
states[j] = diff0[0][1]
elif protein_type == 'RAS-RAF':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
### below: adding in the requirements for the 'high-z' state ###
if (OROT[j] < -45 or OROT[j] > 140) and z_pos[j] > 4.8:
states[j] = 3
else:
### above: adding in the requirements for the 'high-z' state ###
diff0 = []
for i in range(len(RAS_RAF_macrostate)):
#diff0.append([((RAS_RAF_macrostate[i,0]-OWAS[j])**2+(RAS_RAF_macrostate[i,1]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,6]])
diff0.append([((RAS_RAF_macrostate[i,1]-OWAS[j])**2+(RAS_RAF_macrostate[i,0]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,4]])
diff0.sort()
states[j] = diff0[0][1]
###### Above might have to be updated to take into account the periodic nature of the rotation ######
###### Assume we want to remove this? Where is the code that reads this information? i.e. will there be knock-on effects? ######
###### If feedback code needs index 5 (two_states) from the output, deleting this four_states will shift that to index 4 #######
# four_states = np.zeros(len(OROT))
# for j in range(len(OROT)):
# diff0 = []
# for i in range(len(macrostate4)):
# diff0.append([((macrostate4[i,0]-OWAS[j])**2+(macrostate4[i,1]-OROT[j])**2)**0.5, macrostate4[i,6]])
# diff0.sort()
# four_states[j] = diff0[0][1]+1
###### below: old output details.... ######################################
###### Updated - RAS-only to NOT HAVE the Z-distance ######################
###### Updated - Added in the protein 'tag', i.e. RAS-ONLY or RAS-RAF #####
# OUTPUT = np.zeros([len(OROT), 6])
# for i in range(len(OROT)):
# OUTPUT[i] = timeframes[i], OWAS[i], OROT[i], z_pos[i], four_states[i], two_states[i]
###### above: old output details.... ######################################
###### below: NEW output details.... ######################################
if protein_type == 'RAS-ONLY':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], 'n/a', int(states[i])
elif protein_type == 'RAS-RAF':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], z_pos[i], int(states[i])
ALLOUT.append(OUTPUT)
return np.asarray(ALLOUT)
#np.savetxt(str(tpr)+"_tilt_rot_z_state.KRAS_"+str(k+1)+".txt", OUTPUT, fmt=['%i','%10.3f','%10.3f','%10.3f','%i','%i'], delimiter=' ')
| en | 0.56434 | ############################################################################### # @todo add Pilot2-splash-app disclaimer ############################################################################### Get's KRAS states ############## Below section needs to be uncommented ############ # # Logger has to be initialized the first thing in the script # # Innitilize MuMMI if it has not been done before # MUMMI_ROOT = mummi.init(True) # This is needed so the Naming works below #@TODO fix this so we don't have these on import make them as an init # #RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-ONLY.microstates.txt")) # #RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-RAF.microstates.txt")) # Note diffrent number of columns so index change below # TODO: CS, my edits to test # RAS_ONLY_macrostate = np.loadtxt('ras-states.txt') # RAS_RAF_macrostate = np.loadtxt('ras-raf-states.txt') ############## above section needs to be uncommented ############ # TODO: CS, my edits to test # TODO: TSC, The reference structure has to currently be set as the 'RAS-ONLY-reference-structure.gro' # TODO: TSC, path to the reference structure is: mummi_resources/structures/ # kras_ref_universe = mda.Universe("RAS-ONLY-reference-structure.gro") # kras_ref_universe = mda.Universe('AA_pfpatch_000000004641_RAS_RAF2_411.gro') # TODO: CS, not using these for x4 proteins; instead using protein_systems below to set num_res ######### Below hard codes the number of residues within RAS-only and RAS-RAF ########## ######### Above hard codes the number of residues within RAS-only and RAS-RAF ########## ####### This can be removed # def get_kras(syst, kras_start): # """Gets all atoms for a KRAS protein starting at 'kras_start'.""" # return syst.atoms[kras_start:kras_start+428] ####### This can be removed Identifies the list of segments within the system. Only needs to be called x1 time # print(segs[i]) Uses the segments identified in get_segids to make a list of all proteins in the systems.\ Outputs a list of the first residue number of the protein, and whether it is 'RAS-ONLY', or 'RAS-RAF'.\ The 'tag' input defines what is used to identify the first residue of the protein. i.e. 'resname ACE1 and name BB'.\ Only needs to be called x1 time #np.empty([len(RAS)+len(RAF),2]) ######## sort protein info ######## sort protein info Gets the reference KRAS struct. Only called x1 time when class is loaded # Load inital ref frames (only need to do this once) Gets states for all KRAS proteins in path. 
# res_shift = 8 # all_glycine = u.select_atoms("resname GLY") # kras_indices = [] # for i in range(0, len(all_glycine), 26): # kras_indices.append(all_glycine[i].index) ########## Below is taken out of the function so it is only done once ######### # kras_indices = get_protein_info(u,'resname ACE1 and name BB') ########## Above is taken out of the function so it is only done once ######### # CS, for x4 cases: # [{protein_x4: (protein_type, num_res)}] # 'RAS-ONLY' OR 'RAS-RAF' # TODO: CS, replacing this comment section with the above, to handle x4 protein types # --------------------------------------- # ALLOUT = [] # for k in range(len(kras_indices)): # start_of_g = kras_indices[k][0] # protein_type = str(kras_indices[k][1]) # ########## BELOW SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ############## # ########## POTENTIALLY REDO WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) ####### # ########## HAS BEEN REDONE WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) ######## # # if len(kras_indices) == 1: # # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB') ####### HAS TO BE FIXED FOR BACKBONE ATOMS FOR SPECIFIC PROTEIN # # elif len(kras_indices) > 1: # # if k == len(kras_indices)-1: # # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB') # # else: # # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(kras_indices[k+1][0])+' and name BB') # ########## ABOVE SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ############## # # ########## Below hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations ######################### # if protein_type == 'RAS-ONLY': # num_res = RAS_only_num_res # elif protein_type == 'RAS-RAF': # num_res = RAS_RAF_num_res # ########## Above hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations ######################### # --------------------------------------- # TODO: TSC, I changed the selection below, which can be used for the make_whole... # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res)+' and (name CA or name BB)') # TODO: CS, something wrong with ref0 from get_kras_ref() # just making ref0 = mobile0 to test for now # ref0 = mobile0 # TSC removed this ######## TODO: TSC, Adjusted for AA lipid names ######## # lipids = u.select_atoms('resname POPX POPC PAPC POPE DIPE DPSM PAPS PAP6 CHOL') # TODO: CS, for AA need bonds to run make_whole() # krases0_BB.guess_bonds() # TODO: CS, turn off for now to test beyond this point *** for AA, need to bring that back on once all else runs *** # @Tim and <NAME>. this was commented out - please check. #make_whole(krases0_BB) ############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES BELOW #################### ############### TODO: TSC, zshifting is set to -1 (instead of -2), as there are ACE caps that are separate residues in AA #zshifting=-1 ############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES ABOVE #################### ###### Below introduces new shift to account for upper vs. lower leaflet ##### # made this an absolute value so that the tilt remains positive ###### Above introduces new shift to account for upper vs. 
lower leaflet ##### ###### Below might have to be updated to take into account the periodic nature of the rotation ###### #diff0.append([((RAS_ONLY_macrostate[i,0]-OWAS[j])**2+(RAS_ONLY_macrostate[i,1]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,6]]) ### below: adding in the requirements for the 'high-z' state ### ### above: adding in the requirements for the 'high-z' state ### #diff0.append([((RAS_RAF_macrostate[i,0]-OWAS[j])**2+(RAS_RAF_macrostate[i,1]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,6]]) ###### Above might have to be updated to take into account the periodic nature of the rotation ###### ###### Assume we want to remove this? Where is the code that reads this information? i.e. will there be knock-on effects? ###### ###### If feedback code needs index 5 (two_states) from the output, deleting this four_states will shift that to index 4 ####### # four_states = np.zeros(len(OROT)) # for j in range(len(OROT)): # diff0 = [] # for i in range(len(macrostate4)): # diff0.append([((macrostate4[i,0]-OWAS[j])**2+(macrostate4[i,1]-OROT[j])**2)**0.5, macrostate4[i,6]]) # diff0.sort() # four_states[j] = diff0[0][1]+1 ###### below: old output details.... ###################################### ###### Updated - RAS-only to NOT HAVE the Z-distance ###################### ###### Updated - Added in the protein 'tag', i.e. RAS-ONLY or RAS-RAF ##### # OUTPUT = np.zeros([len(OROT), 6]) # for i in range(len(OROT)): # OUTPUT[i] = timeframes[i], OWAS[i], OROT[i], z_pos[i], four_states[i], two_states[i] ###### above: old output details.... ###################################### ###### below: NEW output details.... ###################################### #np.savetxt(str(tpr)+"_tilt_rot_z_state.KRAS_"+str(k+1)+".txt", OUTPUT, fmt=['%i','%10.3f','%10.3f','%10.3f','%i','%i'], delimiter=' ') | 1.91122 | 2 |
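# ---------------------------------------------------------------------------
# Editor's sketch (not part of the sample above): a minimal driver for the
# state-assignment module. The trajectory file names and residue indices are
# placeholders, and importing the module assumes the MuMMI resource files
# (ras-states.txt, ras-raf-states.txt, the reference .gro) are resolvable.
# ---------------------------------------------------------------------------
import MDAnalysis as mda

from mummi_ras.online.aa.aa_get_tiltrot_z_state import getKRASstates

u = mda.Universe("patch.gro", "patch.xtc")    # placeholder topology/trajectory
# One (first_resid, protein_key) pair per protein; valid keys per protein_systems
# are 'ras', 'ras4a', 'rasraf' and 'ras4araf'.
kras_indices = [(1, "ras"), (400, "rasraf")]
for _ts in u.trajectory:
    states = getKRASstates(u, kras_indices)
    # Each protein yields rows of: protein type, time, tilt, rotation, z (or 'n/a'), state index
    print(states)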
homeassistant/components/switch/hikvisioncam.py | maddox/home-assistant | 1 | 897 | """
homeassistant.components.switch.hikvision
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support turning on/off motion detection on Hikvision cameras.
Note: Currently works using default https port only.
CGI API Guide: http://bit.ly/1RuyUuF
Configuration:
To use the Hikvision motion detection switch you will need to add something
like the following to your config/configuration.yaml
switch:
platform: hikvisioncam
name: Hikvision Cam 1 Motion Detection
host: 192.168.1.32
username: YOUR_USERNAME
password: <PASSWORD>
Variables:
host
*Required
This is the IP address of your Hikvision camera. Example: 192.168.1.32
username
*Required
Your Hikvision camera username.
password
*<PASSWORD>
<PASSWORD>.
name
*Optional
The name to use when displaying this switch instance.
"""
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import STATE_ON, STATE_OFF
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
import logging
try:
import hikvision.api
from hikvision.error import HikvisionError, MissingParamError
except ImportError:
hikvision.api = None
_LOGGING = logging.getLogger(__name__)
REQUIREMENTS = ['hikvision==0.4']
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Setup Hikvision Camera config. """
host = config.get(CONF_HOST, None)
port = config.get('port', "80")
name = config.get('name', "Hikvision Camera Motion Detection")
username = config.get(CONF_USERNAME, "admin")
password = config.get(CONF_PASSWORD, "<PASSWORD>")
if hikvision.api is None:
_LOGGING.error((
"Failed to import hikvision. Did you maybe not install the "
"'hikvision' dependency?"))
return False
try:
hikvision_cam = hikvision.api.CreateDevice(
host, port=port, username=username,
password=password, is_https=False)
except MissingParamError as param_err:
_LOGGING.error("Missing required param: %s", param_err)
return False
except HikvisionError as conn_err:
_LOGGING.error("Unable to connect: %s", conn_err)
return False
add_devices_callback([
HikvisionMotionSwitch(name, hikvision_cam)
])
class HikvisionMotionSwitch(ToggleEntity):
""" Provides a switch to toggle on/off motion detection. """
def __init__(self, name, hikvision_cam):
self._name = name
self._hikvision_cam = hikvision_cam
self._state = STATE_OFF
@property
def should_poll(self):
""" Poll for status regularly. """
return True
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
@property
def state(self):
""" Returns the state of the device if any. """
return self._state
@property
def is_on(self):
""" True if device is on. """
return self._state == STATE_ON
def turn_on(self, **kwargs):
""" Turn the device on. """
_LOGGING.info("Turning on Motion Detection ")
self._hikvision_cam.enable_motion_detection()
def turn_off(self, **kwargs):
""" Turn the device off. """
_LOGGING.info("Turning off Motion Detection ")
self._hikvision_cam.disable_motion_detection()
def update(self):
""" Update Motion Detection state """
enabled = self._hikvision_cam.is_motion_detection_enabled()
_LOGGING.info('enabled: %s', enabled)
self._state = STATE_ON if enabled else STATE_OFF
| """
homeassistant.components.switch.hikvision
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support turning on/off motion detection on Hikvision cameras.
Note: Currently works using default https port only.
CGI API Guide: http://bit.ly/1RuyUuF
Configuration:
To use the Hikvision motion detection switch you will need to add something
like the following to your config/configuration.yaml
switch:
platform: hikvisioncam
name: Hikvision Cam 1 Motion Detection
host: 192.168.1.32
username: YOUR_USERNAME
password: <PASSWORD>
Variables:
host
*Required
This is the IP address of your Hikvision camera. Example: 192.168.1.32
username
*Required
Your Hikvision camera username.
password
*<PASSWORD>
<PASSWORD>.
name
*Optional
The name to use when displaying this switch instance.
"""
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import STATE_ON, STATE_OFF
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
import logging
try:
import hikvision.api
from hikvision.error import HikvisionError, MissingParamError
except ImportError:
hikvision.api = None
_LOGGING = logging.getLogger(__name__)
REQUIREMENTS = ['hikvision==0.4']
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Setup Hikvision Camera config. """
host = config.get(CONF_HOST, None)
port = config.get('port', "80")
name = config.get('name', "Hikvision Camera Motion Detection")
username = config.get(CONF_USERNAME, "admin")
password = config.get(CONF_PASSWORD, "<PASSWORD>")
if hikvision.api is None:
_LOGGING.error((
"Failed to import hikvision. Did you maybe not install the "
"'hikvision' dependency?"))
return False
try:
hikvision_cam = hikvision.api.CreateDevice(
host, port=port, username=username,
password=password, is_https=False)
except MissingParamError as param_err:
_LOGGING.error("Missing required param: %s", param_err)
return False
except HikvisionError as conn_err:
_LOGGING.error("Unable to connect: %s", conn_err)
return False
add_devices_callback([
HikvisionMotionSwitch(name, hikvision_cam)
])
class HikvisionMotionSwitch(ToggleEntity):
""" Provides a switch to toggle on/off motion detection. """
def __init__(self, name, hikvision_cam):
self._name = name
self._hikvision_cam = hikvision_cam
self._state = STATE_OFF
@property
def should_poll(self):
""" Poll for status regularly. """
return True
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
@property
def state(self):
""" Returns the state of the device if any. """
return self._state
@property
def is_on(self):
""" True if device is on. """
return self._state == STATE_ON
def turn_on(self, **kwargs):
""" Turn the device on. """
_LOGGING.info("Turning on Motion Detection ")
self._hikvision_cam.enable_motion_detection()
def turn_off(self, **kwargs):
""" Turn the device off. """
_LOGGING.info("Turning off Motion Detection ")
self._hikvision_cam.disable_motion_detection()
def update(self):
""" Update Motion Detection state """
enabled = self._hikvision_cam.is_motion_detection_enabled()
_LOGGING.info('enabled: %s', enabled)
self._state = STATE_ON if enabled else STATE_OFF
| en | 0.654288 | homeassistant.components.switch.hikvision ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support turning on/off motion detection on Hikvision cameras. Note: Currently works using default https port only. CGI API Guide: http://bit.ly/1RuyUuF Configuration: To use the Hikvision motion detection switch you will need to add something like the following to your config/configuration.yaml switch: platform: hikvisioncam name: Hikvision Cam 1 Motion Detection host: 192.168.1.32 username: YOUR_USERNAME password: <PASSWORD> Variables: host *Required This is the IP address of your Hikvision camera. Example: 192.168.1.32 username *Required Your Hikvision camera username. password *<PASSWORD> <PASSWORD>. name *Optional The name to use when displaying this switch instance. # pylint: disable=too-many-arguments # pylint: disable=too-many-instance-attributes Setup Hikvision Camera config. Provides a switch to toggle on/off motion detection. Poll for status regularly. Returns the name of the device if any. Returns the state of the device if any. True if device is on. Turn the device on. Turn the device off. Update Motion Detection state | 2.440274 | 2 |
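# ---------------------------------------------------------------------------
# Editor's sketch (not part of the sample above): exercising the switch outside
# of Home Assistant. Host and credentials are placeholders, and it assumes the
# `hikvision==0.4` dependency and the component module are importable.
# ---------------------------------------------------------------------------
import hikvision.api

from homeassistant.components.switch.hikvisioncam import HikvisionMotionSwitch

cam = hikvision.api.CreateDevice(
    "192.168.1.32", port="80", username="admin",
    password="example-password", is_https=False)
switch = HikvisionMotionSwitch("Cam 1 Motion Detection", cam)
switch.update()                      # queries is_motion_detection_enabled() on the camera
print(switch.name, switch.state)     # STATE_ON or STATE_OFF
switch.turn_off()                    # disables motion detection through the CGI API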
src/richie/apps/search/filter_definitions/mixins.py | leduong/richie | 0 | 898 | """Define mixins to easily compose custom FilterDefinition classes."""
class TermsQueryMixin:
"""A mixin for filter definitions that need to apply term queries."""
def get_query_fragment(self, data):
"""Build the query fragments as term queries for each selected value."""
value_list = data.get(self.name)
# For terms filters, as the name implies, it's a simple terms fragment
return (
[{"key": self.name, "fragment": [{"terms": {self.term: value_list}}]}]
if value_list
else []
)
class ChoicesQueryMixin:
"""A mixin for filter definitions that need to apply predefined queries."""
def get_query_fragment(self, data):
"""Pick the hardcoded query fragment for each selected value."""
fragment_map = self.get_fragment_map()
return [
{"key": self.name, "fragment": fragment_map[value]}
for value in data.get(self.name, [])
]
class ChoicesAggsMixin:
"""A mixin for filter definitions that need to apply aggregations for predefined choices."""
# pylint: disable=unused-argument
def get_aggs_fragment(self, queries, *args, **kwargs):
"""
Build the aggregations as a set of filters, one for each possible value of the field.
"""
return {
# Create a custom aggregation for each possible choice for this filter
# eg `availability@coming_soon` & `availability@current` & `availability@open`
"{:s}@{:s}".format(self.name, choice_key): {
"filter": {
"bool": {
# Use all the query fragments from the queries *but* the one(s) that
# filter on the current filter: we manually add back the only one that
# is relevant to the current choice.
"must": choice_fragment
+ [
clause
for kf_pair in queries
for clause in kf_pair["fragment"]
if kf_pair["key"] is not self.name
]
}
}
}
for choice_key, choice_fragment in self.get_fragment_map().items()
}
class NestedChoicesAggsMixin:
"""
A mixin for filter definitions that are related to a nested field. The aggregation filter can
only be recomputed at the level of the parent because it should group all queries of fields
nested below the parent.
"""
# pylint: disable=unused-argument
def get_aggs_fragment(self, queries, data, parent, *args, **kwargs):
"""
Computing aggregations for a nested field is DIFFICULT because query fragments related to
nested fields are grouped under their common path. For example combined filters on
availability and languages would lead to a query like:
{
"query": {
"nested": {
"path": "course_runs",
"query": {
"bool": {
"must": [
{"range": {"course_runs.end": {"lte": "01-01-2019"}}},
{"terms": {"course_runs.languages": ["de", "en", fr"]}},
]
}
},
}
}
}
In this example, computing the facet count for the French filter, is done with the
following filter (excluding the filter on English and German so we only count French):
{
"query": {
"nested": {
"path": "course_runs",
"query": {
"bool": {
"must": [
{"range": {"course_runs.end": {"lte": "01-01-2019"}}},
{"terms": {"course_runs.languages": ["fr"]}},
]
}
},
}
}
}
This can only be built by calling the parent NestingWrapper with customized filter data.
"""
return {
# Create a custom aggregation for each possible choice for this filter
# eg `availability@coming_soon` & `availability@current` & `availability@open`
"{:s}@{:s}".format(self.name, choice_key): {
"filter": {
"bool": {
# Use all the query fragments from the queries (the nesting parent is
# responsible for excluding the queries related to nested fields, so we
# have to manually add them back), making sure to apply only the
# current choice on the current field.
"must": [
clause
for kf_pair in (
queries
+ parent.get_query_fragment(
# override data with only the current choice
{**data, self.name: [choice_key]}
)
)
for clause in kf_pair["fragment"]
]
}
}
}
for choice_key, choice_fragment in self.get_fragment_map().items()
}
| """Define mixins to easily compose custom FilterDefinition classes."""
class TermsQueryMixin:
"""A mixin for filter definitions that need to apply term queries."""
def get_query_fragment(self, data):
"""Build the query fragments as term queries for each selected value."""
value_list = data.get(self.name)
# For terms filters, as the name implies, it's a simple terms fragment
return (
[{"key": self.name, "fragment": [{"terms": {self.term: value_list}}]}]
if value_list
else []
)
class ChoicesQueryMixin:
"""A mixin for filter definitions that need to apply predefined queries."""
def get_query_fragment(self, data):
"""Pick the hardcoded query fragment for each selected value."""
fragment_map = self.get_fragment_map()
return [
{"key": self.name, "fragment": fragment_map[value]}
for value in data.get(self.name, [])
]
class ChoicesAggsMixin:
"""A mixin for filter definitions that need to apply aggregations for predefined choices."""
# pylint: disable=unused-argument
def get_aggs_fragment(self, queries, *args, **kwargs):
"""
Build the aggregations as a set of filters, one for each possible value of the field.
"""
return {
# Create a custom aggregation for each possible choice for this filter
# eg `availability@coming_soon` & `availability@current` & `availability@open`
"{:s}@{:s}".format(self.name, choice_key): {
"filter": {
"bool": {
# Use all the query fragments from the queries *but* the one(s) that
# apply to the current filter: we manually add back only the one that
# is relevant to the current choice.
"must": choice_fragment
+ [
clause
for kf_pair in queries
for clause in kf_pair["fragment"]
if kf_pair["key"] is not self.name
]
}
}
}
for choice_key, choice_fragment in self.get_fragment_map().items()
}
class NestedChoicesAggsMixin:
"""
A mixin for filter definitions that are related to a nested field. The aggregation filter can
only be recomputed at the level of the parent because it should group all queries of fields
nested below the parent.
"""
# pylint: disable=unused-argument
def get_aggs_fragment(self, queries, data, parent, *args, **kwargs):
"""
Computing aggregations for a nested field is DIFFICULT because query fragments related to
nested fields are grouped under their common path. For example, combined filters on
availability and languages would lead to a query like:
{
"query": {
"nested": {
"path": "course_runs",
"query": {
"bool": {
"must": [
{"range": {"course_runs.end": {"lte": "01-01-2019"}}},
{"terms": {"course_runs.languages": ["de", "en", fr"]}},
]
}
},
}
}
}
In this example, computing the facet count for the French filter is done with the
following filter (excluding the filters on English and German so we only count French):
{
"query": {
"nested": {
"path": "course_runs",
"query": {
"bool": {
"must": [
{"range": {"course_runs.end": {"lte": "01-01-2019"}}},
{"terms": {"course_runs.languages": ["fr"]}},
]
}
},
}
}
}
This can only be built by calling the parent NestingWrapper with customized filter data.
"""
return {
# Create a custom aggregation for each possible choice for this filter
# eg `availability@coming_soon` & `availability@current` & `availability@open`
"{:s}@{:s}".format(self.name, choice_key): {
"filter": {
"bool": {
# Use all the query fragments from the queries (the nesting parent is
# responsible for excluding the queries related to nested fields), so we
# have to manually add them back, making sure to apply only the current
# choice to the current field.
"must": [
clause
for kf_pair in (
queries
+ parent.get_query_fragment(
# override data with only the current choice
{**data, self.name: [choice_key]}
)
)
for clause in kf_pair["fragment"]
]
}
}
}
for choice_key, choice_fragment in self.get_fragment_map().items()
}
| en | 0.837509 | Define mixins to easily compose custom FilterDefinition classes. A mixin for filter definitions that need to apply term queries. Build the query fragments as term queries for each selected value. # For terms filters, as the name implies, it's a simple terms fragment A mixin for filter definitions that need to apply predefined queries. Pick the hardcoded query fragment for each selected value. A mixin for filter definitions that need to apply aggregations for predefined choices. # pylint: disable=unused-argument Build the aggregations as a set of filters, one for each possible value of the field. # Create a custom aggregation for each possible choice for this filter # eg `availability@coming_soon` & `availability@current` & `availability@open` # Use all the query fragments from the queries *but* the one(s) that # filter on the current filter: we manually add back the only one that # is relevant to the current choice. A mixin for filter definitions that are related to a nested field. The aggregation filter can only be recomputed at the level of the parent because it should group all queries of fields nested below the parent. # pylint: disable=unused-argument Computing aggregations for a nested field is DIFFICULT because query fragments related to nested fields are grouped under their common path. For example combined filters on availability and languages would lead to a query like: { "query": { "nested": { "path": "course_runs", "query": { "bool": { "must": [ {"range": {"course_runs.end": {"lte": "01-01-2019"}}}, {"terms": {"course_runs.languages": ["de", "en", fr"]}}, ] } }, } } } In this example, computing the facet count for the French filter, is done with the following filter (excluding the filter on English and German so we only count French): { "query": { "nested": { "path": "course_runs", "query": { "bool": { "must": [ {"range": {"course_runs.end": {"lte": "01-01-2019"}}}, {"terms": {"course_runs.languages": ["fr"]}}, ] } }, } } } This can only be built by calling the parent NestingWrapper with customized filter data. # Create a custom aggregation for each possible choice for this filter # eg `availability@coming_soon` & `availability@current` & `availability@open` # Use all the query fragments from the queries (the nesting parent is # responsible for excluding the queries related to nested fields so we # have to manually add them, making sure to apply on the current field # only the current choice. # override data with only the current choice | 2.796901 | 3 |
electrumsv/gui/qt/receive_view.py | AustEcon/electrumsv | 1 | 899 | from typing import List, Optional, TYPE_CHECKING
import weakref
from PyQt5.QtCore import QEvent, Qt
from PyQt5.QtWidgets import (QComboBox, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,
QVBoxLayout, QWidget)
from electrumsv.app_state import app_state
from electrumsv.bitcoin import script_template_to_string
from electrumsv.constants import PaymentFlag, RECEIVING_SUBPATH
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.wallet_database.tables import KeyInstanceRow
from electrumsv import web
from .amountedit import AmountEdit, BTCAmountEdit
from .constants import expiration_values
if TYPE_CHECKING:
from .main_window import ElectrumWindow
from .qrcodewidget import QRCodeWidget
from .qrwindow import QR_Window
from .request_list import RequestList
from .table_widgets import TableTopButtonLayout
from .util import ButtonsLineEdit, EnterButton, HelpLabel
class ReceiveView(QWidget):
_qr_window: Optional[QR_Window] = None
def __init__(self, main_window: 'ElectrumWindow', account_id: int) -> None:
super().__init__(main_window)
self._main_window = weakref.proxy(main_window)
self._account_id = account_id
self._account = main_window._wallet.get_account(account_id)
self._logger = logs.get_logger(f"receive-view[{self._account_id}]")
self._receive_key_id: Optional[int] = None
self._request_list_toolbar_layout = TableTopButtonLayout()
self._request_list_toolbar_layout.refresh_signal.connect(
self._main_window.refresh_wallet_display)
self._request_list_toolbar_layout.filter_signal.connect(self._filter_request_list)
form_layout = self.create_form_layout()
self._request_list = RequestList(self, main_window)
request_container = self.create_request_list_container()
vbox = QVBoxLayout(self)
vbox.addLayout(form_layout)
vbox.addSpacing(20)
vbox.addWidget(request_container, 1)
self.setLayout(vbox)
def clean_up(self) -> None:
# If there are no accounts there won't be a receive QR code object created yet.
if self._receive_qr is not None:
self._receive_qr.clean_up()
if self._qr_window is not None:
self._qr_window.close()
def create_form_layout(self) -> QHBoxLayout:
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self._receive_destination_e = ButtonsLineEdit()
self._receive_destination_e.addCopyButton(app_state.app)
self._receive_destination_e.setReadOnly(True)
msg = _('Bitcoin SV payment destination where the payment should be received. '
'Note that each payment request uses a different Bitcoin SV payment destination.')
receive_address_label = HelpLabel(_('Receiving destination'), msg)
self._receive_destination_e.textChanged.connect(self._update_receive_qr)
self._receive_destination_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(receive_address_label, 0, 0)
grid.addWidget(self._receive_destination_e, 0, 1, 1, -1)
self._receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self._receive_message_e, 1, 1, 1, -1)
self._receive_message_e.textChanged.connect(self._update_receive_qr)
self._receive_amount_e = BTCAmountEdit()
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self._receive_amount_e, 2, 1)
self._receive_amount_e.textChanged.connect(self._update_receive_qr)
self._fiat_receive_e = AmountEdit(app_state.fx.get_currency if app_state.fx else '')
if not app_state.fx or not app_state.fx.is_enabled():
self._fiat_receive_e.setVisible(False)
grid.addWidget(self._fiat_receive_e, 2, 2, Qt.AlignLeft)
self._main_window.connect_fields(self._receive_amount_e, self._fiat_receive_e)
self._expires_combo = QComboBox()
self._expires_combo.addItems([i[0] for i in expiration_values])
self._expires_combo.setCurrentIndex(3)
self._expires_combo.setFixedWidth(self._receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them '
'a signed payment request.'),
_('Expired requests have to be deleted manually from your list, '
'in order to free the corresponding Bitcoin SV addresses.'),
_('The Bitcoin SV address never expires and will always be part '
'of this ElectrumSV wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self._expires_combo, 3, 1)
self._expires_label = QLineEdit('')
self._expires_label.setReadOnly(True)
self._expires_label.setFocusPolicy(Qt.NoFocus)
self._expires_label.hide()
grid.addWidget(self._expires_label, 3, 1)
self._save_request_button = EnterButton(_('Save request'), self._save_form_as_request)
self._new_request_button = EnterButton(_('New'), self._new_payment_request)
self._receive_qr = QRCodeWidget(fixedSize=200)
self._receive_qr.link_to_window(self._toggle_qr_window)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self._save_request_button)
buttons.addWidget(self._new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self._receive_qr)
return hbox
def create_request_list_container(self) -> QGroupBox:
layout = QVBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(6, 0, 6, 6)
layout.addLayout(self._request_list_toolbar_layout)
layout.addWidget(self._request_list)
request_box = QGroupBox()
request_box.setTitle(_('Requests'))
request_box.setAlignment(Qt.AlignCenter)
request_box.setContentsMargins(0, 0, 0, 0)
request_box.setLayout(layout)
return request_box
def update_widgets(self) -> None:
self._request_list.update()
def update_destination(self) -> None:
text = ""
if self._receive_key_id is not None:
script_template = self._account.get_script_template_for_id(self._receive_key_id)
if script_template is not None:
text = script_template_to_string(script_template)
self._receive_destination_e.setText(text)
def update_contents(self) -> None:
self._expires_label.hide()
self._expires_combo.show()
if self._account.is_deterministic():
fresh_key = self._account.get_fresh_keys(RECEIVING_SUBPATH, 1)[0]
self.set_receive_key(fresh_key)
def update_for_fx_quotes(self) -> None:
if self._account_id is not None:
edit = (self._fiat_receive_e
if self._fiat_receive_e.is_last_edited else self._receive_amount_e)
edit.textEdited.emit(edit.text())
# Bound to text fields in `_create_receive_form_layout`.
def _update_receive_qr(self) -> None:
if self._receive_key_id is None:
return
amount = self._receive_amount_e.get_amount()
message = self._receive_message_e.text()
self._save_request_button.setEnabled((amount is not None) or (message != ""))
script_template = self._account.get_script_template_for_id(self._receive_key_id)
address_text = script_template_to_string(script_template)
uri = web.create_URI(address_text, amount, message)
self._receive_qr.setData(uri)
if self._qr_window and self._qr_window.isVisible():
self._qr_window.set_content(self._receive_destination_e.text(), amount,
message, uri)
def _toggle_qr_window(self, event: QEvent) -> None:
if self._receive_key_id is None:
self._main_window.show_message(_("No available receiving destination."))
return
if not self._qr_window:
self._qr_window = QR_Window(self)
self._qr_window.setVisible(True)
self._qr_window_geometry = self._qr_window.geometry()
else:
if not self._qr_window.isVisible():
self._qr_window.setVisible(True)
self._qr_window.setGeometry(self._qr_window_geometry)
else:
self._qr_window_geometry = self._qr_window.geometry()
self._qr_window.setVisible(False)
self._update_receive_qr()
def set_fiat_ccy_enabled(self, flag: bool) -> None:
self._fiat_receive_e.setVisible(flag)
def get_bsv_edits(self) -> List[BTCAmountEdit]:
return [ self._receive_amount_e ]
def _save_form_as_request(self) -> None:
if not self._receive_key_id:
self._main_window.show_error(_('No receiving payment destination'))
return
amount = self._receive_amount_e.get_amount()
message = self._receive_message_e.text()
if not message and not amount:
self._main_window.show_error(_('No message or amount'))
return
def callback(exc_value: Optional[Exception]=None) -> None:
if exc_value is not None:
raise exc_value # pylint: disable=raising-bad-type
self._request_list.update_signal.emit()
i = self._expires_combo.currentIndex()
expiration = [x[1] for x in expiration_values][i]
row = self._account.requests.get_request_for_key_id(self._receive_key_id)
if row is None:
row = self._account.requests.create_request(self._receive_key_id,
PaymentFlag.UNPAID, amount, expiration, message, callback)
else:
# Expiration is just a label, so we don't use the value.
self._account.requests.update_request(row.paymentrequest_id, row.state, amount,
row.expiration, message, callback)
self._save_request_button.setEnabled(False)
def _new_payment_request(self) -> None:
keyinstances: List[KeyInstanceRow] = []
if self._account.is_deterministic():
keyinstances = self._account.get_fresh_keys(RECEIVING_SUBPATH, 1)
if not len(keyinstances):
if not self._account.is_deterministic():
msg = [
_('No more payment destinations in your wallet.'),
_('You are using a non-deterministic account, which '
'cannot create new payment destinations.'),
_('If you want to create new payment destinations, '
'use a deterministic account instead.')
]
self._main_window.show_message(' '.join(msg))
return
self._main_window.show_message(
_('Your wallet is broken and could not allocate a new payment destination.'))
return  # No fresh key could be allocated; updating the form would fail.
self.update_contents()
self._new_request_button.setEnabled(False)
self._receive_message_e.setFocus(1)
def get_receive_key_id(self) -> Optional[int]:
return self._receive_key_id
# Only called from key list menu.
def receive_at_id(self, key_id: int) -> None:
self._receive_key_id = key_id
self._new_request_button.setEnabled(True)
self.update_destination()
self._main_window.show_receive_tab()
def set_receive_key_id(self, key_id: int) -> None:
self._receive_key_id = key_id
def set_receive_key(self, keyinstance: KeyInstanceRow) -> None:
self._receive_key_id = keyinstance.keyinstance_id
self._receive_message_e.setText("")
self._receive_amount_e.setAmount(None)
self.update_destination()
def set_form_contents(self, address_text: str, value: int, description: Optional[str]=None,
expires_description: str="") -> None:
self._receive_destination_e.setText(address_text)
self._receive_message_e.setText(description or "")
self._receive_amount_e.setAmount(value)
self._expires_combo.hide()
self._expires_label.show()
self._expires_label.setText(expires_description)
self._new_request_button.setEnabled(True)
def set_new_button_enabled(self, flag: bool) -> None:
self._new_request_button.setEnabled(flag)
def _filter_request_list(self, text: str) -> None:
self._request_list.filter(text)
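# Illustrative sketch (not part of the original file): how a wallet window might embed
# and drive this view. The attributes and calls on "main_window" below are assumptions
# made purely for illustration.
#
#     view = ReceiveView(main_window, account_id)
#     main_window.tab_widget.addTab(view, _("Receive"))
#     view.update_contents()   # allocate a fresh receiving key for the form
#     view.update_widgets()    # refresh the request list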
| from typing import List, Optional, TYPE_CHECKING
import weakref
from PyQt5.QtCore import QEvent, Qt
from PyQt5.QtWidgets import (QComboBox, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,
QVBoxLayout, QWidget)
from electrumsv.app_state import app_state
from electrumsv.bitcoin import script_template_to_string
from electrumsv.constants import PaymentFlag, RECEIVING_SUBPATH
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.wallet_database.tables import KeyInstanceRow
from electrumsv import web
from .amountedit import AmountEdit, BTCAmountEdit
from .constants import expiration_values
if TYPE_CHECKING:
from .main_window import ElectrumWindow
from .qrcodewidget import QRCodeWidget
from .qrwindow import QR_Window
from .request_list import RequestList
from .table_widgets import TableTopButtonLayout
from .util import ButtonsLineEdit, EnterButton, HelpLabel
class ReceiveView(QWidget):
_qr_window: Optional[QR_Window] = None
def __init__(self, main_window: 'ElectrumWindow', account_id: int) -> None:
super().__init__(main_window)
self._main_window = weakref.proxy(main_window)
self._account_id = account_id
self._account = main_window._wallet.get_account(account_id)
self._logger = logs.get_logger(f"receive-view[{self._account_id}]")
self._receive_key_id: Optional[int] = None
self._request_list_toolbar_layout = TableTopButtonLayout()
self._request_list_toolbar_layout.refresh_signal.connect(
self._main_window.refresh_wallet_display)
self._request_list_toolbar_layout.filter_signal.connect(self._filter_request_list)
form_layout = self.create_form_layout()
self._request_list = RequestList(self, main_window)
request_container = self.create_request_list_container()
vbox = QVBoxLayout(self)
vbox.addLayout(form_layout)
vbox.addSpacing(20)
vbox.addWidget(request_container, 1)
self.setLayout(vbox)
def clean_up(self) -> None:
# If there are no accounts there won't be a receive QR code object created yet.
if self._receive_qr is not None:
self._receive_qr.clean_up()
if self._qr_window is not None:
self._qr_window.close()
def create_form_layout(self) -> QHBoxLayout:
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self._receive_destination_e = ButtonsLineEdit()
self._receive_destination_e.addCopyButton(app_state.app)
self._receive_destination_e.setReadOnly(True)
msg = _('Bitcoin SV payment destination where the payment should be received. '
'Note that each payment request uses a different Bitcoin SV payment destination.')
receive_address_label = HelpLabel(_('Receiving destination'), msg)
self._receive_destination_e.textChanged.connect(self._update_receive_qr)
self._receive_destination_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(receive_address_label, 0, 0)
grid.addWidget(self._receive_destination_e, 0, 1, 1, -1)
self._receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self._receive_message_e, 1, 1, 1, -1)
self._receive_message_e.textChanged.connect(self._update_receive_qr)
self._receive_amount_e = BTCAmountEdit()
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self._receive_amount_e, 2, 1)
self._receive_amount_e.textChanged.connect(self._update_receive_qr)
self._fiat_receive_e = AmountEdit(app_state.fx.get_currency if app_state.fx else '')
if not app_state.fx or not app_state.fx.is_enabled():
self._fiat_receive_e.setVisible(False)
grid.addWidget(self._fiat_receive_e, 2, 2, Qt.AlignLeft)
self._main_window.connect_fields(self._receive_amount_e, self._fiat_receive_e)
self._expires_combo = QComboBox()
self._expires_combo.addItems([i[0] for i in expiration_values])
self._expires_combo.setCurrentIndex(3)
self._expires_combo.setFixedWidth(self._receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them '
'a signed payment request.'),
_('Expired requests have to be deleted manually from your list, '
'in order to free the corresponding Bitcoin SV addresses.'),
_('The Bitcoin SV address never expires and will always be part '
'of this ElectrumSV wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self._expires_combo, 3, 1)
self._expires_label = QLineEdit('')
self._expires_label.setReadOnly(True)
self._expires_label.setFocusPolicy(Qt.NoFocus)
self._expires_label.hide()
grid.addWidget(self._expires_label, 3, 1)
self._save_request_button = EnterButton(_('Save request'), self._save_form_as_request)
self._new_request_button = EnterButton(_('New'), self._new_payment_request)
self._receive_qr = QRCodeWidget(fixedSize=200)
self._receive_qr.link_to_window(self._toggle_qr_window)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self._save_request_button)
buttons.addWidget(self._new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self._receive_qr)
return hbox
def create_request_list_container(self) -> QGroupBox:
layout = QVBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(6, 0, 6, 6)
layout.addLayout(self._request_list_toolbar_layout)
layout.addWidget(self._request_list)
request_box = QGroupBox()
request_box.setTitle(_('Requests'))
request_box.setAlignment(Qt.AlignCenter)
request_box.setContentsMargins(0, 0, 0, 0)
request_box.setLayout(layout)
return request_box
def update_widgets(self) -> None:
self._request_list.update()
def update_destination(self) -> None:
text = ""
if self._receive_key_id is not None:
script_template = self._account.get_script_template_for_id(self._receive_key_id)
if script_template is not None:
text = script_template_to_string(script_template)
self._receive_destination_e.setText(text)
def update_contents(self) -> None:
self._expires_label.hide()
self._expires_combo.show()
if self._account.is_deterministic():
fresh_key = self._account.get_fresh_keys(RECEIVING_SUBPATH, 1)[0]
self.set_receive_key(fresh_key)
def update_for_fx_quotes(self) -> None:
if self._account_id is not None:
edit = (self._fiat_receive_e
if self._fiat_receive_e.is_last_edited else self._receive_amount_e)
edit.textEdited.emit(edit.text())
# Bound to text fields in `_create_receive_form_layout`.
def _update_receive_qr(self) -> None:
if self._receive_key_id is None:
return
amount = self._receive_amount_e.get_amount()
message = self._receive_message_e.text()
self._save_request_button.setEnabled((amount is not None) or (message != ""))
script_template = self._account.get_script_template_for_id(self._receive_key_id)
address_text = script_template_to_string(script_template)
uri = web.create_URI(address_text, amount, message)
self._receive_qr.setData(uri)
if self._qr_window and self._qr_window.isVisible():
self._qr_window.set_content(self._receive_destination_e.text(), amount,
message, uri)
def _toggle_qr_window(self, event: QEvent) -> None:
if self._receive_key_id is None:
self._main_window.show_message(_("No available receiving destination."))
return
if not self._qr_window:
self._qr_window = QR_Window(self)
self._qr_window.setVisible(True)
self._qr_window_geometry = self._qr_window.geometry()
else:
if not self._qr_window.isVisible():
self._qr_window.setVisible(True)
self._qr_window.setGeometry(self._qr_window_geometry)
else:
self._qr_window_geometry = self._qr_window.geometry()
self._qr_window.setVisible(False)
self._update_receive_qr()
def set_fiat_ccy_enabled(self, flag: bool) -> None:
self._fiat_receive_e.setVisible(flag)
def get_bsv_edits(self) -> List[BTCAmountEdit]:
return [ self._receive_amount_e ]
def _save_form_as_request(self) -> None:
if not self._receive_key_id:
self._main_window.show_error(_('No receiving payment destination'))
return
amount = self._receive_amount_e.get_amount()
message = self._receive_message_e.text()
if not message and not amount:
self._main_window.show_error(_('No message or amount'))
return
def callback(exc_value: Optional[Exception]=None) -> None:
if exc_value is not None:
raise exc_value # pylint: disable=raising-bad-type
self._request_list.update_signal.emit()
i = self._expires_combo.currentIndex()
expiration = [x[1] for x in expiration_values][i]
row = self._account.requests.get_request_for_key_id(self._receive_key_id)
if row is None:
row = self._account.requests.create_request(self._receive_key_id,
PaymentFlag.UNPAID, amount, expiration, message, callback)
else:
# Expiration is just a label, so we don't use the value.
self._account.requests.update_request(row.paymentrequest_id, row.state, amount,
row.expiration, message, callback)
self._save_request_button.setEnabled(False)
def _new_payment_request(self) -> None:
keyinstances: List[KeyInstanceRow] = []
if self._account.is_deterministic():
keyinstances = self._account.get_fresh_keys(RECEIVING_SUBPATH, 1)
if not len(keyinstances):
if not self._account.is_deterministic():
msg = [
_('No more payment destinations in your wallet.'),
_('You are using a non-deterministic account, which '
'cannot create new payment destinations.'),
_('If you want to create new payment destinations, '
'use a deterministic account instead.')
]
self._main_window.show_message(' '.join(msg))
return
self._main_window.show_message(
_('Your wallet is broken and could not allocate a new payment destination.'))
return  # No fresh key could be allocated; updating the form would fail.
self.update_contents()
self._new_request_button.setEnabled(False)
self._receive_message_e.setFocus(1)
def get_receive_key_id(self) -> Optional[int]:
return self._receive_key_id
# Only called from key list menu.
def receive_at_id(self, key_id: int) -> None:
self._receive_key_id = key_id
self._new_request_button.setEnabled(True)
self.update_destination()
self._main_window.show_receive_tab()
def set_receive_key_id(self, key_id: int) -> None:
self._receive_key_id = key_id
def set_receive_key(self, keyinstance: KeyInstanceRow) -> None:
self._receive_key_id = keyinstance.keyinstance_id
self._receive_message_e.setText("")
self._receive_amount_e.setAmount(None)
self.update_destination()
def set_form_contents(self, address_text: str, value: int, description: Optional[str]=None,
expires_description: str="") -> None:
self._receive_destination_e.setText(address_text)
self._receive_message_e.setText(description or "")
self._receive_amount_e.setAmount(value)
self._expires_combo.hide()
self._expires_label.show()
self._expires_label.setText(expires_description)
self._new_request_button.setEnabled(True)
def set_new_button_enabled(self, flag: bool) -> None:
self._new_request_button.setEnabled(flag)
def _filter_request_list(self, text: str) -> None:
self._request_list.filter(text)
| en | 0.807271 | # If there are no accounts there won't be a receive QR code object created yet. # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 # Bound to text fields in `_create_receive_form_layout`. # pylint: disable=raising-bad-type # Expiration is just a label, so we don't use the value. # Only called from key list menu. | 1.769653 | 2 |