# Source files collected from a code dataset; each section below is one file, identified by its path and repository.
# File: scrapy/http/request/__init__.py (repo: joybhallaa/scrapy)
"""
This module implements the Request class which is used to represent HTTP
requests in Scrapy.
See documentation in docs/topics/request-response.rst
"""
from w3lib.url import safe_url_string
from scrapy.http.headers import Headers
from scrapy.utils.python import to_bytes
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import escape_ajax
from scrapy.http.common import obsolete_setter
from scrapy.utils.curl import curl_to_request_kwargs
class Request(object_ref):
def __init__(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None, flags=None, cb_kwargs=None):
self._encoding = encoding # this one has to be set first
self.method = str(method).upper()
self._set_url(url)
self._set_body(body)
assert isinstance(priority, int), "Request priority not an integer: %r" % priority
self.priority = priority
if callback is not None and not callable(callback):
raise TypeError('callback must be a callable, got %s' % type(callback).__name__)
if errback is not None and not callable(errback):
raise TypeError('errback must be a callable, got %s' % type(errback).__name__)
self.callback = callback
self.errback = errback
self.cookies = cookies or {}
self.headers = Headers(headers or {}, encoding=encoding)
self.dont_filter = dont_filter
self._meta = dict(meta) if meta else None
self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
self.flags = [] if flags is None else list(flags)
@property
def cb_kwargs(self):
if self._cb_kwargs is None:
self._cb_kwargs = {}
return self._cb_kwargs
@property
def meta(self):
if self._meta is None:
self._meta = {}
return self._meta
def _get_url(self):
return self._url
def _set_url(self, url):
if not isinstance(url, str):
raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)
s = safe_url_string(url, self.encoding)
self._url = escape_ajax(s)
if ('://' not in self._url) and (not self._url.startswith('data:')):
raise ValueError('Missing scheme in request url: %s' % self._url)
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b''
else:
self._body = to_bytes(body, self.encoding)
body = property(_get_body, obsolete_setter(_set_body, 'body'))
@property
def encoding(self):
return self._encoding
def __str__(self):
return "<%s %s>" % (self.method, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Request"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Request with the same attributes except for those
given new values.
"""
for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',
'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
@classmethod
def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):
"""Create a Request object from a string containing a `cURL
<https://curl.haxx.se/>`_ command. It populates the HTTP method, the
URL, the headers, the cookies and the body. It accepts the same
arguments as the :class:`Request` class, taking preference and
overriding the values of the same arguments contained in the cURL
command.
Unrecognized options are ignored by default. To raise an error when
finding unknown options call this method by passing
``ignore_unknown_options=False``.
.. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`
subclasses, such as :class:`~scrapy.http.JSONRequest`, or
:class:`~scrapy.http.XmlRpcRequest`, as well as having
:ref:`downloader middlewares <topics-downloader-middleware>`
and
:ref:`spider middlewares <topics-spider-middleware>`
enabled, such as
:class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
:class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
or
:class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
may modify the :class:`~scrapy.http.Request` object.
To translate a cURL command into a Scrapy request,
you may use `curl2scrapy <https://michael-shub.github.io/curl2scrapy/>`_.
"""
request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
request_kwargs.update(kwargs)
return cls(**request_kwargs)
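# ---------------------------------------------------------------------------
# Usage sketch (added note, not part of the original scrapy source). It shows
# typical call shapes for the Request class above; the callback name
# `parse_page` and the URLs are illustrative assumptions, and scrapy must be
# installed for the imports at the top of this file to resolve.
if __name__ == "__main__":
    def parse_page(response):
        pass

    req = Request("https://example.com/page", callback=parse_page, priority=10)
    print(req)  # <GET https://example.com/page>

    # replace() copies every attribute except the ones overridden here.
    clone = req.replace(url="https://example.com/other", dont_filter=True)

    # from_curl() builds a Request out of a cURL command string.
    curl_req = Request.from_curl(
        "curl 'https://example.com/api' -H 'Accept: application/json'"
    )
    print(curl_req.method, curl_req.headers.get("Accept"))
# ---------------------------------------------------------------------------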
| """
This module implements the Request class which is used to represent HTTP
requests in Scrapy.
See documentation in docs/topics/request-response.rst
"""
from w3lib.url import safe_url_string
from scrapy.http.headers import Headers
from scrapy.utils.python import to_bytes
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import escape_ajax
from scrapy.http.common import obsolete_setter
from scrapy.utils.curl import curl_to_request_kwargs
class Request(object_ref):
def __init__(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None, flags=None, cb_kwargs=None):
self._encoding = encoding # this one has to be set first
self.method = str(method).upper()
self._set_url(url)
self._set_body(body)
assert isinstance(priority, int), "Request priority not an integer: %r" % priority
self.priority = priority
if callback is not None and not callable(callback):
raise TypeError('callback must be a callable, got %s' % type(callback).__name__)
if errback is not None and not callable(errback):
raise TypeError('errback must be a callable, got %s' % type(errback).__name__)
self.callback = callback
self.errback = errback
self.cookies = cookies or {}
self.headers = Headers(headers or {}, encoding=encoding)
self.dont_filter = dont_filter
self._meta = dict(meta) if meta else None
self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
self.flags = [] if flags is None else list(flags)
@property
def cb_kwargs(self):
if self._cb_kwargs is None:
self._cb_kwargs = {}
return self._cb_kwargs
@property
def meta(self):
if self._meta is None:
self._meta = {}
return self._meta
def _get_url(self):
return self._url
def _set_url(self, url):
if not isinstance(url, str):
raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)
s = safe_url_string(url, self.encoding)
self._url = escape_ajax(s)
if ('://' not in self._url) and (not self._url.startswith('data:')):
raise ValueError('Missing scheme in request url: %s' % self._url)
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b''
else:
self._body = to_bytes(body, self.encoding)
body = property(_get_body, obsolete_setter(_set_body, 'body'))
@property
def encoding(self):
return self._encoding
def __str__(self):
return "<%s %s>" % (self.method, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Request"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Request with the same attributes except for those
given new values.
"""
for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',
'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
@classmethod
def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):
"""Create a Request object from a string containing a `cURL
<https://curl.haxx.se/>`_ command. It populates the HTTP method, the
URL, the headers, the cookies and the body. It accepts the same
arguments as the :class:`Request` class, taking preference and
overriding the values of the same arguments contained in the cURL
command.
Unrecognized options are ignored by default. To raise an error when
finding unknown options call this method by passing
``ignore_unknown_options=False``.
.. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`
subclasses, such as :class:`~scrapy.http.JSONRequest`, or
:class:`~scrapy.http.XmlRpcRequest`, as well as having
:ref:`downloader middlewares <topics-downloader-middleware>`
and
:ref:`spider middlewares <topics-spider-middleware>`
enabled, such as
:class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
:class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
or
:class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
may modify the :class:`~scrapy.http.Request` object.
To translate a cURL command into a Scrapy request,
you may use `curl2scrapy <https://michael-shub.github.io/curl2scrapy/>`_.
"""
request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
request_kwargs.update(kwargs)
return cls(**request_kwargs)
| en | 0.668892 | This module implements the Request class which is used to represent HTTP requests in Scrapy. See documentation in docs/topics/request-response.rst # this one has to be set first Return a copy of this Request Create a new Request with the same attributes except for those given new values. Create a Request object from a string containing a `cURL <https://curl.haxx.se/>`_ command. It populates the HTTP method, the URL, the headers, the cookies and the body. It accepts the same arguments as the :class:`Request` class, taking preference and overriding the values of the same arguments contained in the cURL command. Unrecognized options are ignored by default. To raise an error when finding unknown options call this method by passing ``ignore_unknown_options=False``. .. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request` subclasses, such as :class:`~scrapy.http.JSONRequest`, or :class:`~scrapy.http.XmlRpcRequest`, as well as having :ref:`downloader middlewares <topics-downloader-middleware>` and :ref:`spider middlewares <topics-spider-middleware>` enabled, such as :class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`, :class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`, or :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`, may modify the :class:`~scrapy.http.Request` object. To translate a cURL command into a Scrapy request, you may use `curl2scrapy <https://michael-shub.github.io/curl2scrapy/>`_. | 2.62473 | 3 |
# File: game.py (repo: akaeme/BlackJackBot)
#encoding: utf8
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = "GPL"
__version__ = "0.1"
import copy
import card
from shoe import Shoe
from dealer import Dealer
from player import Player
BET_MULTIPLIER = 2
class Game(object):
class Rules():
def __init__(self, shoe_size=4, min_bet=1, max_bet=10):
self.shoe_size = shoe_size
self.min_bet = min_bet
self.max_bet = max_bet
self.bet_multiplier = BET_MULTIPLIER
def __str__(self):
return "RULES\tMin bet: {}, Max bet: {}, Shoe size: {}, Bet multiplier: {}".format(self.min_bet, self.max_bet, self.shoe_size, self.bet_multiplier)
class PlayerState():
def __init__(self, p):
self.player = p
self.bet = 0
self.hand = []
self.bust = False
self.done = False
self.watch = False
def copy(self):
return copy.deepcopy(self)
def __str__(self):
if isinstance(self.player, Dealer):
return "{}".format(self.hand)
return "{} ({}€)".format(self.hand, self.bet)
def __repr__(self):
return "{}".format(self.player.name)
def hide_card(self):
h = self.copy()
h.hand = h.hand[1:]
return h
def want_to_play(self, rules):
return self.player.want_to_play(rules)
def take_bet(self, state, rules):
bet = 0
while (bet!=self.bet and self.bet!=0) or not (rules.min_bet <= bet <= rules.max_bet):  # bets can't be 0, and a double-down bet must equal the original bet
bet = self.player.bet(state[0].hide_card(), state[1:])
self.bet += bet
def __init__(self, players, shoe_size=4, debug=False, verbose=True, min_bet=1, max_bet=10, shoe=None):
if verbose:
# print(chr(27) + "[2J")
print("-"*80)
self.verbose = verbose
self.debug = debug
self.rules = self.Rules(shoe_size=shoe_size, min_bet=min_bet, max_bet=max_bet)
self.shoe = Shoe(shoe_size)
if shoe != None:
self.shoe = shoe
self.shoe.shuffle()
self.state = [self.PlayerState(Dealer())] + [self.PlayerState(p) for p in players]
self.done = False
def str_players_hands(self):
o = ""
for p in self.state[1:]:
o+="{!s:^45}".format(p)
return o
def str_players_names(self):
o = ""
for p in self.state[1:]:
o+="{!s:^35}".format(p.player)
return o
def __str__(self):
return (\
"{:^30}\n"\
"╔"+"═══════════════════════════════"*(len(self.state)-1)+"╗\n"\
"{!s:^45}\n"\
" \n"\
" \n"\
" \n"\
" \n"\
" \n"\
"{!s}\n"\
"╚"+"═══════════════════════════════"*(len(self.state)-1)+"╝\n"\
"{}\n"\
).format(self.state[0].player.name, self.state[0].hand if self.done else (["**"]+self.state[0].hide_card().hand if len(self.state[0].hand) else []), self.str_players_hands(), self.str_players_names())
def deal(self, num):
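# NOTE (added comment): the num argument is not used; exactly one card is dealt per call.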
return self.shoe.deal_cards(1)
def take_bets(self):
if self.debug:
print(self)
for p in self.state[1:]:
if p.want_to_play(self.rules):
p.take_bet(self.state, self.rules)
else:
p.watch = True
def loop(self):
#deal initial cards
self.state[0].hand += self.shoe.deal_cards(2)
for p in self.state[1:]:
if not p.watch:
p.hand += self.shoe.deal_cards(2)
turn = 0
if card.blackjack(self.state[0].hand): #if the dealer has blackjack there is no point in playing...
self.done = True
return [p for p in self.state[1:] if card.blackjack(p.hand)]
#lets play
while not self.done:
turn += 1
hits = 0
for p in self.state[::-1]:
if p.watch or p.bust or p.done or card.value(p.hand) == 21: #skip players watching, bust players, players who have double down and players who already have blackjack!
continue
if self.debug:
print("TURN {}: {}".format(turn, p.player.name))
print(self)
action = ""
while action not in ["h", "s", "d", "u"]:
if isinstance(p.player, Dealer):
action = p.player.play(self.state[0], self.state[1:])
else:
action = p.player.play(self.state[0].hide_card(), self.state[1:])
if action == "d" and turn != 1:
print("YOU CAN'T DOUBLE DOWN!!! double down is only available on the 1st turn")
action = ""
if action == "u":
p.watch = True
continue
if action == "d":
p.take_bet(self.state,self.rules)
p.done = True
if action in ["h", "d"]:
p.hand+=self.deal(1)
hits +=1
if card.value(p.hand) >= 21:
if card.value(p.hand) > 21:
p.bust = True
else:
p.done = True #already has blackjack
if isinstance(p.player, Dealer):
self.done = True #game is over we already have a blackjack
if hits == 0:
self.done = True
self.done = True
return [p for p in self.state if
not isinstance(p.player, Dealer) and #Dealer is not really a winner
not card.blackjack(self.state[0].hand) and #If dealer gets blackjack no one wins
not p.watch and #players watching can't win :)
not p.bust and #bust players can't win :)
(card.value(p.hand) >= card.value(self.state[0].hand) or self.state[0].bust) #winners have more points then the dealer or the dealer has gone bust
]
def show_table(self):
for p in self.state[1:]:
p.player.show(self.state)
def payback(self, winners):
for p in self.state[1:]:
if p.watch:
#check if player surrendered
if p.bet > 0:
p.player.payback(-p.bet//2) #this means the player lost half his bet
#skip watchers
continue
if p in winners and card.value(self.state[0].hand) == card.value(p.hand):
p.player.payback(0) #bet is returned
elif p in winners:
p.player.payback(-p.bet + p.bet*BET_MULTIPLIER)
else:
p.player.payback(-p.bet) #this means the player lost
def run(self):
self.take_bets()
winners = self.loop()
self.show_table()
self.payback(winners)
if self.verbose:
print(self)
print("🏆 Winners: "+str(winners))
# File: loops/for/for3.py (repo: camipozas/python-exercises)
# Write a program that prints the sum of all multiples of 7 between 0 and 100.
# Summing all the multiples of 7 from 0 to 100.
total = 0
for i in range(101):
if i % 7 == 0:
total = total+i
print("Sumatoria de los múltiplos de 7:", total)
# File: lib/TWCManager/Status/HASSStatus.py (repo: Saftwerk/TWCManager)
# HomeAssistant Status Output
# Publishes the provided sensor key and value pair to a HomeAssistant instance
import logging
import time
from ww import f
logger = logging.getLogger(__name__.rsplit(".")[-1])
class HASSStatus:
import threading
import requests
apiKey = None
config = None
configConfig = None
configHASS = None
master = None
msgRateInSeconds = 60
resendRateInSeconds = 3600
retryRateInSeconds = 60
msgQueue = {}
status = False
serverIP = None
serverPort = 8123
useHttps = False
timeout = 2
backgroundTasksLock = threading.Lock()
backgroundTasksThread = None
def __init__(self, master):
self.config = master.config
self.master = master
try:
self.configConfig = self.config["config"]
except KeyError:
self.configConfig = {}
try:
self.configHASS = self.config["status"]["HASS"]
except KeyError:
self.configHASS = {}
self.status = self.configHASS.get("enabled", False)
self.serverIP = self.configHASS.get("serverIP", None)
self.serverPort = self.configHASS.get("serverPort", 8123)
self.useHttps = self.configHASS.get("useHttps", False)
self.apiKey = self.configHASS.get("apiKey", None)
self.msgRateInSeconds = self.configHASS.get("msgRateInSeconds", 60)
self.resendRateInSeconds = self.configHASS.get("resendRateInSeconds", 3600)
self.retryRateInSeconds = self.configHASS.get("retryRateInSeconds", 60)
# Unload if this module is disabled or misconfigured
if (
(not self.status)
or (not self.serverIP)
or (int(self.serverPort) < 1)
or (not self.apiKey)
):
self.master.releaseModule("lib.TWCManager.Status", "HASSStatus")
else:
self.backgroundTasksThread = self.threading.Thread(
target=self.background_task_thread, args=()
)
self.backgroundTasksThread.daemon = True
self.backgroundTasksThread.start()
def getTwident(self, twcid):
# Format TWCID nicely
if len(twcid) == 2:
return "%02X%02X" % (twcid[0], twcid[1])
else:
return str(twcid.decode("utf-8"))
def background_task_thread(self):
while True:
time.sleep(self.msgRateInSeconds)
self.backgroundTasksLock.acquire()
for msgKey in self.msgQueue:
msg = self.msgQueue[msgKey]
if msg.elapsingTime < time.time():
self.sendingStatusToHASS(msg)
self.backgroundTasksLock.release()
def getSensorName(self, twcid, key_underscore):
return "sensor.twcmanager_" + str(self.getTwident(twcid)) + "_" + key_underscore
def setStatus(self, twcid, key_underscore, key_camelcase, value, unit):
self.backgroundTasksLock.acquire()
sensor = self.getSensorName(twcid, key_underscore)
if (sensor not in self.msgQueue) or (self.msgQueue[sensor].value != value):
self.msgQueue[sensor] = HASSMessage(
time.time(),
sensor,
twcid,
key_underscore,
key_camelcase,
value,
unit,
)
self.backgroundTasksLock.release()
def sendingStatusToHASS(self, msg):
http = "http://" if not (self.useHttps) else "https://"
url = http + self.serverIP + ":" + self.serverPort
url = url + "/api/states/" + msg.sensor
headers = {
"Authorization": "Bearer " + self.apiKey,
"content-type": "application/json",
}
try:
logger.log(
logging.INFO8,
f(
"Sending POST request to HomeAssistant for sensor {msg.sensor} (value {msg.value})."
),
)
devclass = ""
if str.upper(msg.unit) in ["W", "A", "V", "KWH"]:
devclass = "power"
if len(msg.unit) > 0:
self.requests.post(
url,
json={
"state": msg.value,
"attributes": {
"unit_of_measurement": msg.unit,
"device_class": devclass,
"friendly_name": "TWC "
+ str(self.getTwident(msg.twcid))
+ " "
+ msg.key_camelcase,
},
},
timeout=self.timeout,
headers=headers,
)
else:
self.requests.post(
url,
json={
"state": msg.value,
"attributes": {
"friendly_name": "TWC "
+ str(self.getTwident(msg.twcid))
+ " "
+ msg.key_camelcase
},
},
timeout=self.timeout,
headers=headers,
)
# Setting elapsing time to now + resendRateInSeconds
self.msgQueue[msg.sensor].elapsingTime = (
time.time() + self.resendRateInSeconds
)
except self.requests.exceptions.ConnectionError as e:
logger.log(
logging.INFO4,
"Error connecting to HomeAssistant to publish sensor values",
)
logger.debug(str(e))
self.settingRetryRate(msg)
return False
except self.requests.exceptions.ReadTimeout as e:
logger.log(
logging.INFO4,
"Error connecting to HomeAssistant to publish sensor values",
)
logger.debug(str(e))
self.settingRetryRate(msg)
return False
except Exception as e:
logger.log(
logging.INFO4, "Error during publishing HomeAssistant sensor values"
)
logger.debug(str(e))
self.settingRetryRate(msg)
return False
def settingRetryRate(self, msg):
# Setting elapsing time to now + retryRateInSeconds
self.msgQueue[msg.sensor].elapsingTime = (
time.time() + self.retryRateInSeconds
)
class HASSMessage:
elapsingTime = 0
sensor = ""
twcid = ""
key_underscore = ""
key_camelcase = ""
value = None
unit = ""
def __init__(
self, elapsingTime, sensor, twcid, key_underscore, key_camelcase, value, unit
):
self.elapsingTime = elapsingTime
self.sensor = sensor
self.twcid = twcid
self.key_underscore = key_underscore
self.key_camelcase = key_camelcase
self.value = value
self.unit = unit
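# ---------------------------------------------------------------------------
# Minimal sketch (added; not part of TWCManager) of the REST call performed by
# sendingStatusToHASS() above, written out directly against the Home Assistant
# states API. The server address, token and sensor name are made-up
# placeholders.
def _example_post_state():
    import requests
    url = "http://192.168.1.10:8123/api/states/sensor.twcmanager_AB12_amps_in_use"
    headers = {
        "Authorization": "Bearer <long-lived-access-token>",
        "content-type": "application/json",
    }
    payload = {
        "state": 16,
        "attributes": {
            "unit_of_measurement": "A",
            "device_class": "power",
            "friendly_name": "TWC AB12 ampsInUse",
        },
    }
    return requests.post(url, json=payload, timeout=2, headers=headers)
# ---------------------------------------------------------------------------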
# File: Archive/routes/home_routes.py (repo: taycurran/TwitOff)
from flask import Blueprint, jsonify, request, render_template
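# NOTE (added): User and DB are referenced below but not imported in this file;
# in a TwitOff-style app they would come from the models module, e.g.
# `from twitoff.models import DB, User` (an assumed path, not in the original file).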
home_routes = Blueprint("home_routes", __name__)
@home_routes.route("/")
def index():
users = User.query.all()
return render_template('base.html', title='Home',
users=users)
@home_routes.route("/about")
def about():
return "About Me"
@home_routes.route('/reset')
def reset():
DB.drop_all()
DB.create_all()
return render_template('base.html', title='Reset', users=[])
# # Add config for database
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
# # stop tracking modifications on sqlalchemy config
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# # ? app.config["TWITTER_API_CLIENT"] = twitter
# # Have the database know about the app
# DB.init_app(app)
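# ---------------------------------------------------------------------------
# Registration sketch (added; not part of the original file): how a blueprint
# like this is typically wired into a Flask app.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(home_routes)
    print(app.url_map)
# ---------------------------------------------------------------------------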
# File: intent/scripts/classification/ctn_to_classifier.py (repo: rgeorgi/intent)
from argparse import ArgumentParser
from collections import defaultdict
import glob
import os
import pickle
from random import shuffle, seed
import sys
from tempfile import mkdtemp
import shutil
import logging
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
CTN_LOG = logging.getLogger('CTN_CLASS')
CTN_LOG.setLevel(logging.DEBUG)
logging.basicConfig()
from intent.igt.metadata import set_intent_method, get_intent_method
from intent.interfaces.stanford_tagger import StanfordPOSTagger
from intent.pos.TagMap import TagMap
from intent.utils.env import tagger_model, proj_root
from xigt.codecs import xigtxml
from xigt.consts import ALIGNMENT
from intent.eval.pos_eval import poseval
from intent.igt.consts import GLOSS_WORD_ID, POS_TIER_TYPE, LANG_WORD_ID, GLOSS_WORD_TYPE, POS_TIER_ID, \
INTENT_TOKEN_TYPE, INTENT_POS_PROJ, LANG_WORD_TYPE, TRANS_WORD_TYPE, TRANS_WORD_ID, MANUAL_POS, INTENT_POS_CLASS
from intent.igt.rgxigt import RGCorpus, strip_pos, RGIgt, RGTokenTier, RGTier, gen_tier_id, RGToken, \
ProjectionTransGlossException, word_align
from intent.interfaces.mallet_maxent import MalletMaxent
from intent.scripts.classification.xigt_to_classifier import instances_to_classifier
from intent.utils.token import POSToken, GoldTagPOSToken
from intent.igt.igtutils import rgp
__author__ = 'rgeorgi'
"""
The purpose of this module is to evaluate the POS-line classifiers trained on
"""
def eval_classifier(c, inst_list, context_feats=False, posdict=None):
"""
:param c: The classifier
:param inst_list: A list of Igt instances to test against. Must already have POS tags.
"""
gold_sents = []
eval_sents = []
to_dump = RGCorpus()
for inst in inst_list:
to_tag = inst.copy()
strip_pos(to_tag)
# Do the classification.
to_tag.classify_gloss_pos(c, lowercase=True,
feat_next_gram=context_feats,
feat_prev_gram=context_feats,
posdict=posdict)
to_dump.append(to_tag)
# Fix the tags...
# fix_ctn_gloss_line(to_tag, tag_method=INTENT_POS_CLASS)
# Now, retrieve eval/gold.
eval_tags = [v.value() for v in to_tag.get_pos_tags(GLOSS_WORD_ID, tag_method=INTENT_POS_CLASS)]
gold_tags = [v.value() for v in inst.get_pos_tags(GLOSS_WORD_ID, tag_method=MANUAL_POS)]
tag_tokens = [POSToken('a', label=l) for l in eval_tags]
gold_tokens= [POSToken('a', label=l) for l in gold_tags]
if not len(tag_tokens) == len(gold_tokens):
print("LENGTH OF SEQUENCE IS MISMATCHED")
continue
gold_sents.append(gold_tokens)
eval_sents.append(tag_tokens)
xigtxml.dump(open('./enriched_ctn_dev.xml', 'w'), to_dump)
return poseval(eval_sents, gold_sents, details=True,csv=True, matrix=True)
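# Added sketch (not in the original script): poseval() above receives two
# parallel lists of sentences, each a list of tokens carrying a POS label.
# A toy token-level accuracy over plain tag strings illustrates the comparison;
# the function name is illustrative only.
def toy_tag_accuracy(eval_sents, gold_sents):
    pairs = [(e, g) for es, gs in zip(eval_sents, gold_sents) for e, g in zip(es, gs)]
    return sum(e == g for e, g in pairs) / len(pairs) if pairs else 0.0

# toy_tag_accuracy([["NOUN", "VERB"]], [["NOUN", "ADP"]]) -> 0.5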
def eval_proj(xc):
prj_sents = []
sup_sents = []
for inst in xc:
fix_ctn_gloss_line(inst, tag_method=INTENT_POS_PROJ)
# Do the projection comparison
sup = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=MANUAL_POS)
prj = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=INTENT_POS_PROJ)
sup_tags = []
prj_tags = []
for s in sup:
sup_tags.append(POSToken(s.value(), label=s.value()))
# If the same tag occurs in the projections...
if not prj:
prj_tags.append(POSToken('UNALIGNED', label='UNALIGNED'))
continue
proj_tag = prj.find(alignment=s.attributes[ALIGNMENT])
if proj_tag:
prj_tags.append(POSToken(proj_tag.value(), label=proj_tag.value()))
else:
prj_tags.append(POSToken('UNALIGNED', label='UNALIGNED'))
sup_sents.append(sup_tags)
prj_sents.append(prj_tags)
poseval(prj_sents, sup_sents, details=True)
def fix_ctn_gloss_line(inst, tag_method=None):
"""
Given a CTN gloss line, do some specific fixes to attempt to fix the CTN tag mapping.
:param inst:
:type inst:RGIgt
"""
gpos_tier = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=tag_method)
# Get the gloss words
for gw in inst.gloss:
new_tag = None
if gw.value().lower() in ['foc','top','seq','add','emph','cit','rep']:
new_tag = 'PRT'
elif gw.value().lower() in ['but','and','or']:
new_tag = 'CONJ'
elif 'dem' in gw.value().lower():
new_tag = 'PRON'
elif gw.value().lower() in ['for','in']:
new_tag = 'ADP'
elif gw.value().lower() in ['the']:
new_tag = 'DET'
if new_tag:
gpos = gpos_tier.find(alignment=gw.id)
if not gpos:
gpt = RGToken(id=gpos_tier.askItemId(), alignment=gw.id, text=new_tag)
gpos_tier.add(gpt)
else:
gpos.text = new_tag
if __name__ == '__main__':
ctn_train = './data/xml-files/ctn/ctn_train.xml'
ctn_dev = './data/xml-files/ctn/ctn_dev.xml'
ctn_dev_processed = './data/xml-files/ctn/ctn_dev_processed.xml'
ctn_train_processed = './data/xml-files/ctn/ctn_train_processed.xml'
posdict = pickle.load(open('./data/dictionaries/CTN.dict', 'rb'))
# print("Loading CTN Dev Corpus...", end=" ", flush=True)
# dev_xc = RGCorpus.load(ctn_dev)
# print("Done.")
#
# print("Loading CTN Train corpus...", end=" ", flush=True)
# train_xc = RGCorpus.load(ctn_train)
# print("Done.")
print("Initializing tagger...", end=" ", flush=True)
tagger = StanfordPOSTagger(tagger_model)
print("Done.")
# =============================================================================
# 1) Start by projecting the language line to the gloss line in the dev set,
# remapping it from the CTN tagset to the universal tagset along the way.
# =============================================================================
#
# print("Processing DEV corpus...", end=' ', flush=True)
# for inst in dev_xc:
# word_align(inst.gloss, inst.lang)
# inst.project_lang_to_gloss(tagmap = './data/tagset_mappings/ctn.txt')
# fix_ctn_gloss_line(inst, tag_method=MANUAL_POS)
# inst.tag_trans_pos(tagger)
# inst.heur_align() # Align trans/gloss lines heuristically
# inst.project_trans_to_gloss() # Now, project heuristically.
# print('done.')
#
# xigtxml.dump(open(ctn_dev_processed, 'w', encoding='utf-8'), dev_xc)
#
#
# print("Processing TRAIN Corpus...", end=' ', flush=True)
# # Get the language line words projected onto the gloss...
# for inst in train_xc:
# word_align(inst.gloss, inst.lang)
# inst.project_lang_to_gloss(tagmap = './data/tagset_mappings/ctn.txt')
# inst.tag_trans_pos(tagger)
# inst.heur_align()
# inst.project_trans_to_gloss()
# fix_ctn_gloss_line(inst, tag_method=INTENT_POS_PROJ)
#
# print("Done.")
#
# xigtxml.dump(open(ctn_train_processed, 'w', encoding='utf-8'), train_xc)
# sys.exit()
print("Loading Processed CTN Train corpus...", end=" ", flush=True)
train_xc = RGCorpus.load(ctn_train_processed)
print("Done.")
print("Loading Processed CTN Dev corpus...", end=" ", flush=True)
dev_xc = RGCorpus.load(ctn_dev_processed)
print("Done.")
#
# # =============================================================================
# # 2) Train a classifier based on the projected gloss line.
# # =============================================================================
#
index_list = [35,70,106,141,284,569,854,1139,1424,1708,1993,7120]
for train_stop_index in index_list:
train_instances = list(train_xc)[0:train_stop_index]
print('* '*50)
tokens = 0
for inst in train_instances:
tokens += len(inst.gloss)
print("Now training with {} tokens, {} instances.".format(tokens, train_stop_index))
print("Training Classifier...", end=" ", flush=True)
c = instances_to_classifier(train_instances, './ctn-train.class',
tag_method=MANUAL_POS,
posdict=posdict,
context_feats=True,
feat_path='./ctn-train_feats.txt')
print("Done.")
# c = MalletMaxent('/Users/rgeorgi/Documents/code/dissertation/gc.classifier')
# c = MalletMaxent('./ctn_class.class.classifier')
print("Evaluating classifier...", end=" ", flush=True)
eval_classifier(c, dev_xc, posdict=posdict, context_feats=True)
print("Done.")
# eval_proj(dev_xc)
# File: watchdog/back-end/v0.3.0/watchdog/app/resource/video.py (repo: Havana3351/Low-cost-remote-monitor)
from flask_restful import Resource
from flask import Response
import os
import cv2
picturecounter = 1  # marker that keeps the number of stored frames from growing (index cycles 1-5)
class Video(Resource):
# If the request method is GET, this method is called
def get(self):
global picturecounter
# username = (request.get_json())['username']
# db = pymysql.connect("rm-2ze61i7u6d7a3fwp9yo.mysql.rds.aliyuncs.com", "team", "Aaa5225975", "pidata")
# cursor = db.cursor()
#
# sql = "select rpiname from user where username=\'" + username + "\'"  # possible type issue
# cursor.execute(sql)
# row = cursor.fetchone()
#
# if not row:
# rpiname = None
# rpiname = str(row[0])
# Override with a hard-coded value
rpiname = 'raspberrypi'
# Get the pointer (latest frame index) and assign it
path = r'/root/video/realtime/%s' % (rpiname)
picnames = []
for filenames in os.walk(path):
picnames = filenames
print(picnames)
pointer = int(((picnames[2])[0].split('.'))[0])
picturecounter = pointer
picpath = r'/root/video/realtime/%s/%s.jpg' % (rpiname, picturecounter)
image = cv2.imread(picpath)
bs = cv2.imencode(".jpg", image)[1].tobytes()
picturecounter += 1
if(picturecounter > 5):
picturecounter = 1
return Response(bs, mimetype='image/jpeg')
def post(self):
print("post") | from flask_restful import Resource
# File: codes/ambfix.py (repo: valgur/LEOGPS)
#!/usr/bin/env python3
'''
###############################################################################
###############################################################################
## ##
## _ ___ ___ ___ ___ ___ ##
## | | | __ / \ / __| _ | __| ##
## | |__| __ ( ) | (_ | _|__ \ ##
## |____|___ \___/ \___|_| \___/ ##
## v 1.0 (Stable) ##
## ##
## FILE DESCRIPTION: ##
## ##
## This is the classical LAMBDA method that was originally authored by ##
## Teunissen, Jonge, and Tiberius (1993). The code was later written in ##
## MATLAB by Dr <NAME> and Dr <NAME>. It takes in a vector of ##
## float ambiguities to the integer least-squares problem, and covariance ##
## of the float ambiguities. It then runs the LAMBDA's ILS search-&-shrink ##
## and spits out the ambiguity integers. The other 5 methods in original ##
## LAMBDA MATLAB code are not supported here (feel free to edit the code ##
## and implement it youself!). The default ncands = 2, as per original code. ##
## All support functions from the original MATLAB code (decorrel, ldldecom) ##
## have been nested within the main function as sub functions. ##
## ##
## INPUTS: ##
## ##
## - ahat : numpy array of float ambiguities ##
## - Qahat : numpy covariance matrix for float ambiguities ##
## - ncands : number of candidates (optional parameter, default = 2) ##
## ##
## OUTPUT: ##
## ##
## - afixed : Array of size (n x ncands) with the estimated integer ##
## candidates, sorted according to the corresponding squared ##
## norms, best candidate first. ##
## - sqnorm : Distance between integer candidate and float ambiguity ##
## vectors in the metric of the variance-covariance matrix. ##
## ##
## REMARKS: ##
## ##
## Besides above changes, mostly syntax changes to this Python version: ##
## - Everything is identical EXCEPT MATLAB is ones-based indexing. ##
## - Python is zeros-based indexing, and range function does not ##
## include the upper limit index. Thus, only indices have changed. ##
## - Example in MATLAB: for i = 1:5 => {1,2,3,4,5} ##
## - Equivalently in Python: for i in range(0,5) => {0,1,2,3,4} ##
## - Indices are thus updated accordingly. ##
## ##
## DEVELOPER: Professor <NAME> (TU Delft) ##
## ORIGINAL AUTHOR: <NAME> and <NAME> (TU Delft) ##
## AUTHOR MODIFIED: 26-07-2019, by <NAME>, with permissions. ##
## ##
###############################################################################
###############################################################################
'''
import numpy as np
def LAMBDA( ahat, Qahat, ncands = 2 ):
###########################################################################
###########################################################################
# [afixed, sqnorm] = LAMBDA( ahat, Qahat, ncands )
#
# This is the main routine of the LAMBDA software package. By default the
# ILS method will be used for integer estimation based on the provided
# float ambiguity vector ahat and associated variance-covariance matrix
# Qahat. In this Pythonic version (modified by <NAME>, 2019), only
# the ILS method is implemented. For other techniques: integer rounding,
# bootstrapping or Partial Ambiguity Resolution (PAR), the user is free
# to modify this code and adapt it to their own needs.
#
# NOTE 1: LAMBDA always first applies a decorrelation before the integer
# estimation (for ILS this is required to guarantee an efficient search,
# for rounding and bootstrapping it is required in order to get higher
# success rates).
#
# INPUTS:
#
# ahat: Float ambiguities (must be a column!)
# Qahat: Variance/covariance matrix of ambiguities
# ncands: number of candidates (optional parameter, default = 2)
#
# OUTPUTS:
#
# afixed: Array of size (n x ncands) with the estimated integer
# candidates, sorted according to the corresponding squared
# norms, best candidate first.
# sqnorm: Distance between integer candidate and float ambiguity vectors
# in the metric of the variance-covariance matrix Qahat.
# Only available for ILS.
#
# -------------------------------------------------------------------------
# Release date : 1-SEPT-2012
# Authors : <NAME> and <NAME>
#
# GNSS Research Centre, Curtin University
# Mathematical Geodesy and Positioning, Delft University of Technology
# -------------------------------------------------------------------------
#
# REFERENCES:
# 1. LAMBDA Software Package: Matlab implementation, Version 3.0.
# Documentation provided with this software package.
# 2. Teunissen P (1993) Least-squares estimation of the integer GPS
# ambiguities. In: Invited lecture, section IV theory and methodology,
# IAG General Meeting, Beijing, China
# 3. Teunissen P (1995) The least-squares ambiguity decorrelation
    #     adjustment: a method for fast GPS ambiguity estimation. J Geod
# 70:651-7
    # 4. <NAME>, <NAME> (1996) The LAMBDA method of integer ambiguity
    #    estimation: implementation aspects.
# 5. Chang X ,<NAME>, <NAME> (2005) MLAMBDA: a modified LAMBDA method for
# integer least-squares estimation
###########################################################################
###########################################################################
''' A function for obtaining the decimals only from float arrays '''
def floatrem( fltarray ):
# This function is NECESSARY because of the differences between:
# MATLAB's rem function
# (computes the true mathematical remainder)
# And Python's modulo % operator
# (computes remainder complementary to the floor_divide function)
fltarray = np.array(fltarray)
fltarray = fltarray + 0.000001
intarray = fltarray.astype(int)
decarray = fltarray - intarray
return decarray, intarray
###########################################################################
###########################################################################
''' A function to perform LtDL decomposition of the covariance matrix '''
def ldldecom( Qahat1 ):
# This routine finds the LtDL decomposition of a given variance or
# covariance matrix.
#
# Input arguments:
# Qahat: Symmetric n by n matrix to be factored
#
# Output arguments:
# L: n by n factor matrix (strict lower triangular)
# D: Diagonal n-vector
# ------------------------------------------------------------------
# File.....: ldldecom
# Date.....: 19-MAY-1999
# Author...: <NAME>
# Mathematical Geodesy and Positioning
# Delft University of Technology
# ------------------------------------------------------------------
Qahat2 = Qahat1.copy()
# If we do not use copy, we will overwrite the original Qahat...
# ... even the one outside the function! This doesn't occur in MATLAB.
n = len(Qahat2)
D = np.zeros((n))
L = np.zeros((n,n))
for i in range(n-1,-1,-1):
D[i] = Qahat2[i][i]
L[i,0:i+1] = Qahat2[i,0:i+1] / ((Qahat2[i][i])**0.5)
for j in range(0,i):
Qahat2[j,0:j+1] = Qahat2[j,0:j+1] - L[i,0:j+1]*L[i][j]
L[i,0:i+1] = L[i,0:i+1] / L[i][i]
return L,D
###########################################################################
###########################################################################
''' Decorrelation function for LAMBDA '''
def decorrel( ahat, Qahat ):
# function [Qzhat,Z,L,D,zhat,iZt] = decorrel (Qahat,ahat)
# DECORREL: Decorrelate a (co)variance matrix of ambiguities
#
# [Qzhat,Z,L,D,zhat] = decorrel (Qahat,ahat)
#
# This routine creates a decorrelated Q-matrix, by finding the
# Z-matrix and performing the corresponding transformation.
#
# The method is described in:
# The routine is based on Fortran routines written by <NAME>
# and on Matlab-routines written by <NAME>.
# The resulting Z-matrix can be used as follows:
# zhat = Zt * ahat; \hat(z) = Z' * \hat(a);
# Q_\hat(z) = Z' * Q_\hat(a) * Z
#
# Input arguments:
# Qahat: Variance-covariance matrix of ambiguities (original)
# ahat: Original ambiguities (optional)
#
# Output arguments:
# Qzhat: Variance-covariance matrix of decorrelated ambiguities
# Z: Z-transformation matrix
# L: L matrix (from LtDL-decomposition of Qzhat)
# D: D matrix (from LtDL-decomposition of Qzhat)
# zhat: Transformed ambiguities (optional)
# iZt: inv(Z')-transformation matrix
#
# ------------------------------------------------------------------
# Function.: decorrel
# Date.....: 19-MAY-1999 / modified 12-APRIL-2012
# Author...: <NAME> / <NAME>
# Mathematical Geodesy and Positioning
# Delft University of Technology
# Modified.: <NAME>, July 2019, DSO National Laboratories
# ------------------------------------------------------------------
# Initialisations
n = len(Qahat)
iZt = np.identity(n)
i1 = n - 1
sw = True
# LtDL decomposition
L, D = ldldecom(Qahat)
while sw == 1:
i = n # Loop for column from n to 1
sw = 0
while sw == 0 and i > 1:
i = i - 1 # The i-th column
if i <= i1:
for j in range(i,n):
# We have to do some manual coding here, as python's
# rounding for .5's are different from MATLAB's
mu = L[j,i-1] # Get the float mu
mu_dec = mu%1 # Get the decimal float of mu
if mu_dec == 0.5:
mu += 0.01 # Just to make it round up properly.
mu = round(mu)
if mu != 0.0:
L[j:n,i-1] = L[j:n,i-1] - mu * L[j:n,j]
iZt[:,j] = iZt[:,j] + mu * iZt[:,i-1]
delta = D[i-1] + (L[i,i-1]**2) * D[i]
if delta < D[i]:
lam = D[i] * L[i,i-1] / delta
eta = D[i-1] / delta
D[i-1] = eta * D[i]
D[i] = delta
mult1 = np.array([-1*L[i,i-1], 1])
mult2 = np.array([eta,lam])
mult3 = np.stack((mult1,mult2))
L[i-1:i+1,0:i-1] = np.matmul(mult3,L[i-1:i+1,0:i-1])
L[i,i-1] = lam
# Flip rows i and i+1
L[i+1:n,i-1:i+1] = np.flip(L[i+1:n,i-1:i+1], axis=0)
iZt[:,i-1:i+1] = np.flip(iZt[:,i-1:i+1], axis=0)
i1 = i
sw = 1
iZt = iZt + 0.000001 # Resolves Python 3's rounding definition
Z = np.round(np.linalg.inv(iZt.transpose()))
Qzhat = np.matmul( Qahat, Z )
Qzhat = np.matmul( Z.transpose(), Qzhat )
zhat = np.matmul(Z.transpose(),ahat)
iZt = np.round(iZt)
return Qzhat, Z, L, D, zhat, iZt
###########################################################################
###########################################################################
def ssearch( ahat, L, D, ncands):
#------------------------------------------------------------------|
#
# Integer ambiguity vector search via search-and-shrink technique.
#
# INPUTS:
#
# ahat : Float ambiguities (should be decorrelated for
# computational efficiency)
# L,D : LtDL-decomposition of the variance-covariance matrix
# of the float ambiguities ahat
# ncands: Number of requested candidates
#
# OUTPUTS:
#
# afixed: estimated integers (n, x, ncands)
# sqnorm: corresponding squared norms (n-vector, ascending order)
#
#------------------------------------------------------------------|
# Date : 02-SEPT-2010 |
# Author : <NAME> |
# GNSS Research Center, Department of Spatial Sciences |
# Curtin University of Technology |
# E-mail : <EMAIL> |
#------------------------------------------------------------------|
# First, check that float ambiguity and D have same length
if len(ahat) != len(D):
print('Error! Float ambiguity vector must be a column vector!')
print('It must also have the same dimension as D')
return None
# Initialising outputs
n = len(ahat)
afixed = np.zeros((n, ncands))
sqnorm = np.zeros(ncands)
# Initializing the variables for searching
Chi2 = 1.0e+18 # Start search with an infinite chi-square
dist = np.zeros(n) # MATLAB distance function
endsearch = False # Search trigger
count = 0 # Count the number of candidates
acond = np.zeros(n)
acond[n-1] = ahat[n-1]
zcond = np.zeros(n)
zcond[n-1] = np.round(acond[n-1]+0.000001)
left = acond[n-1] - zcond[n-1]
step = np.zeros(n)
step[n-1] = np.sign(left)
if step[n-1] == 0:
step[n-1] = 1 # Give a positive step.
imax = ncands - 1 # Initially, the maximum F(z) is at ncands
S = np.zeros((n,n)) # Used to compute conditional ambiguities
k = n
# Now we start the main search loop.
while endsearch == False:
newdist = dist[k-1] + (left**2) / D[k-1]
if newdist < Chi2:
if k != 1: # Case 1: move down
k -= 1
dist[k-1] = newdist
S[k-1,0:k] = S[k,0:k] + (zcond[k] - acond[k])*L[k,0:k]
acond[k-1] = ahat[k-1] + S[k-1,k-1]
zcond[k-1] = np.round(acond[k-1]+0.000001)
left = acond[k-1] - zcond[k-1]
step[k-1] = np.sign(left)
if step[k-1] == 0: # Very rarely would this happen...
step[k-1] = 1 # ... but just in case, you know.
else: # Case 2: store the found candidate and try the next.
if count < (ncands - 1):
# Store the 1st ncands-1 initial points as candidates
count += 1
afixed[:,count-1] = zcond[0:n];
sqnorm[count-1] = newdist # Store F(zcond)
else:
afixed[:,imax] = zcond[0:n]
sqnorm[imax] = newdist
Chi2 = max(sqnorm)
imax = np.argmax(sqnorm) # No need to add '-1' to imax
zcond[0] = zcond[0] + step[0]
left = acond[0] - zcond[0]
step[0] = -1*step[0] - np.sign(step[0])
else: # Case 3: exit or move up
if k == n:
endsearch = True
else:
k += 1 # Move up
zcond[k-1] = zcond[k-1] + step[k-1]
left = acond[k-1] - zcond[k-1]
step[k-1] = -1*step[k-1] - np.sign(step[k-1])
order = np.argsort(sqnorm) # Get an array of INDICES for a sort.
sqnormf = np.sort(sqnorm) # Get an array of ACTUAL SORTS for sqnorm.
afixedf = np.copy(afixed)
for k in range(0,len(order)):
afixedf[:,k] = afixed[:,order[k]]
return afixedf, sqnormf
###########################################################################
###########################################################################
''' Initialisation and some initial sanity checks... '''
# Initialise all output variables
sqnorm = np.array([])
# Test inputs: Is the Q-matrix symmetric?
if np.array_equal(Qahat,Qahat.transpose()) == False:
print('Variance-covariance matrix is not symmetric!')
return None
# Test inputs: Is the Q-matrix positive-definite?
if np.sum(np.linalg.eig(Qahat)[0] > 0.0) != len(Qahat):
print('Variance-covariance matrix is not positive definite!')
return None
# Test inputs: Does Q-matrix and amb vector have identical dimensions?
if len(ahat) != len(Qahat):
print('Variance-covariance matrix and vector of ambiguities...')
print('... do not have identical dimensions!')
return None
###########################################################################
###########################################################################
''' Begin least-squares ambiguity decorrelation adjustment! '''
# Remove integer numbers from float solution, so that all values are
# between -1 and 1 (for computational convenience only)
ahat, incr = floatrem( ahat )
# Compute Z matrix based on the decomposition Q=L^T*D*L;
Qzhat, Z, L, D, zhat, iZt = decorrel( ahat, Qahat )
# Integer ambiguity vector search via search-and-shrink
zfixedff, sqnormff = ssearch( zhat, L, D, ncands )
# Perform the back-transformation and add the increments
afixed = np.matmul(iZt,zfixedff)
repmat = np.repeat(np.array([incr]),ncands,axis=0)
repmat = repmat.transpose()
afixed = afixed + repmat
afixed = afixed.transpose()
###########################################################################
###########################################################################
''' Returns best amb-fix, second best amb-fix, and the square norm '''
    sqnorm = sqnormff # propagate the squared norms computed by ssearch (was returned empty before)
    return afixed, sqnorm
###########################################################################
###########################################################################
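# -----------------------------------------------------------------------------
# Added usage sketch (not part of the original LEOGPS file). The ambiguity and
# covariance values below are arbitrary illustrative numbers, chosen only to be
# symmetric and positive-definite; they do not come from real GPS data.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    ahat_demo = np.array([2.3, -1.6]) # float ambiguities (illustrative)
    Qahat_demo = np.array([[4.0, 1.2],
                           [1.2, 3.0]]) # symmetric positive-definite covariance
    afixed_demo, sqnorm_demo = LAMBDA(ahat_demo, Qahat_demo, ncands=2)
    print('Integer candidates (best first):')
    print(afixed_demo)
    print('Squared norms of the candidates:')
    print(sqnorm_demo)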
summarizer/test_summarizer.py | bmcilw1/text-summary | 0 | 708 |
from summarizer.summarizer import summarize
def test_summarize_whenPassedEmptyString_ReturnsEmpty():
assert summarize("") == "" | <filename>summarizer/test_summarizer.py
XORCipher/XOREncrypt.py | KarthikGandrala/DataEncryption | 1 | 709 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Function to encrypt message using key is defined
def encrypt(msg, key):
# Defining empty strings and counters
hexadecimal = ''
iteration = 0
# Running for loop in the range of MSG and comparing the BITS
for i in range(len(msg)):
temp = ord(msg[i]) ^ ord(key[iteration])
# zfill will pad a single letter hex with 0, to make it two letter pair
hexadecimal += hex(temp)[2:].zfill(2)
        # Advance to the next character of the key
iteration += 1
if iteration >= len(key):
# once all of the key's letters are used, repeat the key
iteration = 0
# Returning the final value
return hexadecimal
def decrypt(msg, key):
# Defining hex to uni string to store
hex_to_uni = ''
# Running for loop to the length of message
for i in range(0, len(msg), 2):
# Decoding each individual bytes from hex
hex_to_uni += bytes.fromhex(msg[i:i + 2]).decode('utf-8')
decryp_text = ''
iteration = 0
# For loop running for the length of the hex to unicode string
for i in range(len(hex_to_uni)):
# Comparing each individual bit
temp = ord(hex_to_uni[i]) ^ ord(key[iteration])
# zfill will pad a single letter hex with 0, to make it two letter pair
decryp_text += chr(temp)
iteration += 1
if iteration >= len(key):
# once all of the key's letters are used, repeat the key
iteration = 0
    # Finally return the decrypted text string
return decryp_text
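# -----------------------------------------------------------------------------
# Added usage sketch (not part of the original file): round-trip check of the
# two helpers above. The message and key below are arbitrary example values.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    demo_key = 'examplekey'
    cipher_hex = encrypt('Attack at dawn', demo_key)
    print('Encrypted (hex):', cipher_hex)
    print('Decrypted text :', decrypt(cipher_hex, demo_key))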
02-Use-functions/21-Opening_a_file/secret_message.py | francisrod01/udacity_python_foundations | 0 | 710 |
#!/usr/bin/python3
import os
import random
def rename_files(path):
file_list = os.listdir(path)
print(file_list)
for file_name in file_list:
        # Remove numbers from the filename (alternative approach, left commented out):
        # new_file_name = file_name.translate(str.maketrans('', '', "0123456789"))
# Add random numbers to beginning of filename.
new_file_name = str(random.randint(1, 99)) + file_name
print("Renaming " + file_name + " to " + new_file_name)
os.rename(os.path.join(path, file_name), os.path.join(path, new_file_name))
print("# Python program - Adding random numbers to beginning of filename.")
rename_files("./prank")
xarray/core/variable.py | timgates42/xarray | 0 | 711 |
import copy
import functools
import itertools
import numbers
import warnings
from collections import defaultdict
from datetime import timedelta
from distutils.version import LooseVersion
from typing import (
Any,
Dict,
Hashable,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import numpy as np
import pandas as pd
import xarray as xr # only for Dataset and DataArray
from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils
from .indexing import (
BasicIndexer,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
as_indexable,
)
from .npcompat import IS_NEP18_ACTIVE
from .options import _get_keep_attrs
from .pycompat import (
cupy_array_type,
dask_array_type,
integer_types,
is_duck_dask_array,
)
from .utils import (
OrderedSet,
_default,
decode_numpy_dict_values,
drop_dims_from_indexers,
either_dict_or_kwargs,
ensure_us_time_resolution,
infix_dims,
is_duck_array,
)
NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
(
indexing.ExplicitlyIndexed,
pd.Index,
)
+ dask_array_type
+ cupy_array_type
)
# https://github.com/python/mypy/issues/224
BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore
VariableType = TypeVar("VariableType", bound="Variable")
"""Type annotation to be used when methods of Variable return self or a copy of self.
When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the
output as an instance of the subclass.
Usage::
class Variable:
def f(self: VariableType, ...) -> VariableType:
...
"""
class MissingDimensionsError(ValueError):
"""Error class used when we can't safely guess a dimension name."""
# inherits from ValueError for backward compatibility
# TODO: move this to an xarray.exceptions module?
def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]":
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
"""
from .dataarray import DataArray
# TODO: consider extending this method to automatically handle Iris and
if isinstance(obj, DataArray):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except (TypeError, ValueError) as error:
# use .format() instead of % because it handles tuples consistently
raise error.__class__(
"Could not convert tuple of form "
"(dims, data[, attrs, encoding]): "
"{} to Variable.".format(obj)
)
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
obj = Variable(obj.name, obj)
elif isinstance(obj, (set, dict)):
raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj)))
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise MissingDimensionsError(
"cannot set variable %r with %r-dimensional data "
"without explicit dimension names. Pass a tuple of "
"(dims, data) instead." % (name, data.ndim)
)
obj = Variable(name, data, fastpath=True)
else:
raise TypeError(
"unable to convert object into a variable without an "
"explicit list of dimensions: %r" % obj
)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise MissingDimensionsError(
"%r has more than 1-dimension and the same name as one of its "
"dimensions %r. xarray disallows such variables because they "
"conflict with the coordinates used to label "
"dimensions." % (name, obj.dims)
)
obj = obj.to_index_variable()
return obj
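# Added illustrative note (not part of the original module); behaviour inferred
# from the docstring of ``as_variable`` above:
#
#   as_variable(("x", [1, 2, 3]))      # tuple is unpacked into Variable(dims, data)
#   as_variable([1, 2, 3], name="x")   # 1D data labelled along dimension "x",
#                                      # returned as an IndexVariable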
def _maybe_wrap_data(data):
"""
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
return PandasIndexAdapter(data)
return data
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64, according to the pandas convention. Also used for
validating that datetime64 and timedelta64 objects are within the valid date
range for ns precision, as pandas will raise an error if they are not.
"""
return np.asarray(pd.Series(values.ravel())).reshape(values.shape)
def as_compatible_data(data, fastpath=False):
"""Prepare and wrap data to put in a Variable.
- If data does not have the necessary attributes, convert it to ndarray.
- If data has dtype=datetime64, ensure that it has ns precision. If it's a
pandas.Timestamp, convert it to datetime64.
- If data is already a pandas or xarray object (other than an Index), just
use the values.
Finally, wrap it up with an adapter if necessary.
"""
if fastpath and getattr(data, "ndim", 0) > 0:
# can't use fastpath (yet) for scalars
return _maybe_wrap_data(data)
if isinstance(data, Variable):
return data.data
if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
return _maybe_wrap_data(data)
if isinstance(data, tuple):
data = utils.to_0d_object_array(data)
if isinstance(data, pd.Timestamp):
# TODO: convert, handle datetime objects, too
data = np.datetime64(data.value, "ns")
if isinstance(data, timedelta):
data = np.timedelta64(getattr(data, "value", data), "ns")
# we don't want nested self-described arrays
data = getattr(data, "values", data)
if isinstance(data, np.ma.MaskedArray):
mask = np.ma.getmaskarray(data)
if mask.any():
dtype, fill_value = dtypes.maybe_promote(data.dtype)
data = np.asarray(data, dtype=dtype)
data[mask] = fill_value
else:
data = np.asarray(data)
if not isinstance(data, np.ndarray):
if hasattr(data, "__array_function__"):
if IS_NEP18_ACTIVE:
return data
else:
raise TypeError(
"Got an NumPy-like array type providing the "
"__array_function__ protocol but NEP18 is not enabled. "
"Check that numpy >= v1.16 and that the environment "
'variable "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION" is set to '
'"1"'
)
# validate whether the data is valid data types.
data = np.asarray(data)
if isinstance(data, np.ndarray):
if data.dtype.kind == "O":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "M":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "m":
data = _possibly_convert_objects(data)
return _maybe_wrap_data(data)
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed
"""
if isinstance(data, cupy_array_type):
data = data.get()
else:
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == "M":
data = np.datetime64(data, "ns")
elif data.dtype.kind == "m":
data = np.timedelta64(data, "ns")
return data
class Variable(
common.AbstractArray, arithmetic.SupportsArithmetic, utils.NdimSizeLenMixin
):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
    name. For example, adding a Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
"""
__slots__ = ("_dims", "_data", "_attrs", "_encoding")
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
"""
Parameters
----------
dims : str or sequence of str
            Name(s) of the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions.
data : array_like
Data array which supports numpy-like data access.
attrs : dict_like or None, optional
Attributes to assign to the new variable. If None (default), an
empty attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
Well-behaved code to serialize a Variable should ignore
unrecognized encoding items.
"""
self._data = as_compatible_data(data, fastpath=fastpath)
self._dims = self._parse_dimensions(dims)
self._attrs = None
self._encoding = None
if attrs is not None:
self.attrs = attrs
if encoding is not None:
self.encoding = encoding
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@property
def nbytes(self):
return self.size * self.dtype.itemsize
@property
def _in_memory(self):
return isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or (
isinstance(self._data, indexing.MemoryCachedArray)
and isinstance(self._data.array, indexing.NumpyIndexingAdapter)
)
@property
def data(self):
if is_duck_array(self._data):
return self._data
else:
return self.values
@data.setter
def data(self, data):
data = as_compatible_data(data)
if data.shape != self.shape:
raise ValueError(
f"replacement data must match the Variable's shape. "
f"replacement data has shape {data.shape}; Variable has shape {self.shape}"
)
self._data = data
def astype(
self: VariableType,
dtype,
*,
order=None,
casting=None,
subok=None,
copy=None,
keep_attrs=True,
) -> VariableType:
"""
Copy of the Variable object, with data cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result. ‘C’ means C order,
‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are
Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to
the order the array elements appear in memory as possible.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise the
returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
keep_attrs : bool, optional
By default, astype keeps attributes. Set to False to remove
attributes in the returned object.
Returns
-------
out : same as object
New object with data cast to the specified type.
Notes
-----
The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed
through to the ``astype`` method of the underlying array when a value
different than ``None`` is supplied.
Make sure to only supply these arguments if the underlying array class
supports them.
See also
--------
numpy.ndarray.astype
dask.array.Array.astype
sparse.COO.astype
"""
from .computation import apply_ufunc
kwargs = dict(order=order, casting=casting, subok=subok, copy=copy)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
return apply_ufunc(
duck_array_ops.astype,
self,
dtype,
kwargs=kwargs,
keep_attrs=keep_attrs,
dask="allowed",
)
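# Illustrative sketch (comments only, not executed; assumes numpy imported
# as np, as elsewhere in this module): casting keeps attributes by default
# because keep_attrs defaults to True.
#   v = Variable(("x",), np.array([1, 2, 3]), attrs={"units": "m"})
#   v.astype(np.float32)                     # float32 data, attrs preserved
#   v.astype(np.float32, keep_attrs=False)   # float32 data, attrs dropped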
def load(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return this variable.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
if is_duck_dask_array(self._data):
self._data = as_compatible_data(self._data.compute(**kwargs))
elif not is_duck_array(self._data):
self._data = np.asarray(self._data)
return self
def compute(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return a new variable. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def __dask_tokenize__(self):
# Use v.data, instead of v._data, in order to cope with the wrappers
# around NetCDF and the like
from dask.base import normalize_token
return normalize_token((type(self), self._dims, self.data, self._attrs))
def __dask_graph__(self):
if is_duck_dask_array(self._data):
return self._data.__dask_graph__()
else:
return None
def __dask_keys__(self):
return self._data.__dask_keys__()
def __dask_layers__(self):
return self._data.__dask_layers__()
@property
def __dask_optimize__(self):
return self._data.__dask_optimize__
@property
def __dask_scheduler__(self):
return self._data.__dask_scheduler__
def __dask_postcompute__(self):
array_func, array_args = self._data.__dask_postcompute__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
def __dask_postpersist__(self):
array_func, array_args = self._data.__dask_postpersist__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
@staticmethod
def _dask_finalize(results, array_func, array_args, dims, attrs, encoding):
data = array_func(results, *array_args)
return Variable(dims, data, attrs=attrs, encoding=encoding)
@property
def values(self):
"""The variable's data as a numpy.ndarray"""
return _as_array_or_item(self._data)
@values.setter
def values(self, values):
self.data = values
def to_base_variable(self):
"""Return this variable as a base xarray.Variable"""
return Variable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_variable = utils.alias(to_base_variable, "to_variable")
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return IndexVariable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
return self.to_index_variable().to_index()
def to_dict(self, data=True):
"""Dictionary representation of variable."""
item = {"dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs)}
if data:
item["data"] = ensure_us_time_resolution(self.values).tolist()
else:
item.update({"dtype": str(self.dtype), "shape": self.shape})
return item
@property
def dims(self):
"""Tuple of dimension names with which this variable is associated."""
return self._dims
@dims.setter
def dims(self, value):
self._dims = self._parse_dimensions(value)
def _parse_dimensions(self, dims):
if isinstance(dims, str):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError(
"dimensions %s must have the same length as the "
"number of data dimensions, ndim=%s" % (dims, self.ndim)
)
return dims
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
def _broadcast_indexes(self, key):
"""Prepare an indexing key for an indexing operation.
Parameters
----------
key : int, slice, array-like, dict or tuple of integer, slice and array-like
Any valid input for indexing.
Returns
-------
dims : tuple
Dimension of the resultant variable.
indexers : IndexingTuple subclass
Tuple of integer, array-like, or slices to use when indexing
self._data. The type of this argument indicates the type of
indexing to perform, either basic, outer or vectorized.
new_order : Optional[Sequence[int]]
Optional reordering to do on the result of indexing. If not None,
the first len(new_order) indexing should be moved to these
positions.
"""
key = self._item_key_to_tuple(key) # key is a tuple
# key is a tuple of full size
key = indexing.expanded_indexer(key, self.ndim)
# Convert a scalar Variable to an integer
key = tuple(
k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key
)
# Convert a 0d-array to an integer
key = tuple(
k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key
)
if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
return self._broadcast_indexes_basic(key)
self._validate_indexers(key)
# Detect whether the key can be mapped as an outer indexer:
# if every indexer is unlabeled, the key can be mapped as an OuterIndexer.
if all(not isinstance(k, Variable) for k in key):
return self._broadcast_indexes_outer(key)
# If every labeled indexer is 1-dimensional and there are no duplicate
# dimension labels, the key can still be mapped as an OuterIndexer.
dims = []
for k, d in zip(key, self.dims):
if isinstance(k, Variable):
if len(k.dims) > 1:
return self._broadcast_indexes_vectorized(key)
dims.append(k.dims[0])
elif not isinstance(k, integer_types):
dims.append(d)
if len(set(dims)) == len(dims):
return self._broadcast_indexes_outer(key)
return self._broadcast_indexes_vectorized(key)
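# Illustrative sketch of the three indexing paths chosen above (comments
# only; assumes v = Variable(("x", "y"), np.arange(12).reshape(3, 4))):
#   v[0, :2]                            -> basic indexing (ints/slices only)
#   v[[0, 2], [1, 3]]                   -> outer indexing (unlabeled arrays)
#   v[Variable("p", [0, 1]), Variable("p", [1, 2])]
#                                       -> vectorized indexing (shared dim "p")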
def _broadcast_indexes_basic(self, key):
dims = tuple(
dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)
)
return dims, BasicIndexer(key), None
def _validate_indexers(self, key):
""" Make sanity checks """
for dim, k in zip(self.dims, key):
if isinstance(k, BASIC_INDEXING_TYPES):
pass
else:
if not isinstance(k, Variable):
k = np.asarray(k)
if k.ndim > 1:
raise IndexError(
"Unlabeled multi-dimensional array cannot be "
"used for indexing: {}".format(k)
)
if k.dtype.kind == "b":
if self.shape[self.get_axis_num(dim)] != len(k):
raise IndexError(
"Boolean array size {:d} is used to index array "
"with shape {:s}.".format(len(k), str(self.shape))
)
if k.ndim > 1:
raise IndexError(
"{}-dimensional boolean indexing is "
"not supported. ".format(k.ndim)
)
if getattr(k, "dims", (dim,)) != (dim,):
raise IndexError(
"Boolean indexer should be unlabeled or on the "
"same dimension to the indexed array. Indexer is "
"on {:s} but the target dimension is {:s}.".format(
str(k.dims), dim
)
)
def _broadcast_indexes_outer(self, key):
dims = tuple(
k.dims[0] if isinstance(k, Variable) else dim
for k, dim in zip(key, self.dims)
if not isinstance(k, integer_types)
)
new_key = []
for k in key:
if isinstance(k, Variable):
k = k.data
if not isinstance(k, BASIC_INDEXING_TYPES):
k = np.asarray(k)
if k.size == 0:
# Slice by empty list; numpy cannot infer the dtype
k = k.astype(int)
elif k.dtype.kind == "b":
(k,) = np.nonzero(k)
new_key.append(k)
return dims, OuterIndexer(tuple(new_key)), None
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))
def _broadcast_indexes_vectorized(self, key):
variables = []
out_dims_set = OrderedSet()
for dim, value in zip(self.dims, key):
if isinstance(value, slice):
out_dims_set.add(dim)
else:
variable = (
value
if isinstance(value, Variable)
else as_variable(value, name=dim)
)
if variable.dtype.kind == "b": # boolean indexing case
(variable,) = variable._nonzero()
variables.append(variable)
out_dims_set.update(variable.dims)
variable_dims = set()
for variable in variables:
variable_dims.update(variable.dims)
slices = []
for i, (dim, value) in enumerate(zip(self.dims, key)):
if isinstance(value, slice):
if dim in variable_dims:
# We only convert slice objects to variables if they share
# a dimension with at least one other variable. Otherwise,
# we can equivalently leave them as slices and transpose
# the result. This is significantly faster/more efficient
# for most array backends.
values = np.arange(*value.indices(self.sizes[dim]))
variables.insert(i - len(slices), Variable((dim,), values))
else:
slices.append((i, value))
try:
variables = _broadcast_compat_variables(*variables)
except ValueError:
raise IndexError(f"Dimensions of indexers mismatch: {key}")
out_key = [variable.data for variable in variables]
out_dims = tuple(out_dims_set)
slice_positions = set()
for i, value in slices:
out_key.insert(i, value)
new_position = out_dims.index(self.dims[i])
slice_positions.add(new_position)
if slice_positions:
new_order = [i for i in range(len(out_dims)) if i not in slice_positions]
else:
new_order = None
return out_dims, VectorizedIndexer(tuple(out_key)), new_order
def __getitem__(self: VariableType, key) -> VariableType:
"""Return a new Variable object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement xarray-style indexing,
where if keys are unlabeled arrays, we index the array orthogonally
with them. If keys are labeled array (such as Variables), they are
broadcasted with our usual scheme and then the array is indexed with
the broadcasted key, like numpy's fancy indexing.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
"""
dims, indexer, new_order = self._broadcast_indexes(key)
data = as_indexable(self._data)[indexer]
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
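# Illustrative sketch of the orthogonal indexing documented above (comments
# only, not a doctest):
#   v = Variable(("x", "y"), np.arange(12).reshape(3, 4))
#   v[[0, 2], [0, 3]].shape  # -> (2, 2): rows {0, 2} crossed with columns {0, 3}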
def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:
"""Used by IndexVariable to return IndexVariable objects when possible."""
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if is_duck_dask_array(self._data):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
mask = indexing.create_mask(indexer, self.shape, data)
# we need to invert the mask in order to pass data first. This helps
# pint to choose the correct unit
# TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
data = duck_array_ops.where(np.logical_not(mask), data, fill_value)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, "shape", ()))
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
dims, index_tuple, new_order = self._broadcast_indexes(key)
if not isinstance(value, Variable):
value = as_compatible_data(value)
if value.ndim > len(dims):
raise ValueError(
"shape mismatch: value array of shape %s could not be "
"broadcast to indexing result with %s dimensions"
% (value.shape, len(dims))
)
if value.ndim == 0:
value = Variable((), value)
else:
value = Variable(dims[-value.ndim :], value)
# broadcast to become assignable
value = value.set_dims(dims).data
if new_order:
value = duck_array_ops.asarray(value)
value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]
value = duck_array_ops.moveaxis(value, new_order, range(len(new_order)))
indexable = as_indexable(self._data)
indexable[index_tuple] = value
@property
def attrs(self) -> Dict[Hashable, Any]:
"""Dictionary of local attributes on this variable."""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable."""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError("encoding must be castable to a dictionary")
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array is loaded into memory and copied onto
the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
Examples
--------
Shallow copy versus deep copy
>>> var = xr.Variable(data=[1, 2, 3], dims="x")
>>> var.copy()
<xarray.Variable (x: 3)>
array([1, 2, 3])
>>> var_0 = var.copy(deep=False)
>>> var_0[0] = 7
>>> var_0
<xarray.Variable (x: 3)>
array([7, 2, 3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. The original
object is unaffected.
>>> var.copy(data=[0.1, 0.2, 0.3])
<xarray.Variable (x: 3)>
array([0.1, 0.2, 0.3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
See Also
--------
pandas.DataFrame.copy
"""
if data is None:
data = self._data
if isinstance(data, indexing.MemoryCachedArray):
# don't share caching between copies
data = indexing.MemoryCachedArray(data.array)
if deep:
data = copy.deepcopy(data)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return self._replace(data=data)
def _replace(
self, dims=_default, data=_default, attrs=_default, encoding=_default
) -> "Variable":
if dims is _default:
dims = copy.copy(self._dims)
if data is _default:
data = copy.copy(self.data)
if attrs is _default:
attrs = copy.copy(self._attrs)
if encoding is _default:
encoding = copy.copy(self._encoding)
return type(self)(dims, data, attrs, encoding, fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return getattr(self._data, "chunks", None)
_array_counter = itertools.count()
def chunk(self, chunks={}, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If chunks are not provided for one or more dimensions, chunk
sizes along those dimensions will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already a dask array.
Returns
-------
chunked : xarray.Variable
"""
import dask
import dask.array as da
if chunks is None:
warnings.warn(
"None value for 'chunks' is deprecated. "
"It will raise an error in the future. Use instead '{}'",
category=FutureWarning,
)
chunks = {}
if utils.is_dict_like(chunks):
chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}
data = self._data
if is_duck_dask_array(data):
data = data.rechunk(chunks)
else:
if isinstance(data, indexing.ExplicitlyIndexed):
# Unambiguously handle array storage backends (like NetCDF4 and h5py)
# that can't handle general array indexing. For example, in netCDF4 you
# can do "outer" indexing along two dimensions independent, which works
# differently from how NumPy handles it.
# da.from_array works by using lazy indexing with a tuple of slices.
# Using OuterIndexer is a pragmatic choice: dask does not yet handle
# different indexing types in an explicit way:
# https://github.com/dask/dask/issues/2883
data = indexing.ImplicitToExplicitIndexingAdapter(
data, indexing.OuterIndexer
)
if LooseVersion(dask.__version__) < "2.0.0":
kwargs = {}
else:
# All of our lazily loaded backend array classes should use NumPy
# array operations.
kwargs = {"meta": np.ndarray}
else:
kwargs = {}
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape))
data = da.from_array(data, chunks, name=name, lock=lock, **kwargs)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
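# Illustrative sketch (comments only; assumes dask is installed): chunking a
# numpy-backed variable converts it to a dask array with the requested blocks.
#   v = Variable(("x", "y"), np.zeros((4, 6)))
#   v.chunk({"x": 2, "y": 3}).chunks  # -> ((2, 2), (3, 3))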
def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA):
"""
Use a sparse array as the data backend.
"""
import sparse
# TODO: what to do if dask-backed?
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = dtypes.result_type(self.dtype, fill_value)
if sparse_format is _default:
sparse_format = "coo"
try:
as_sparse = getattr(sparse, f"as_{sparse_format.lower()}")
except AttributeError:
raise ValueError(f"{sparse_format} is not a valid sparse format")
data = as_sparse(self.data.astype(dtype), fill_value=fill_value)
return self._replace(data=data)
def _to_dense(self):
"""
Change the backend from sparse to np.ndarray.
"""
if hasattr(self._data, "todense"):
return self._replace(data=self._data.todense())
return self.copy(deep=False)
def isel(
self: VariableType,
indexers: Mapping[Hashable, Any] = None,
missing_dims: str = "raise",
**indexers_kwargs: Any,
) -> VariableType:
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
DataArray:
- "raise": raise an exception
- "warning": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
key = tuple(indexers.get(dim, slice(None)) for dim in self.dims)
return self[key]
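# Illustrative sketch (comments only): integer indexers drop the dimension,
# slices keep it.
#   v = Variable(("x", "y"), np.arange(6).reshape(2, 3))
#   v.isel(x=0).dims          # -> ("y",)
#   v.isel(y=slice(2)).shape  # -> (2, 2)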
def squeeze(self, dim=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : same type as caller
This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
"""
dims = common.get_squeeze_dims(self, dim)
return self.isel({d: 0 for d in dims})
def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):
axis = self.get_axis_num(dim)
if count > 0:
keep = slice(None, -count)
elif count < 0:
keep = slice(-count, None)
else:
keep = slice(None)
trimmed_data = self[(slice(None),) * axis + (keep,)].data
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
width = min(abs(count), self.shape[axis])
dim_pad = (width, 0) if count >= 0 else (0, width)
pads = [(0, 0) if d != dim else dim_pad for d in self.dims]
data = duck_array_ops.pad(
trimmed_data.astype(dtype),
pads,
mode="constant",
constant_values=fill_value,
)
if is_duck_dask_array(data):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
"""
Return a new Variable with shifted data.
Parameters
----------
shifts : mapping of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value : scalar, optional
Value to use for newly missing values.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but shifted data.
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift")
result = self
for dim, count in shifts.items():
result = result._shift_one_dim(dim, count, fill_value=fill_value)
return result
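# Illustrative sketch (comments only): shifting pads the vacated entries with
# fill_value, promoting integer data to float when NaN is needed.
#   v = Variable(("x",), np.arange(5))
#   v.shift(x=2)   # data -> [nan, nan, 0., 1., 2.]
#   v.shift(x=-1)  # data -> [1., 2., 3., 4., nan]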
def _pad_options_dim_to_index(
self,
pad_option: Mapping[Hashable, Union[int, Tuple[int, int]]],
fill_with_shape=False,
):
if fill_with_shape:
return [
(n, n) if d not in pad_option else pad_option[d]
for d, n in zip(self.dims, self.data.shape)
]
return [(0, 0) if d not in pad_option else pad_option[d] for d in self.dims]
def pad(
self,
pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,
mode: str = "constant",
stat_length: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
constant_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
end_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
reflect_type: str = None,
**pad_width_kwargs: Any,
):
"""
Return a new Variable with padded data.
Parameters
----------
pad_width : mapping of hashable to tuple of int
Mapping with the form of {dim: (pad_before, pad_after)}
describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode : str, default: "constant"
One of the padding modes supported by ``numpy.pad`` / ``dask.array.pad``.
stat_length : int, tuple or mapping of hashable to tuple
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
constant_values : scalar, tuple or mapping of hashable to tuple
Used in 'constant'. The values to set the padded values for each
axis.
end_values : scalar, tuple or mapping of hashable to tuple
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
reflect_type : {"even", "odd"}, optional
Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
**pad_width_kwargs
One of pad_width or pad_width_kwargs must be provided.
Returns
-------
padded : Variable
Variable with the same dimensions and attributes but padded data.
"""
pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad")
# change default behaviour of pad with mode constant
if mode == "constant" and (
constant_values is None or constant_values is dtypes.NA
):
dtype, constant_values = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
# convert dict-like pad options, keyed by dimension name, to index-ordered lists
if isinstance(stat_length, dict):
stat_length = self._pad_options_dim_to_index(
stat_length, fill_with_shape=True
)
if isinstance(constant_values, dict):
constant_values = self._pad_options_dim_to_index(constant_values)
if isinstance(end_values, dict):
end_values = self._pad_options_dim_to_index(end_values)
# workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303
if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]:
stat_length = [(n, n) for n in self.data.shape] # type: ignore
# change integer values to a tuple of two of those values and change pad_width to index
for k, v in pad_width.items():
if isinstance(v, numbers.Number):
pad_width[k] = (v, v)
pad_width_by_index = self._pad_options_dim_to_index(pad_width)
# create pad_option_kwargs; numpy/dask require only relevant kwargs to be nonempty
pad_option_kwargs = {}
if stat_length is not None:
pad_option_kwargs["stat_length"] = stat_length
if constant_values is not None:
pad_option_kwargs["constant_values"] = constant_values
if end_values is not None:
pad_option_kwargs["end_values"] = end_values
if reflect_type is not None:
pad_option_kwargs["reflect_type"] = reflect_type # type: ignore
array = duck_array_ops.pad(
self.data.astype(dtype, copy=False),
pad_width_by_index,
mode=mode,
**pad_option_kwargs,
)
return type(self)(self.dims, array)
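# Illustrative sketch (comments only): with the default mode="constant" and no
# explicit constant_values, integer data is promoted and padded with NaN.
#   v = Variable(("x",), np.arange(3))
#   v.pad(x=(1, 2))  # data -> [nan, 0., 1., 2., nan, nan]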
def _roll_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
count %= self.shape[axis]
if count != 0:
indices = [slice(-count, None), slice(None, -count)]
else:
indices = [slice(None)]
arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices]
data = duck_array_ops.concatenate(arrays, axis)
if is_duck_dask_array(data):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def roll(self, shifts=None, **shifts_kwargs):
"""
Return a new Variable with rolled data.
Parameters
----------
shifts : mapping of hashable to int
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but rolled data.
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
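# Illustrative sketch (comments only): rolling wraps values around instead of
# filling with a fill value.
#   v = Variable(("x",), np.arange(5))
#   v.roll(x=2)  # data -> [3, 4, 0, 1, 2]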
def transpose(self, *dims) -> "Variable":
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
This operation returns a view of this variable's data. It is
lazy for dask-backed Variables but not for numpy-backed Variables.
See Also
--------
numpy.transpose
"""
if len(dims) == 0:
dims = self.dims[::-1]
dims = tuple(infix_dims(dims, self.dims))
axes = self.get_axis_num(dims)
if len(dims) < 2 or dims == self.dims:
# no need to transpose if only one dimension
# or dims are in same order
return self.copy(deep=False)
data = as_indexable(self._data).transpose(axes)
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
@property
def T(self) -> "Variable":
return self.transpose()
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to a variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
"""
if isinstance(dims, str):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError(
"new dimensions %r must be a superset of "
"existing dimensions %r" % (dims, self.dims)
)
self_dims = set(self.dims)
expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(
expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True
)
return expanded_var.transpose(*dims)
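# Illustrative sketch (comments only): new dimensions are inserted with
# length 1 unless a dict (or `shape`) gives explicit sizes, in which case the
# data is broadcast.
#   v = Variable(("x",), [1, 2, 3])
#   v.set_dims(("y", "x")).shape        # -> (1, 3)
#   v.set_dims({"y": 2, "x": 3}).shape  # -> (2, 3)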
def _stack_once(self, dims, new_dim):
if not set(dims) <= set(self.dims):
raise ValueError("invalid existing dimensions: %s" % dims)
if new_dim in self.dims:
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if len(dims) == 0:
# don't stack
return self.copy(deep=False)
other_dims = [d for d in self.dims if d not in dims]
dim_order = other_dims + list(dims)
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + (-1,)
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + (new_dim,)
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def stack(self, dimensions=None, **dimensions_kwargs):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to tuple of hashable
Mapping of form new_name=(dim1, dim2, ...) describing the
names of new dimensions, and the existing dimensions that
they replace.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See also
--------
Variable.unstack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack")
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
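# Illustrative sketch (comments only): stacked data is laid out in C
# (row-major) order over the stacked dimensions, as the reshape above relies on.
#   v = Variable(("x", "y"), np.arange(6).reshape(2, 3))
#   v.stack(z=("x", "y")).data  # -> [0, 1, 2, 3, 4, 5], dims ("z",)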
def _unstack_once(self, dims, old_dim):
new_dim_names = tuple(dims.keys())
new_dim_sizes = tuple(dims.values())
if old_dim not in self.dims:
raise ValueError("invalid existing dimension: %s" % old_dim)
if set(new_dim_names).intersection(self.dims):
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if np.prod(new_dim_sizes) != self.sizes[old_dim]:
raise ValueError(
"the product of the new dimension sizes must "
"equal the size of the old dimension"
)
other_dims = [d for d in self.dims if d != old_dim]
dim_order = other_dims + [old_dim]
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + new_dim_names
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def unstack(self, dimensions=None, **dimensions_kwargs):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to mapping of hashable to int
Mapping of the form old_dim={dim1: size1, ...} describing the
names of existing dimensions, and the new dimensions and sizes
that they map to.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See also
--------
Variable.stack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack")
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once(dims, old_dim)
return result
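# Illustrative sketch (comments only): unstack is the inverse of stack when
# the given sizes multiply out to the old dimension's size.
#   v = Variable(("x", "y"), np.arange(6).reshape(2, 3))
#   w = v.stack(z=("x", "y"))
#   w.unstack(z={"x": 2, "y": 3}).shape  # -> (2, 3)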
def fillna(self, value):
return ops.fillna(self, value)
def where(self, cond, other=dtypes.NA):
return ops.where_method(self, cond, other)
def reduce(
self,
func,
dim=None,
axis=None,
keep_attrs=None,
keepdims=False,
**kwargs,
):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
keepdims : bool, default: False
If True, the dimensions which are reduced are left in the result
as dimensions of size one
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim == ...:
dim = None
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if dim is not None:
axis = self.get_axis_num(dim)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", r"Mean of empty slice", category=RuntimeWarning
)
if axis is not None:
data = func(self.data, axis=axis, **kwargs)
else:
data = func(self.data, **kwargs)
if getattr(data, "shape", ()) == self.shape:
dims = self.dims
else:
removed_axes = (
range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim
)
if keepdims:
# Insert np.newaxis for removed dims
slices = tuple(
np.newaxis if i in removed_axes else slice(None, None)
for i in range(self.ndim)
)
if getattr(data, "shape", None) is None:
# Reduce has produced a scalar value, not an array-like
data = np.asanyarray(data)[slices]
else:
data = data[slices]
dims = self.dims
else:
dims = [
adim for n, adim in enumerate(self.dims) if n not in removed_axes
]
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
return Variable(dims, data, attrs=attrs)
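# Illustrative sketch (comments only): any numpy-style reduction can be passed in.
#   v = Variable(("x", "y"), np.arange(6).reshape(2, 3))
#   v.reduce(np.mean, dim="y").data  # -> [1., 4.], dims ("x",)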
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Variable
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of array-like, optional
List of integer arrays which specifies the integer positions to
which to assign each variable along the concatenated dimension.
If not supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
if not isinstance(dim, str):
(dim,) = dim.dims
# can't do this lazily: we need to loop through variables at least
# twice
variables = list(variables)
first_var = variables[0]
arrays = [v.data for v in variables]
if dim in first_var.dims:
axis = first_var.get_axis_num(dim)
dims = first_var.dims
data = duck_array_ops.concatenate(arrays, axis=axis)
if positions is not None:
# TODO: deprecate this option -- we don't need it for groupby
# any more.
indices = nputils.inverse_permutation(np.concatenate(positions))
data = duck_array_ops.take(data, indices, axis=axis)
else:
axis = 0
dims = (dim,) + first_var.dims
data = duck_array_ops.stack(arrays, axis=axis)
attrs = dict(first_var.attrs)
encoding = dict(first_var.encoding)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError(
f"Variable has dimensions {list(var.dims)} but first Variable has dimensions {list(first_var.dims)}"
)
return cls(dims, data, attrs, encoding)
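# Illustrative sketch (comments only): an existing dimension name concatenates
# along that axis; a new dimension name stacks along a new leading axis.
#   Variable.concat([Variable("x", [1, 2]), Variable("x", [3])], dim="x")
#       # -> dims ("x",), data [1, 2, 3]
#   Variable.concat([Variable("x", [1, 2]), Variable("x", [3, 4])], dim="t")
#       # -> dims ("t", "x"), shape (2, 2)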
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and (
self._data is other._data or equiv(self.data, other.data)
)
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other, equiv=equiv)
def identical(self, other, equiv=duck_array_ops.array_equiv):
"""Like equals, but also checks attributes."""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self.equals(
other, equiv=equiv
)
except (TypeError, AttributeError):
return False
def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv):
"""True if the intersection of two Variable's non-null data is
equal; otherwise false.
Variables can thus still be equal if there are locations where either,
or both, contain NaN values.
"""
return self.broadcast_equals(other, equiv=equiv)
def quantile(
self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True
):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantile(s) of the array elements.
Parameters
----------
q : float or sequence of float
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
skipna : bool, optional
If True (default), missing values (as marked by NaN) are skipped by
computing quantiles with ``np.nanquantile`` instead of ``np.quantile``.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanquantile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
from .computation import apply_ufunc
_quantile_func = np.nanquantile if skipna else np.quantile
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
scalar = utils.is_scalar(q)
q = np.atleast_1d(np.asarray(q, dtype=np.float64))
if dim is None:
dim = self.dims
if utils.is_scalar(dim):
dim = [dim]
def _wrapper(npa, **kwargs):
# move quantile axis to end. required for apply_ufunc
return np.moveaxis(_quantile_func(npa, **kwargs), 0, -1)
axis = np.arange(-1, -1 * len(dim) - 1, -1)
result = apply_ufunc(
_wrapper,
self,
input_core_dims=[dim],
exclude_dims=set(dim),
output_core_dims=[["quantile"]],
output_dtypes=[np.float64],
dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}),
dask="parallelized",
kwargs={"q": q, "axis": axis, "interpolation": interpolation},
)
# for backward compatibility
result = result.transpose("quantile", ...)
if scalar:
result = result.squeeze("quantile")
if keep_attrs:
result.attrs = self._attrs
return result
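# Illustrative sketch (comments only): a scalar q gives a scalar result; a
# sequence adds a "quantile" dimension as the first axis.
#   v = Variable(("x",), [0.0, 1.0, 2.0, 3.0])
#   v.quantile(0.5, dim="x")           # -> 0-d Variable, value 1.5
#   v.quantile([0.25, 0.75], dim="x")  # -> dims ("quantile",), values [0.75, 2.25]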
def rank(self, dim, pct=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
ranked : Variable
See Also
--------
Dataset.rank, DataArray.rank
"""
import bottleneck as bn
data = self.data
if is_duck_dask_array(data):
raise TypeError(
"rank does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method."
)
elif not isinstance(data, np.ndarray):
raise TypeError(
"rank is not implemented for {} objects.".format(type(data))
)
axis = self.get_axis_num(dim)
func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata
ranked = func(data, axis=axis)
if pct:
count = np.sum(~np.isnan(data), axis=axis, keepdims=True)
ranked /= count
return Variable(self.dims, ranked)
def rolling_window(
self, dim, window, window_dim, center=False, fill_value=dtypes.NA
):
"""
Make a rolling_window along dim and add a new_dim to the last place.
Parameters
----------
dim : str
Dimension over which to compute rolling_window.
For nd-rolling, should be list of dimensions.
window : int
Size of the rolling window.
For nd-rolling, should be a list of integers.
window_dim : str
New name of the window dimension.
For nd-rolling, should be a list of strings.
center : bool, default: False
If True, pad with fill_value at both ends. Otherwise, pad at the
beginning of the axis.
fill_value
The value used to fill the padded entries.
Returns
-------
Variable that is a view of the original array with an added dimension of
the window size.
The returned dims: self.dims + (window_dim,)
The returned shape: self.shape + (window,)
Examples
--------
>>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4)))
>>> v.rolling_window("b", 3, "window_dim")
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
>>> v.rolling_window("b", 3, "window_dim", center=True)
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.],
[ 2., 3., nan]],
<BLANKLINE>
[[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.],
[ 6., 7., nan]]])
"""
if fill_value is dtypes.NA: # np.nan is passed
dtype, fill_value = dtypes.maybe_promote(self.dtype)
array = self.astype(dtype, copy=False).data
else:
dtype = self.dtype
array = self.data
if isinstance(dim, list):
assert len(dim) == len(window)
assert len(dim) == len(window_dim)
assert len(dim) == len(center)
else:
dim = [dim]
window = [window]
window_dim = [window_dim]
center = [center]
axis = [self.get_axis_num(d) for d in dim]
new_dims = self.dims + tuple(window_dim)
return Variable(
new_dims,
duck_array_ops.rolling_window(
array, axis=axis, window=window, center=center, fill_value=fill_value
),
)
def coarsen(
self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs
):
"""
Apply a reduction function over non-overlapping windows of the given sizes (block coarsening).
"""
windows = {k: v for k, v in windows.items() if k in self.dims}
if not windows:
return self.copy()
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
_attrs = self.attrs
else:
_attrs = None
reshaped, axes = self._coarsen_reshape(windows, boundary, side)
if isinstance(func, str):
name = func
func = getattr(duck_array_ops, name, None)
if func is None:
raise NameError(f"{name} is not a valid method.")
return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs)
def _coarsen_reshape(self, windows, boundary, side):
"""
Construct a reshaped-array for coarsen
"""
if not utils.is_dict_like(boundary):
boundary = {d: boundary for d in windows.keys()}
if not utils.is_dict_like(side):
side = {d: side for d in windows.keys()}
# remove unrelated dimensions
boundary = {k: v for k, v in boundary.items() if k in windows}
side = {k: v for k, v in side.items() if k in windows}
for d, window in windows.items():
if window <= 0:
raise ValueError(f"window must be > 0. Given {window}")
variable = self
for d, window in windows.items():
# trim or pad the object
size = variable.shape[self._get_axis_num(d)]
n = int(size / window)
if boundary[d] == "exact":
if n * window != size:
raise ValueError(
"Could not coarsen a dimension of size {} with "
"window {}".format(size, window)
)
elif boundary[d] == "trim":
if side[d] == "left":
variable = variable.isel({d: slice(0, window * n)})
else:
excess = size - window * n
variable = variable.isel({d: slice(excess, None)})
elif boundary[d] == "pad": # pad
pad = window * n - size
if pad < 0:
pad += window
if side[d] == "left":
pad_width = {d: (0, pad)}
else:
pad_width = {d: (pad, 0)}
variable = variable.pad(pad_width, mode="constant")
else:
raise TypeError(
"{} is invalid for boundary. Valid option is 'exact', "
"'trim' and 'pad'".format(boundary[d])
)
shape = []
axes = []
axis_count = 0
for i, d in enumerate(variable.dims):
if d in windows:
size = variable.shape[i]
shape.append(int(size / windows[d]))
shape.append(windows[d])
axis_count += 1
axes.append(i + axis_count)
else:
shape.append(variable.shape[i])
return variable.data.reshape(shape), tuple(axes)
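# Illustrative sketch of coarsen (comments only): block-reshape, then reduce
# over the window axes, e.g. the mean over non-overlapping windows of size 2.
#   v = Variable(("x",), np.arange(6))
#   v.coarsen({"x": 2}, "mean").data  # -> [0.5, 2.5, 4.5]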
def isnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is a missing value.
Returns
-------
isnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.isnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.isnull()
<xarray.Variable (x: 3)>
array([False, True, False])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.isnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
def notnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is not a missing value.
Returns
-------
notnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.notnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.notnull()
<xarray.Variable (x: 3)>
array([ True, False, True])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.notnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
@property
def real(self):
return type(self)(self.dims, self.data.real, self._attrs)
@property
def imag(self):
return type(self)(self.dims, self.data.imag, self._attrs)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
keep_attrs = kwargs.pop("keep_attrs", None)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
with np.errstate(all="ignore"):
result = self.__array_wrap__(f(self.data, *args, **kwargs))
if keep_attrs:
result.attrs = self.attrs
return result
return func
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (xr.DataArray, xr.Dataset)):
return NotImplemented
self_data, other_data, dims = _broadcast_compat_data(self, other)
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
with np.errstate(all="ignore"):
new_data = (
f(self_data, other_data)
if not reflexive
else f(other_data, self_data)
)
result = Variable(dims, new_data, attrs=attrs)
return result
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, xr.Dataset):
raise TypeError("cannot add a Dataset to a Variable in-place")
self_data, other_data, dims = _broadcast_compat_data(self, other)
if dims != self.dims:
raise ValueError("dimensions cannot change for in-place operations")
with np.errstate(all="ignore"):
self.values = f(self_data, other_data)
return self
return func
def _to_numeric(self, offset=None, datetime_unit=None, dtype=float):
"""A (private) method to convert datetime array to numeric dtype
See duck_array_ops.datetime_to_numeric
"""
numeric_array = duck_array_ops.datetime_to_numeric(
self.data, offset, datetime_unit, dtype
)
return type(self)(self.dims, numeric_array, self._attrs)
def _unravel_argminmax(
self,
argminmax: str,
dim: Union[Hashable, Sequence[Hashable], None],
axis: Union[int, None],
keep_attrs: Optional[bool],
skipna: Optional[bool],
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Apply argmin or argmax over one or more dimensions, returning the result as a
dict of Variables that can be passed directly to isel.
"""
if dim is None and axis is None:
warnings.warn(
"Behaviour of argmin/argmax with neither dim nor axis argument will "
"change to return a dict of indices of each dimension. To get a "
"single, flat index, please use np.argmin(da.data) or "
"np.argmax(da.data) instead of da.argmin() or da.argmax().",
DeprecationWarning,
stacklevel=3,
)
argminmax_func = getattr(duck_array_ops, argminmax)
if dim is ...:
# In future, should do this also when (dim is None and axis is None)
dim = self.dims
if (
dim is None
or axis is not None
or not isinstance(dim, Sequence)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
return self.reduce(
argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna
)
# Get a name for the new dimension that does not conflict with any existing
# dimension
newdimname = "_unravel_argminmax_dim_0"
count = 1
while newdimname in self.dims:
newdimname = f"_unravel_argminmax_dim_{count}"
count += 1
stacked = self.stack({newdimname: dim})
result_dims = stacked.dims[:-1]
reduce_shape = tuple(self.sizes[d] for d in dim)
result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna)
result_unravelled_indices = duck_array_ops.unravel_index(
result_flat_indices.data, reduce_shape
)
result = {
d: Variable(dims=result_dims, data=i)
for d, i in zip(dim, result_unravelled_indices)
}
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
for v in result.values():
v.attrs = self.attrs
return result
def argmin(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the minimum of the Variable over one or more dimensions.
If a sequence is passed to 'dim', then result returned as dict of Variables,
which can be passed directly to isel(). If a single str is passed to 'dim' then
returns a Variable with dtype int.
If there are multiple minima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the minimum. By default, finds minimum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See also
--------
DataArray.argmin, DataArray.idxmin
"""
return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna)
def argmax(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the maximum of the Variable over one or more dimensions.
If a sequence is passed to 'dim', then result returned as dict of Variables,
which can be passed directly to isel(). If a single str is passed to 'dim' then
returns a Variable with dtype int.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See also
--------
DataArray.argmax, DataArray.idxmax
"""
return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna)
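# Illustrative sketch (comments only): passing a sequence of dimensions
# returns a dict of index Variables that can be fed straight to isel.
#   v = Variable(("x", "y"), np.array([[1, 5, 2], [0, 3, 4]]))
#   v.argmax(dim=["x", "y"])  # -> {"x": 0-d Variable(0), "y": 0-d Variable(1)}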
ops.inject_all_ops_and_reduce_methods(Variable)
class IndexVariable(Variable):
"""Wrapper for accommodating a pandas.Index in an xarray.Variable.
IndexVariable preserves loaded values in the form of a pandas.Index instead
of a NumPy array. Hence, their values are immutable and must always be one-
dimensional.
They also have a name property, which is the name of their sole dimension
unless another name is given.
"""
__slots__ = ()
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
super().__init__(dims, data, attrs, encoding, fastpath)
if self.ndim != 1:
raise ValueError("%s objects must be 1-dimensional" % type(self).__name__)
# Unlike in Variable, always eagerly load values into memory
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
def __dask_tokenize__(self):
from dask.base import normalize_token
# Don't waste time converting pd.Index to np.ndarray
return normalize_token((type(self), self._dims, self._data.array, self._attrs))
def load(self):
# data is already loaded into memory for IndexVariable
return self
# https://github.com/python/mypy/issues/1465
@Variable.data.setter # type: ignore
def data(self, data):
raise ValueError(
f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
@Variable.values.setter # type: ignore
def values(self, values):
raise ValueError(
f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
def chunk(self, chunks={}, name=None, lock=False):
# Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()
return self.copy(deep=False)
def _as_sparse(self, sparse_format=_default, fill_value=_default):
# Dummy
return self.copy(deep=False)
def _to_dense(self):
# Dummy
return self.copy(deep=False)
def _finalize_indexing_result(self, dims, data):
if getattr(data, "ndim", 0) != 1:
# returns Variable rather than IndexVariable if multi-dimensional
return Variable(dims, data, self._attrs, self._encoding)
else:
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def __setitem__(self, key, value):
raise TypeError("%s values cannot be modified" % type(self).__name__)
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Specialized version of Variable.concat for IndexVariable objects.
This exists because we want to avoid converting Index objects to NumPy
arrays, if possible.
"""
if not isinstance(dim, str):
(dim,) = dim.dims
variables = list(variables)
first_var = variables[0]
if any(not isinstance(v, cls) for v in variables):
raise TypeError(
"IndexVariable.concat requires that all input "
"variables be IndexVariable objects"
)
indexes = [v._data.array for v in variables]
if not indexes:
data = []
else:
data = indexes[0].append(indexes[1:])
if positions is not None:
indices = nputils.inverse_permutation(np.concatenate(positions))
data = data.take(indices)
attrs = dict(first_var.attrs)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError("inconsistent dimensions")
utils.remove_incompatible_items(attrs, var.attrs)
return cls(first_var.dims, data, attrs)
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Deep is ignored when data is given. Whether the data array is
loaded into memory and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
"""
if data is None:
data = self._data.copy(deep=deep)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def equals(self, other, equiv=None):
# if equiv is specified, defer to the superclass implementation
if equiv is not None:
return super().equals(other, equiv)
# otherwise use the native index equals, rather than looking at _data
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and self._data_equals(other)
except (TypeError, AttributeError):
return False
def _data_equals(self, other):
return self.to_index().equals(other.to_index())
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return self
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
# n.b. creating a new pandas.Index from an old pandas.Index is
# basically free as pandas.Index objects are immutable
assert self.ndim == 1
index = self._data.array
if isinstance(index, pd.MultiIndex):
# set default names for multi-index unnamed levels so that
# we can safely rename dimension / coordinate later
valid_level_names = [
name or "{}_level_{}".format(self.dims[0], i)
for i, name in enumerate(index.names)
]
index = index.set_names(valid_level_names)
else:
index = index.set_names(self.name)
return index
@property
def level_names(self):
"""Return MultiIndex level names or None if this IndexVariable has no
MultiIndex.
"""
index = self.to_index()
if isinstance(index, pd.MultiIndex):
return index.names
else:
return None
def get_level_variable(self, level):
"""Return a new IndexVariable from a given MultiIndex level."""
if self.level_names is None:
raise ValueError("IndexVariable %r has no MultiIndex" % self.name)
index = self.to_index()
return type(self)(self.dims, index.get_level_values(level))
@property
def name(self):
return self.dims[0]
@name.setter
def name(self, value):
raise AttributeError("cannot modify name of IndexVariable in-place")
# for backwards compatibility
Coordinate = utils.alias(IndexVariable, "Coordinate")
def _unified_dims(variables):
# validate dimensions
all_dims = {}
for var in variables:
var_dims = var.dims
if len(set(var_dims)) < len(var_dims):
raise ValueError(
"broadcasting cannot handle duplicate "
"dimensions: %r" % list(var_dims)
)
for d, s in zip(var_dims, var.shape):
if d not in all_dims:
all_dims[d] = s
elif all_dims[d] != s:
raise ValueError(
"operands cannot be broadcast together "
"with mismatched lengths for dimension %r: %s"
% (d, (all_dims[d], s))
)
return all_dims
def _broadcast_compat_variables(*variables):
"""Create broadcast compatible variables, with the same dimensions.
Unlike the result of broadcast_variables(), some variables may have
dimensions of size 1 instead of the size of the broadcast dimension.
"""
dims = tuple(_unified_dims(variables))
return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables)
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(
var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables
)
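# Hypothetical usage sketch (editor's addition, not part of the original
# source): broadcasting is done by dimension name, not by position.
#
# >>> a = Variable(("x",), [1, 2, 3])
# >>> b = Variable(("y",), [10, 20])
# >>> xa, xb = broadcast_variables(a, b)
# >>> xa.dims, xa.shape                # -> ('x', 'y'), (3, 2); xb matches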
def _broadcast_compat_data(self, other):
if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]):
# `other` satisfies the necessary Variable API for broadcast_variables
new_self, new_other = _broadcast_compat_variables(self, other)
self_data = new_self.data
other_data = new_other.data
dims = new_self.dims
else:
# rely on numpy broadcasting rules
self_data = self.data
other_data = other
dims = self.dims
return self_data, other_data, dims
def concat(variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Variable
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of array-like, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
variables = list(variables)
if all(isinstance(v, IndexVariable) for v in variables):
return IndexVariable.concat(variables, dim, positions, shortcut)
else:
return Variable.concat(variables, dim, positions, shortcut)
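# Hypothetical usage sketch (editor's addition, not part of the original
# source): concatenating along a brand-new dimension inserts it at axis=0.
#
# >>> v1 = Variable(("x",), [1, 2])
# >>> v2 = Variable(("x",), [3, 4])
# >>> concat([v1, v2], dim="t").dims   # -> ('t', 'x'), shape (2, 2)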
def assert_unique_multiindex_level_names(variables):
"""Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
"""
level_names = defaultdict(list)
all_level_names = set()
for var_name, var in variables.items():
if isinstance(var._data, PandasIndexAdapter):
idx_level_names = var.to_index_variable().level_names
if idx_level_names is not None:
for n in idx_level_names:
level_names[n].append(f"{n!r} ({var_name})")
if idx_level_names:
all_level_names.update(idx_level_names)
for k, v in level_names.items():
if k in variables:
v.append("(%s)" % k)
duplicate_names = [v for v in level_names.values() if len(v) > 1]
if duplicate_names:
conflict_str = "\n".join(", ".join(v) for v in duplicate_names)
raise ValueError("conflicting MultiIndex level name(s):\n%s" % conflict_str)
# Check for conflicts between level names and dimensions (GH:2299)
for k, v in variables.items():
for d in v.dims:
if d in all_level_names:
raise ValueError(
"conflicting level / dimension names. {} "
"already exists as a level name.".format(d)
)
import copy
import functools
import itertools
import numbers
import warnings
from collections import defaultdict
from datetime import timedelta
from distutils.version import LooseVersion
from typing import (
Any,
Dict,
Hashable,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import numpy as np
import pandas as pd
import xarray as xr # only for Dataset and DataArray
from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils
from .indexing import (
BasicIndexer,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
as_indexable,
)
from .npcompat import IS_NEP18_ACTIVE
from .options import _get_keep_attrs
from .pycompat import (
cupy_array_type,
dask_array_type,
integer_types,
is_duck_dask_array,
)
from .utils import (
OrderedSet,
_default,
decode_numpy_dict_values,
drop_dims_from_indexers,
either_dict_or_kwargs,
ensure_us_time_resolution,
infix_dims,
is_duck_array,
)
NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
(
indexing.ExplicitlyIndexed,
pd.Index,
)
+ dask_array_type
+ cupy_array_type
)
# https://github.com/python/mypy/issues/224
BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore
VariableType = TypeVar("VariableType", bound="Variable")
"""Type annotation to be used when methods of Variable return self or a copy of self.
When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the
output as an instance of the subclass.
Usage::
class Variable:
def f(self: VariableType, ...) -> VariableType:
...
"""
class MissingDimensionsError(ValueError):
"""Error class used when we can't safely guess a dimension name."""
# inherits from ValueError for backward compatibility
# TODO: move this to an xarray.exceptions module?
def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]":
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
"""
from .dataarray import DataArray
# TODO: consider extending this method to automatically handle Iris and
if isinstance(obj, DataArray):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except (TypeError, ValueError) as error:
# use .format() instead of % because it handles tuples consistently
raise error.__class__(
"Could not convert tuple of form "
"(dims, data[, attrs, encoding]): "
"{} to Variable.".format(obj)
)
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
obj = Variable(obj.name, obj)
elif isinstance(obj, (set, dict)):
raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj)))
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise MissingDimensionsError(
"cannot set variable %r with %r-dimensional data "
"without explicit dimension names. Pass a tuple of "
"(dims, data) instead." % (name, data.ndim)
)
obj = Variable(name, data, fastpath=True)
else:
raise TypeError(
"unable to convert object into a variable without an "
"explicit list of dimensions: %r" % obj
)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise MissingDimensionsError(
"%r has more than 1-dimension and the same name as one of its "
"dimensions %r. xarray disallows such variables because they "
"conflict with the coordinates used to label "
"dimensions." % (name, obj.dims)
)
obj = obj.to_index_variable()
return obj
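# Hypothetical usage sketch (editor's addition, not part of the original
# source): the (dims, data) tuple path and the named-1D path of as_variable.
#
# >>> as_variable((["x"], [1, 2, 3])).dims   # -> ('x',)
# >>> as_variable([10, 20], name="x")        # name matches the sole dim, so an IndexVariable is returned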
def _maybe_wrap_data(data):
"""
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
return PandasIndexAdapter(data)
return data
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64, according to the pandas convention. Also used for
validating that datetime64 and timedelta64 objects are within the valid date
range for ns precision, as pandas will raise an error if they are not.
"""
return np.asarray(pd.Series(values.ravel())).reshape(values.shape)
def as_compatible_data(data, fastpath=False):
"""Prepare and wrap data to put in a Variable.
- If data does not have the necessary attributes, convert it to ndarray.
- If data has dtype=datetime64, ensure that it has ns precision. If it's a
pandas.Timestamp, convert it to datetime64.
- If data is already a pandas or xarray object (other than an Index), just
use the values.
Finally, wrap it up with an adapter if necessary.
"""
if fastpath and getattr(data, "ndim", 0) > 0:
# can't use fastpath (yet) for scalars
return _maybe_wrap_data(data)
if isinstance(data, Variable):
return data.data
if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
return _maybe_wrap_data(data)
if isinstance(data, tuple):
data = utils.to_0d_object_array(data)
if isinstance(data, pd.Timestamp):
# TODO: convert, handle datetime objects, too
data = np.datetime64(data.value, "ns")
if isinstance(data, timedelta):
data = np.timedelta64(getattr(data, "value", data), "ns")
# we don't want nested self-described arrays
data = getattr(data, "values", data)
if isinstance(data, np.ma.MaskedArray):
mask = np.ma.getmaskarray(data)
if mask.any():
dtype, fill_value = dtypes.maybe_promote(data.dtype)
data = np.asarray(data, dtype=dtype)
data[mask] = fill_value
else:
data = np.asarray(data)
if not isinstance(data, np.ndarray):
if hasattr(data, "__array_function__"):
if IS_NEP18_ACTIVE:
return data
else:
raise TypeError(
"Got an NumPy-like array type providing the "
"__array_function__ protocol but NEP18 is not enabled. "
"Check that numpy >= v1.16 and that the environment "
'variable "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION" is set to '
'"1"'
)
# validate whether the data has a valid data type.
data = np.asarray(data)
if isinstance(data, np.ndarray):
if data.dtype.kind == "O":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "M":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "m":
data = _possibly_convert_objects(data)
return _maybe_wrap_data(data)
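# Hypothetical usage sketch (editor's addition, not part of the original
# source): pandas Indexes are wrapped rather than copied, and masked values
# are filled after promoting the dtype.
#
# >>> as_compatible_data(pd.Index([1, 2, 3]))                  # -> PandasIndexAdapter wrapper
# >>> as_compatible_data(np.ma.masked_invalid([1.0, np.nan]))  # -> float array with NaN filled in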
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed
"""
if isinstance(data, cupy_array_type):
data = data.get()
else:
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == "M":
data = np.datetime64(data, "ns")
elif data.dtype.kind == "m":
data = np.timedelta64(data, "ns")
return data
class Variable(
common.AbstractArray, arithmetic.SupportsArithmetic, utils.NdimSizeLenMixin
):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
name. For example, adding a Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
"""
__slots__ = ("_dims", "_data", "_attrs", "_encoding")
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
"""
Parameters
----------
dims : str or sequence of str
Name(s) of the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions.
data : array_like
Data array which supports numpy-like data access.
attrs : dict_like or None, optional
Attributes to assign to the new variable. If None (default), an
empty attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
Well-behaved code to serialize a Variable should ignore
unrecognized encoding items.
"""
self._data = as_compatible_data(data, fastpath=fastpath)
self._dims = self._parse_dimensions(dims)
self._attrs = None
self._encoding = None
if attrs is not None:
self.attrs = attrs
if encoding is not None:
self.encoding = encoding
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@property
def nbytes(self):
return self.size * self.dtype.itemsize
@property
def _in_memory(self):
return isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or (
isinstance(self._data, indexing.MemoryCachedArray)
and isinstance(self._data.array, indexing.NumpyIndexingAdapter)
)
@property
def data(self):
if is_duck_array(self._data):
return self._data
else:
return self.values
@data.setter
def data(self, data):
data = as_compatible_data(data)
if data.shape != self.shape:
raise ValueError(
f"replacement data must match the Variable's shape. "
f"replacement data has shape {data.shape}; Variable has shape {self.shape}"
)
self._data = data
def astype(
self: VariableType,
dtype,
*,
order=None,
casting=None,
subok=None,
copy=None,
keep_attrs=True,
) -> VariableType:
"""
Copy of the Variable object, with data cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result. 'C' means C order,
'F' means Fortran order, 'A' means 'F' order if all the arrays are
Fortran contiguous, 'C' order otherwise, and 'K' means as close to
the order the array elements appear in memory as possible.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise the
returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
keep_attrs : bool, optional
By default, astype keeps attributes. Set to False to remove
attributes in the returned object.
Returns
-------
out : same as object
New object with data cast to the specified type.
Notes
-----
The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed
through to the ``astype`` method of the underlying array when a value
different than ``None`` is supplied.
Make sure to only supply these arguments if the underlying array class
supports them.
See also
--------
numpy.ndarray.astype
dask.array.Array.astype
sparse.COO.astype
"""
from .computation import apply_ufunc
kwargs = dict(order=order, casting=casting, subok=subok, copy=copy)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
return apply_ufunc(
duck_array_ops.astype,
self,
dtype,
kwargs=kwargs,
keep_attrs=keep_attrs,
dask="allowed",
)
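# Hypothetical usage sketch (editor's addition, not part of the original
# source): attributes survive the cast by default, and the numpy-only
# kwargs are only forwarded when explicitly supplied.
#
# >>> v = Variable(("x",), [1, 2, 3], attrs={"units": "m"})
# >>> v.astype(float).attrs            # -> {'units': 'm'}
# >>> v.astype(float, casting="safe")  # forwards casting= to the backend astype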
def load(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return this variable.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
if is_duck_dask_array(self._data):
self._data = as_compatible_data(self._data.compute(**kwargs))
elif not is_duck_array(self._data):
self._data = np.asarray(self._data)
return self
def compute(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return a new variable. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def __dask_tokenize__(self):
# Use v.data, instead of v._data, in order to cope with the wrappers
# around NetCDF and the like
from dask.base import normalize_token
return normalize_token((type(self), self._dims, self.data, self._attrs))
def __dask_graph__(self):
if is_duck_dask_array(self._data):
return self._data.__dask_graph__()
else:
return None
def __dask_keys__(self):
return self._data.__dask_keys__()
def __dask_layers__(self):
return self._data.__dask_layers__()
@property
def __dask_optimize__(self):
return self._data.__dask_optimize__
@property
def __dask_scheduler__(self):
return self._data.__dask_scheduler__
def __dask_postcompute__(self):
array_func, array_args = self._data.__dask_postcompute__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
def __dask_postpersist__(self):
array_func, array_args = self._data.__dask_postpersist__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
@staticmethod
def _dask_finalize(results, array_func, array_args, dims, attrs, encoding):
data = array_func(results, *array_args)
return Variable(dims, data, attrs=attrs, encoding=encoding)
@property
def values(self):
"""The variable's data as a numpy.ndarray"""
return _as_array_or_item(self._data)
@values.setter
def values(self, values):
self.data = values
def to_base_variable(self):
"""Return this variable as a base xarray.Variable"""
return Variable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_variable = utils.alias(to_base_variable, "to_variable")
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return IndexVariable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
return self.to_index_variable().to_index()
def to_dict(self, data=True):
"""Dictionary representation of variable."""
item = {"dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs)}
if data:
item["data"] = ensure_us_time_resolution(self.values).tolist()
else:
item.update({"dtype": str(self.dtype), "shape": self.shape})
return item
@property
def dims(self):
"""Tuple of dimension names with which this variable is associated."""
return self._dims
@dims.setter
def dims(self, value):
self._dims = self._parse_dimensions(value)
def _parse_dimensions(self, dims):
if isinstance(dims, str):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError(
"dimensions %s must have the same length as the "
"number of data dimensions, ndim=%s" % (dims, self.ndim)
)
return dims
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
def _broadcast_indexes(self, key):
"""Prepare an indexing key for an indexing operation.
Parameters
-----------
key: int, slice, array-like, dict or tuple of integer, slice and array-like
Any valid input for indexing.
Returns
-------
dims : tuple
Dimension of the resultant variable.
indexers : IndexingTuple subclass
Tuple of integer, array-like, or slices to use when indexing
self._data. The type of this argument indicates the type of
indexing to perform, either basic, outer or vectorized.
new_order : Optional[Sequence[int]]
Optional reordering to do on the result of indexing. If not None,
the first len(new_order) axes of the indexing result should be moved to these
positions.
"""
key = self._item_key_to_tuple(key) # key is a tuple
# key is a tuple of full size
key = indexing.expanded_indexer(key, self.ndim)
# Convert a scalar Variable to an integer
key = tuple(
k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key
)
# Convert a 0d-array to an integer
key = tuple(
k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key
)
if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
return self._broadcast_indexes_basic(key)
self._validate_indexers(key)
# Detect whether the key can be mapped as an outer indexer:
# if every entry in the key is unlabeled, the key can be mapped
# as an OuterIndexer.
if all(not isinstance(k, Variable) for k in key):
return self._broadcast_indexes_outer(key)
# If every entry in the key is 1-dimensional and there are no
# duplicate labels, the key can be mapped as an OuterIndexer.
dims = []
for k, d in zip(key, self.dims):
if isinstance(k, Variable):
if len(k.dims) > 1:
return self._broadcast_indexes_vectorized(key)
dims.append(k.dims[0])
elif not isinstance(k, integer_types):
dims.append(d)
if len(set(dims)) == len(dims):
return self._broadcast_indexes_outer(key)
return self._broadcast_indexes_vectorized(key)
def _broadcast_indexes_basic(self, key):
dims = tuple(
dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)
)
return dims, BasicIndexer(key), None
def _validate_indexers(self, key):
""" Make sanity checks """
for dim, k in zip(self.dims, key):
if isinstance(k, BASIC_INDEXING_TYPES):
pass
else:
if not isinstance(k, Variable):
k = np.asarray(k)
if k.ndim > 1:
raise IndexError(
"Unlabeled multi-dimensional array cannot be "
"used for indexing: {}".format(k)
)
if k.dtype.kind == "b":
if self.shape[self.get_axis_num(dim)] != len(k):
raise IndexError(
"Boolean array size {:d} is used to index array "
"with shape {:s}.".format(len(k), str(self.shape))
)
if k.ndim > 1:
raise IndexError(
"{}-dimensional boolean indexing is "
"not supported. ".format(k.ndim)
)
if getattr(k, "dims", (dim,)) != (dim,):
raise IndexError(
"Boolean indexer should be unlabeled or on the "
"same dimension to the indexed array. Indexer is "
"on {:s} but the target dimension is {:s}.".format(
str(k.dims), dim
)
)
def _broadcast_indexes_outer(self, key):
dims = tuple(
k.dims[0] if isinstance(k, Variable) else dim
for k, dim in zip(key, self.dims)
if not isinstance(k, integer_types)
)
new_key = []
for k in key:
if isinstance(k, Variable):
k = k.data
if not isinstance(k, BASIC_INDEXING_TYPES):
k = np.asarray(k)
if k.size == 0:
# Slice by empty list; numpy could not infer the dtype
k = k.astype(int)
elif k.dtype.kind == "b":
(k,) = np.nonzero(k)
new_key.append(k)
return dims, OuterIndexer(tuple(new_key)), None
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))
def _broadcast_indexes_vectorized(self, key):
variables = []
out_dims_set = OrderedSet()
for dim, value in zip(self.dims, key):
if isinstance(value, slice):
out_dims_set.add(dim)
else:
variable = (
value
if isinstance(value, Variable)
else as_variable(value, name=dim)
)
if variable.dtype.kind == "b": # boolean indexing case
(variable,) = variable._nonzero()
variables.append(variable)
out_dims_set.update(variable.dims)
variable_dims = set()
for variable in variables:
variable_dims.update(variable.dims)
slices = []
for i, (dim, value) in enumerate(zip(self.dims, key)):
if isinstance(value, slice):
if dim in variable_dims:
# We only convert slice objects to variables if they share
# a dimension with at least one other variable. Otherwise,
# we can equivalently leave them as slices and transpose
# the result. This is significantly faster/more efficient
# for most array backends.
values = np.arange(*value.indices(self.sizes[dim]))
variables.insert(i - len(slices), Variable((dim,), values))
else:
slices.append((i, value))
try:
variables = _broadcast_compat_variables(*variables)
except ValueError:
raise IndexError(f"Dimensions of indexers mismatch: {key}")
out_key = [variable.data for variable in variables]
out_dims = tuple(out_dims_set)
slice_positions = set()
for i, value in slices:
out_key.insert(i, value)
new_position = out_dims.index(self.dims[i])
slice_positions.add(new_position)
if slice_positions:
new_order = [i for i in range(len(out_dims)) if i not in slice_positions]
else:
new_order = None
return out_dims, VectorizedIndexer(tuple(out_key)), new_order
def __getitem__(self: VariableType, key) -> VariableType:
"""Return a new Variable object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement xarray-style indexing,
where if keys are unlabeled arrays, we index the array orthogonally
with them. If keys are labeled array (such as Variables), they are
broadcasted with our usual scheme and then the array is indexed with
the broadcasted key, like numpy's fancy indexing.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
"""
dims, indexer, new_order = self._broadcast_indexes(key)
data = as_indexable(self._data)[indexer]
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:
"""Used by IndexVariable to return IndexVariable objects when possible."""
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if is_duck_dask_array(self._data):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
mask = indexing.create_mask(indexer, self.shape, data)
# we need to invert the mask in order to pass data first. This helps
# pint to choose the correct unit
# TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
data = duck_array_ops.where(np.logical_not(mask), data, fill_value)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, "shape", ()))
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
dims, index_tuple, new_order = self._broadcast_indexes(key)
if not isinstance(value, Variable):
value = as_compatible_data(value)
if value.ndim > len(dims):
raise ValueError(
"shape mismatch: value array of shape %s could not be "
"broadcast to indexing result with %s dimensions"
% (value.shape, len(dims))
)
if value.ndim == 0:
value = Variable((), value)
else:
value = Variable(dims[-value.ndim :], value)
# broadcast to become assignable
value = value.set_dims(dims).data
if new_order:
value = duck_array_ops.asarray(value)
value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]
value = duck_array_ops.moveaxis(value, new_order, range(len(new_order)))
indexable = as_indexable(self._data)
indexable[index_tuple] = value
@property
def attrs(self) -> Dict[Hashable, Any]:
"""Dictionary of local attributes on this variable."""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable."""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError("encoding must be castable to a dictionary")
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array is loaded into memory and copied onto
the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
Examples
--------
Shallow copy versus deep copy
>>> var = xr.Variable(data=[1, 2, 3], dims="x")
>>> var.copy()
<xarray.Variable (x: 3)>
array([1, 2, 3])
>>> var_0 = var.copy(deep=False)
>>> var_0[0] = 7
>>> var_0
<xarray.Variable (x: 3)>
array([7, 2, 3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> var.copy(data=[0.1, 0.2, 0.3])
<xarray.Variable (x: 3)>
array([0.1, 0.2, 0.3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
See Also
--------
pandas.DataFrame.copy
"""
if data is None:
data = self._data
if isinstance(data, indexing.MemoryCachedArray):
# don't share caching between copies
data = indexing.MemoryCachedArray(data.array)
if deep:
data = copy.deepcopy(data)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return self._replace(data=data)
def _replace(
self, dims=_default, data=_default, attrs=_default, encoding=_default
) -> "Variable":
if dims is _default:
dims = copy.copy(self._dims)
if data is _default:
data = copy.copy(self.data)
if attrs is _default:
attrs = copy.copy(self._attrs)
if encoding is _default:
encoding = copy.copy(self._encoding)
return type(self)(dims, data, attrs, encoding, fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return getattr(self._data, "chunks", None)
_array_counter = itertools.count()
def chunk(self, chunks={}, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If neither chunks is not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already a dask array.
Returns
-------
chunked : xarray.Variable
"""
import dask
import dask.array as da
if chunks is None:
warnings.warn(
"None value for 'chunks' is deprecated. "
"It will raise an error in the future. Use instead '{}'",
category=FutureWarning,
)
chunks = {}
if utils.is_dict_like(chunks):
chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}
data = self._data
if is_duck_dask_array(data):
data = data.rechunk(chunks)
else:
if isinstance(data, indexing.ExplicitlyIndexed):
# Unambiguously handle array storage backends (like NetCDF4 and h5py)
# that can't handle general array indexing. For example, in netCDF4 you
# can do "outer" indexing along two dimensions independent, which works
# differently from how NumPy handles it.
# da.from_array works by using lazy indexing with a tuple of slices.
# Using OuterIndexer is a pragmatic choice: dask does not yet handle
# different indexing types in an explicit way:
# https://github.com/dask/dask/issues/2883
data = indexing.ImplicitToExplicitIndexingAdapter(
data, indexing.OuterIndexer
)
if LooseVersion(dask.__version__) < "2.0.0":
kwargs = {}
else:
# All of our lazily loaded backend array classes should use NumPy
# array operations.
kwargs = {"meta": np.ndarray}
else:
kwargs = {}
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape))
data = da.from_array(data, chunks, name=name, lock=lock, **kwargs)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
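# Hypothetical usage sketch (editor's addition, not part of the original
# source; requires dask): chunking a numpy-backed Variable by dimension name.
#
# >>> v = Variable(("x", "y"), np.zeros((100, 100)))
# >>> v.chunk({"x": 10}).chunks        # "x" split into 10-element blocks, "y" left whole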
def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA):
"""
use sparse-array as backend.
"""
import sparse
# TODO: what to do if dask-backended?
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = dtypes.result_type(self.dtype, fill_value)
if sparse_format is _default:
sparse_format = "coo"
try:
as_sparse = getattr(sparse, f"as_{sparse_format.lower()}")
except AttributeError:
raise ValueError(f"{sparse_format} is not a valid sparse format")
data = as_sparse(self.data.astype(dtype), fill_value=fill_value)
return self._replace(data=data)
def _to_dense(self):
"""
Change backend from sparse to np.array
"""
if hasattr(self._data, "todense"):
return self._replace(data=self._data.todense())
return self.copy(deep=False)
def isel(
self: VariableType,
indexers: Mapping[Hashable, Any] = None,
missing_dims: str = "raise",
**indexers_kwargs: Any,
) -> VariableType:
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
DataArray:
- "raise": raise an exception
- "warning": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
key = tuple(indexers.get(dim, slice(None)) for dim in self.dims)
return self[key]
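# Hypothetical usage sketch (editor's addition, not part of the original
# source): positional selection by dimension name instead of axis number.
#
# >>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
# >>> v.isel(x=0).values               # -> array([1, 2, 3])
# >>> v.isel(y=slice(1, None)).shape   # -> (2, 2)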
def squeeze(self, dim=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : same type as caller
This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
"""
dims = common.get_squeeze_dims(self, dim)
return self.isel({d: 0 for d in dims})
def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):
axis = self.get_axis_num(dim)
if count > 0:
keep = slice(None, -count)
elif count < 0:
keep = slice(-count, None)
else:
keep = slice(None)
trimmed_data = self[(slice(None),) * axis + (keep,)].data
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
width = min(abs(count), self.shape[axis])
dim_pad = (width, 0) if count >= 0 else (0, width)
pads = [(0, 0) if d != dim else dim_pad for d in self.dims]
data = duck_array_ops.pad(
trimmed_data.astype(dtype),
pads,
mode="constant",
constant_values=fill_value,
)
if is_duck_dask_array(data):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
"""
Return a new Variable with shifted data.
Parameters
----------
shifts : mapping of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value: scalar, optional
Value to use for newly missing values
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but shifted data.
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift")
result = self
for dim, count in shifts.items():
result = result._shift_one_dim(dim, count, fill_value=fill_value)
return result
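# Hypothetical usage sketch (editor's addition, not part of the original
# source): with the default fill value, integer data is promoted so the
# newly exposed cells can hold NaN.
#
# >>> v = Variable(("x",), [1, 2, 3])
# >>> v.shift(x=1).values                 # -> array([nan, 1., 2.])
# >>> v.shift(x=1, fill_value=0).values   # -> array([0, 1, 2]), dtype preserved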
def _pad_options_dim_to_index(
self,
pad_option: Mapping[Hashable, Union[int, Tuple[int, int]]],
fill_with_shape=False,
):
if fill_with_shape:
return [
(n, n) if d not in pad_option else pad_option[d]
for d, n in zip(self.dims, self.data.shape)
]
return [(0, 0) if d not in pad_option else pad_option[d] for d in self.dims]
def pad(
self,
pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,
mode: str = "constant",
stat_length: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
constant_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
end_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
reflect_type: str = None,
**pad_width_kwargs: Any,
):
"""
Return a new Variable with padded data.
Parameters
----------
pad_width : mapping of hashable to tuple of int
Mapping with the form of {dim: (pad_before, pad_after)}
describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode : str, default: "constant"
See numpy / Dask docs
stat_length : int, tuple or mapping of hashable to tuple
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
constant_values : scalar, tuple or mapping of hashable to tuple
Used in 'constant'. The values to set the padded values for each
axis.
end_values : scalar, tuple or mapping of hashable to tuple
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
reflect_type : {"even", "odd"}, optional
Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
**pad_width_kwargs
One of pad_width or pad_width_kwargs must be provided.
Returns
-------
padded : Variable
Variable with the same dimensions and attributes but padded data.
"""
pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad")
# change default behaviour of pad with mode constant
if mode == "constant" and (
constant_values is None or constant_values is dtypes.NA
):
dtype, constant_values = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
# create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty
if isinstance(stat_length, dict):
stat_length = self._pad_options_dim_to_index(
stat_length, fill_with_shape=True
)
if isinstance(constant_values, dict):
constant_values = self._pad_options_dim_to_index(constant_values)
if isinstance(end_values, dict):
end_values = self._pad_options_dim_to_index(end_values)
# workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303
if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]:
stat_length = [(n, n) for n in self.data.shape] # type: ignore
# change integer values to a tuple of two of those values and change pad_width to index
for k, v in pad_width.items():
if isinstance(v, numbers.Number):
pad_width[k] = (v, v)
pad_width_by_index = self._pad_options_dim_to_index(pad_width)
# create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty
pad_option_kwargs = {}
if stat_length is not None:
pad_option_kwargs["stat_length"] = stat_length
if constant_values is not None:
pad_option_kwargs["constant_values"] = constant_values
if end_values is not None:
pad_option_kwargs["end_values"] = end_values
if reflect_type is not None:
pad_option_kwargs["reflect_type"] = reflect_type # type: ignore
array = duck_array_ops.pad(
self.data.astype(dtype, copy=False),
pad_width_by_index,
mode=mode,
**pad_option_kwargs,
)
return type(self)(self.dims, array)
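# Hypothetical usage sketch (editor's addition, not part of the original
# source): constant padding with the default (promoted NaN) fill versus an
# explicit constant.
#
# >>> v = Variable(("x",), [1, 2, 3])
# >>> v.pad(x=(1, 1)).values                      # -> array([nan, 1., 2., 3., nan])
# >>> v.pad(x=(1, 1), constant_values=0).values   # -> array([0, 1, 2, 3, 0])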
def _roll_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
count %= self.shape[axis]
if count != 0:
indices = [slice(-count, None), slice(None, -count)]
else:
indices = [slice(None)]
arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices]
data = duck_array_ops.concatenate(arrays, axis)
if is_duck_dask_array(data):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def roll(self, shifts=None, **shifts_kwargs):
"""
Return a new Variable with rolled data.
Parameters
----------
shifts : mapping of hashable to int
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : Variable
Variable with the same dimensions and attributes but rolled data.
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
def transpose(self, *dims) -> "Variable":
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
This operation returns a view of this variable's data. It is
lazy for dask-backed Variables but not for numpy-backed Variables.
See Also
--------
numpy.transpose
"""
if len(dims) == 0:
dims = self.dims[::-1]
dims = tuple(infix_dims(dims, self.dims))
axes = self.get_axis_num(dims)
if len(dims) < 2 or dims == self.dims:
# no need to transpose if only one dimension
# or dims are in same order
return self.copy(deep=False)
data = as_indexable(self._data).transpose(axes)
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
@property
def T(self) -> "Variable":
return self.transpose()
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to the variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
"""
if isinstance(dims, str):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError(
"new dimensions %r must be a superset of "
"existing dimensions %r" % (dims, self.dims)
)
self_dims = set(self.dims)
expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(
expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True
)
return expanded_var.transpose(*dims)
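# Hypothetical usage sketch (editor's addition, not part of the original
# source): new dimensions are inserted with length 1, or broadcast to a
# requested size when a dict of sizes is given.
#
# >>> v = Variable(("x",), [1, 2, 3])
# >>> v.set_dims(("t", "x")).shape        # -> (1, 3)
# >>> v.set_dims({"t": 2, "x": 3}).shape  # -> (2, 3), a broadcast view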
def _stack_once(self, dims, new_dim):
if not set(dims) <= set(self.dims):
raise ValueError("invalid existing dimensions: %s" % dims)
if new_dim in self.dims:
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if len(dims) == 0:
# don't stack
return self.copy(deep=False)
other_dims = [d for d in self.dims if d not in dims]
dim_order = other_dims + list(dims)
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + (-1,)
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + (new_dim,)
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def stack(self, dimensions=None, **dimensions_kwargs):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to tuple of hashable
Mapping of form new_name=(dim1, dim2, ...) describing the
names of new dimensions, and the existing dimensions that
they replace.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See also
--------
Variable.unstack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack")
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
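# Hypothetical usage sketch (editor's addition, not part of the original
# source): stacking collapses the listed dims into one new dim at the end,
# in C (row-major) order.
#
# >>> v = Variable(("x", "y"), [[0, 1], [2, 3]])
# >>> v.stack(z=("x", "y")).dims          # -> ('z',) with values [0, 1, 2, 3]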
def _unstack_once(self, dims, old_dim):
new_dim_names = tuple(dims.keys())
new_dim_sizes = tuple(dims.values())
if old_dim not in self.dims:
raise ValueError("invalid existing dimension: %s" % old_dim)
if set(new_dim_names).intersection(self.dims):
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if np.prod(new_dim_sizes) != self.sizes[old_dim]:
raise ValueError(
"the product of the new dimension sizes must "
"equal the size of the old dimension"
)
other_dims = [d for d in self.dims if d != old_dim]
dim_order = other_dims + [old_dim]
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + new_dim_names
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def unstack(self, dimensions=None, **dimensions_kwargs):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to mapping of hashable to int
Mapping of the form old_dim={dim1: size1, ...} describing the
names of existing dimensions, and the new dimensions and sizes
that they map to.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See also
--------
Variable.stack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack")
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once(dims, old_dim)
return result
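# Hypothetical usage sketch (editor's addition, not part of the original
# source): unstack is the inverse of stack, given the sizes to restore.
#
# >>> flat = Variable(("z",), [0, 1, 2, 3])
# >>> flat.unstack(z={"x": 2, "y": 2}).dims   # -> ('x', 'y'), values [[0, 1], [2, 3]]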
def fillna(self, value):
return ops.fillna(self, value)
def where(self, cond, other=dtypes.NA):
return ops.where_method(self, cond, other)
def reduce(
self,
func,
dim=None,
axis=None,
keep_attrs=None,
keepdims=False,
**kwargs,
):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
keepdims : bool, default: False
If True, the dimensions which are reduced are left in the result
as dimensions of size one
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim == ...:
dim = None
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if dim is not None:
axis = self.get_axis_num(dim)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", r"Mean of empty slice", category=RuntimeWarning
)
if axis is not None:
data = func(self.data, axis=axis, **kwargs)
else:
data = func(self.data, **kwargs)
if getattr(data, "shape", ()) == self.shape:
dims = self.dims
else:
removed_axes = (
range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim
)
if keepdims:
# Insert np.newaxis for removed dims
slices = tuple(
np.newaxis if i in removed_axes else slice(None, None)
for i in range(self.ndim)
)
if getattr(data, "shape", None) is None:
# Reduce has produced a scalar value, not an array-like
data = np.asanyarray(data)[slices]
else:
data = data[slices]
dims = self.dims
else:
dims = [
adim for n, adim in enumerate(self.dims) if n not in removed_axes
]
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
return Variable(dims, data, attrs=attrs)
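    # Added illustrative sketch (not part of the original source): a hypothetical
    # helper showing ``reduce`` applied over a named dimension with a plain
    # numpy reduction function.
    @staticmethod
    def _example_reduce_over_dim():
        v = Variable(("x", "y"), np.arange(6).reshape(2, 3))
        summed = v.reduce(np.sum, dim="y")  # dims ("x",), values [3, 12]
        assert summed.dims == ("x",)
        return summed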
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Variable
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of array-like, optional
List of integer arrays which specifies the integer positions to
which to assign each dataset along the concatenated dimension.
If not supplied, objects are concatenated in the provided order.
shortcut : bool, optional
            This option is used internally to speed up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
if not isinstance(dim, str):
(dim,) = dim.dims
# can't do this lazily: we need to loop through variables at least
# twice
variables = list(variables)
first_var = variables[0]
arrays = [v.data for v in variables]
if dim in first_var.dims:
axis = first_var.get_axis_num(dim)
dims = first_var.dims
data = duck_array_ops.concatenate(arrays, axis=axis)
if positions is not None:
# TODO: deprecate this option -- we don't need it for groupby
# any more.
indices = nputils.inverse_permutation(np.concatenate(positions))
data = duck_array_ops.take(data, indices, axis=axis)
else:
axis = 0
dims = (dim,) + first_var.dims
data = duck_array_ops.stack(arrays, axis=axis)
attrs = dict(first_var.attrs)
encoding = dict(first_var.encoding)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError(
f"Variable has dimensions {list(var.dims)} but first Variable has dimensions {list(first_var.dims)}"
)
return cls(dims, data, attrs, encoding)
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and (
self._data is other._data or equiv(self.data, other.data)
)
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other, equiv=equiv)
def identical(self, other, equiv=duck_array_ops.array_equiv):
"""Like equals, but also checks attributes."""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self.equals(
other, equiv=equiv
)
except (TypeError, AttributeError):
return False
def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv):
"""True if the intersection of two Variable's non-null data is
equal; otherwise false.
Variables can thus still be equal if there are locations where either,
or both, contain NaN values.
"""
return self.broadcast_equals(other, equiv=equiv)
def quantile(
self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True
):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float or sequence of float
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanquantile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
from .computation import apply_ufunc
_quantile_func = np.nanquantile if skipna else np.quantile
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
scalar = utils.is_scalar(q)
q = np.atleast_1d(np.asarray(q, dtype=np.float64))
if dim is None:
dim = self.dims
if utils.is_scalar(dim):
dim = [dim]
def _wrapper(npa, **kwargs):
# move quantile axis to end. required for apply_ufunc
return np.moveaxis(_quantile_func(npa, **kwargs), 0, -1)
axis = np.arange(-1, -1 * len(dim) - 1, -1)
result = apply_ufunc(
_wrapper,
self,
input_core_dims=[dim],
exclude_dims=set(dim),
output_core_dims=[["quantile"]],
output_dtypes=[np.float64],
dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}),
dask="parallelized",
kwargs={"q": q, "axis": axis, "interpolation": interpolation},
)
# for backward compatibility
result = result.transpose("quantile", ...)
if scalar:
result = result.squeeze("quantile")
if keep_attrs:
result.attrs = self._attrs
return result
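    # Added illustrative sketch (not part of the original source): a hypothetical
    # helper showing that a scalar ``q`` drops the "quantile" dimension again,
    # assuming numpy-backed float data.
    @staticmethod
    def _example_scalar_quantile():
        v = Variable(("x", "y"), np.arange(6.0).reshape(2, 3))
        med = v.quantile(0.5, dim="y")  # median along "y" -> values [1., 4.]
        assert med.dims == ("x",)
        return med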
def rank(self, dim, pct=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
ranked : Variable
See Also
--------
Dataset.rank, DataArray.rank
"""
import bottleneck as bn
data = self.data
if is_duck_dask_array(data):
raise TypeError(
"rank does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method."
)
elif not isinstance(data, np.ndarray):
raise TypeError(
"rank is not implemented for {} objects.".format(type(data))
)
axis = self.get_axis_num(dim)
func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata
ranked = func(data, axis=axis)
if pct:
count = np.sum(~np.isnan(data), axis=axis, keepdims=True)
ranked /= count
return Variable(self.dims, ranked)
def rolling_window(
self, dim, window, window_dim, center=False, fill_value=dtypes.NA
):
"""
Make a rolling_window along dim and add a new_dim to the last place.
Parameters
----------
dim : str
Dimension over which to compute rolling_window.
For nd-rolling, should be list of dimensions.
window : int
Window size of the rolling
For nd-rolling, should be list of integers.
window_dim : str
            New name of the window dimension.
            For nd-rolling, should be a list of strings.
center : bool, default: False
            If True, pad fill_value at both ends of the axis. Otherwise, pad
            only at the beginning of the axis.
fill_value
value to be filled.
Returns
-------
        Variable that is a view of the original array with an added dimension of
        size w.
The return dim: self.dims + (window_dim, )
The return shape: self.shape + (window, )
Examples
--------
>>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4)))
>>> v.rolling_window("b", 3, "window_dim")
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
>>> v.rolling_window("b", 3, "window_dim", center=True)
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.],
[ 2., 3., nan]],
<BLANKLINE>
[[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.],
[ 6., 7., nan]]])
"""
if fill_value is dtypes.NA: # np.nan is passed
dtype, fill_value = dtypes.maybe_promote(self.dtype)
array = self.astype(dtype, copy=False).data
else:
dtype = self.dtype
array = self.data
if isinstance(dim, list):
assert len(dim) == len(window)
assert len(dim) == len(window_dim)
assert len(dim) == len(center)
else:
dim = [dim]
window = [window]
window_dim = [window_dim]
center = [center]
axis = [self.get_axis_num(d) for d in dim]
new_dims = self.dims + tuple(window_dim)
return Variable(
new_dims,
duck_array_ops.rolling_window(
array, axis=axis, window=window, center=center, fill_value=fill_value
),
)
def coarsen(
self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs
):
"""
Apply reduction function.
"""
windows = {k: v for k, v in windows.items() if k in self.dims}
if not windows:
return self.copy()
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
_attrs = self.attrs
else:
_attrs = None
reshaped, axes = self._coarsen_reshape(windows, boundary, side)
if isinstance(func, str):
name = func
func = getattr(duck_array_ops, name, None)
if func is None:
raise NameError(f"{name} is not a valid method.")
return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs)
def _coarsen_reshape(self, windows, boundary, side):
"""
Construct a reshaped-array for coarsen
"""
if not utils.is_dict_like(boundary):
boundary = {d: boundary for d in windows.keys()}
if not utils.is_dict_like(side):
side = {d: side for d in windows.keys()}
# remove unrelated dimensions
boundary = {k: v for k, v in boundary.items() if k in windows}
side = {k: v for k, v in side.items() if k in windows}
for d, window in windows.items():
if window <= 0:
raise ValueError(f"window must be > 0. Given {window}")
variable = self
for d, window in windows.items():
# trim or pad the object
size = variable.shape[self._get_axis_num(d)]
n = int(size / window)
if boundary[d] == "exact":
if n * window != size:
raise ValueError(
"Could not coarsen a dimension of size {} with "
"window {}".format(size, window)
)
elif boundary[d] == "trim":
if side[d] == "left":
variable = variable.isel({d: slice(0, window * n)})
else:
excess = size - window * n
variable = variable.isel({d: slice(excess, None)})
elif boundary[d] == "pad": # pad
pad = window * n - size
if pad < 0:
pad += window
if side[d] == "left":
pad_width = {d: (0, pad)}
else:
pad_width = {d: (pad, 0)}
variable = variable.pad(pad_width, mode="constant")
else:
                raise TypeError(
                    "{} is invalid for boundary. Valid options are 'exact', "
                    "'trim' and 'pad'".format(boundary[d])
                )
shape = []
axes = []
axis_count = 0
for i, d in enumerate(variable.dims):
if d in windows:
size = variable.shape[i]
shape.append(int(size / windows[d]))
shape.append(windows[d])
axis_count += 1
axes.append(i + axis_count)
else:
shape.append(variable.shape[i])
return variable.data.reshape(shape), tuple(axes)
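    # Added illustrative sketch (not part of the original source): a hypothetical
    # helper showing the reshape performed by ``_coarsen_reshape`` and how
    # ``coarsen`` then reduces over the trailing window axis.
    @staticmethod
    def _example_coarsen_reshape():
        v = Variable(("x", "y"), np.arange(12.0).reshape(2, 6))
        reshaped, axes = v._coarsen_reshape({"y": 3}, boundary="exact", side="left")
        assert reshaped.shape == (2, 2, 3) and axes == (2,)
        coarse = v.coarsen({"y": 3}, "mean")  # mean over each window of 3 along "y"
        assert coarse.shape == (2, 2)
        return coarse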
def isnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is a missing value.
Returns
-------
isnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.isnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.isnull()
<xarray.Variable (x: 3)>
array([False, True, False])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.isnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
def notnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is not a missing value.
Returns
-------
notnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.notnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.notnull()
<xarray.Variable (x: 3)>
array([ True, False, True])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.notnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
@property
def real(self):
return type(self)(self.dims, self.data.real, self._attrs)
@property
def imag(self):
return type(self)(self.dims, self.data.imag, self._attrs)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
keep_attrs = kwargs.pop("keep_attrs", None)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
with np.errstate(all="ignore"):
result = self.__array_wrap__(f(self.data, *args, **kwargs))
if keep_attrs:
result.attrs = self.attrs
return result
return func
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (xr.DataArray, xr.Dataset)):
return NotImplemented
self_data, other_data, dims = _broadcast_compat_data(self, other)
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
with np.errstate(all="ignore"):
new_data = (
f(self_data, other_data)
if not reflexive
else f(other_data, self_data)
)
result = Variable(dims, new_data, attrs=attrs)
return result
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, xr.Dataset):
raise TypeError("cannot add a Dataset to a Variable in-place")
self_data, other_data, dims = _broadcast_compat_data(self, other)
if dims != self.dims:
raise ValueError("dimensions cannot change for in-place operations")
with np.errstate(all="ignore"):
self.values = f(self_data, other_data)
return self
return func
def _to_numeric(self, offset=None, datetime_unit=None, dtype=float):
"""A (private) method to convert datetime array to numeric dtype
See duck_array_ops.datetime_to_numeric
"""
numeric_array = duck_array_ops.datetime_to_numeric(
self.data, offset, datetime_unit, dtype
)
return type(self)(self.dims, numeric_array, self._attrs)
def _unravel_argminmax(
self,
argminmax: str,
dim: Union[Hashable, Sequence[Hashable], None],
axis: Union[int, None],
keep_attrs: Optional[bool],
skipna: Optional[bool],
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Apply argmin or argmax over one or more dimensions, returning the result as a
dict of DataArray that can be passed directly to isel.
"""
if dim is None and axis is None:
warnings.warn(
"Behaviour of argmin/argmax with neither dim nor axis argument will "
"change to return a dict of indices of each dimension. To get a "
"single, flat index, please use np.argmin(da.data) or "
"np.argmax(da.data) instead of da.argmin() or da.argmax().",
DeprecationWarning,
stacklevel=3,
)
argminmax_func = getattr(duck_array_ops, argminmax)
if dim is ...:
# In future, should do this also when (dim is None and axis is None)
dim = self.dims
if (
dim is None
or axis is not None
or not isinstance(dim, Sequence)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
return self.reduce(
argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna
)
# Get a name for the new dimension that does not conflict with any existing
# dimension
newdimname = "_unravel_argminmax_dim_0"
count = 1
while newdimname in self.dims:
newdimname = f"_unravel_argminmax_dim_{count}"
count += 1
stacked = self.stack({newdimname: dim})
result_dims = stacked.dims[:-1]
reduce_shape = tuple(self.sizes[d] for d in dim)
result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna)
result_unravelled_indices = duck_array_ops.unravel_index(
result_flat_indices.data, reduce_shape
)
result = {
d: Variable(dims=result_dims, data=i)
for d, i in zip(dim, result_unravelled_indices)
}
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
for v in result.values():
v.attrs = self.attrs
return result
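    # Added illustrative sketch (not part of the original source): a hypothetical
    # helper showing how a sequence of dimensions yields a dict of index
    # Variables suitable for ``isel``.
    @staticmethod
    def _example_argmin_over_two_dims():
        v = Variable(("x", "y"), np.array([[3, 1], [0, 2]]))
        indices = v.argmin(dim=["x", "y"])
        assert indices["x"].data == 1 and indices["y"].data == 0
        return indices  # v[1, 0] is the overall minimum, 0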
def argmin(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the minimum of the Variable over one or more dimensions.
        If a sequence is passed to 'dim', the result is returned as a dict of
        Variables, which can be passed directly to isel(). If a single str is
        passed to 'dim' then a Variable with dtype int is returned.
If there are multiple minima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the minimum. By default, finds minimum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See also
--------
DataArray.argmin, DataArray.idxmin
"""
return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna)
def argmax(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the maximum of the Variable over one or more dimensions.
        If a sequence is passed to 'dim', the result is returned as a dict of
        Variables, which can be passed directly to isel(). If a single str is
        passed to 'dim' then a Variable with dtype int is returned.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
            Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See also
--------
DataArray.argmax, DataArray.idxmax
"""
return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna)
ops.inject_all_ops_and_reduce_methods(Variable)
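# Added illustrative sketch (not part of the original module): a hypothetical
# helper contrasting the comparison methods defined on Variable above. It only
# relies on names already imported in this module (np, Variable).
def _example_variable_comparisons():
    a = Variable(("x",), np.array([1.0, np.nan]), attrs={"units": "m"})
    b = Variable(("x",), np.array([1.0, np.nan]))
    c = Variable(("y", "x"), np.array([[1.0, np.nan]]))
    assert a.equals(b)            # same dims and values; NaNs match positionally
    assert not a.identical(b)     # identical() also compares attrs, which differ
    assert a.broadcast_equals(c)  # equal once broadcast over the length-1 "y" dim
    return a, b, c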
class IndexVariable(Variable):
"""Wrapper for accommodating a pandas.Index in an xarray.Variable.
    IndexVariable preserves loaded values in the form of a pandas.Index instead
of a NumPy array. Hence, their values are immutable and must always be one-
dimensional.
They also have a name property, which is the name of their sole dimension
unless another name is given.
"""
__slots__ = ()
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
super().__init__(dims, data, attrs, encoding, fastpath)
if self.ndim != 1:
raise ValueError("%s objects must be 1-dimensional" % type(self).__name__)
# Unlike in Variable, always eagerly load values into memory
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
def __dask_tokenize__(self):
from dask.base import normalize_token
# Don't waste time converting pd.Index to np.ndarray
return normalize_token((type(self), self._dims, self._data.array, self._attrs))
def load(self):
# data is already loaded into memory for IndexVariable
return self
# https://github.com/python/mypy/issues/1465
@Variable.data.setter # type: ignore
def data(self, data):
raise ValueError(
f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
@Variable.values.setter # type: ignore
def values(self, values):
raise ValueError(
f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
def chunk(self, chunks={}, name=None, lock=False):
# Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()
return self.copy(deep=False)
def _as_sparse(self, sparse_format=_default, fill_value=_default):
# Dummy
return self.copy(deep=False)
def _to_dense(self):
# Dummy
return self.copy(deep=False)
def _finalize_indexing_result(self, dims, data):
if getattr(data, "ndim", 0) != 1:
# returns Variable rather than IndexVariable if multi-dimensional
return Variable(dims, data, self._attrs, self._encoding)
else:
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def __setitem__(self, key, value):
raise TypeError("%s values cannot be modified" % type(self).__name__)
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Specialized version of Variable.concat for IndexVariable objects.
This exists because we want to avoid converting Index objects to NumPy
arrays, if possible.
"""
if not isinstance(dim, str):
(dim,) = dim.dims
variables = list(variables)
first_var = variables[0]
if any(not isinstance(v, cls) for v in variables):
raise TypeError(
"IndexVariable.concat requires that all input "
"variables be IndexVariable objects"
)
indexes = [v._data.array for v in variables]
if not indexes:
data = []
else:
data = indexes[0].append(indexes[1:])
if positions is not None:
indices = nputils.inverse_permutation(np.concatenate(positions))
data = data.take(indices)
attrs = dict(first_var.attrs)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError("inconsistent dimensions")
utils.remove_incompatible_items(attrs, var.attrs)
return cls(first_var.dims, data, attrs)
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
        deep : bool, optional
            Ignored when `data` is given; otherwise it also has no practical
            effect, since the underlying data is stored as an immutable
            pandas.Index. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
"""
if data is None:
data = self._data.copy(deep=deep)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def equals(self, other, equiv=None):
        # if equiv is specified, defer to the base class implementation
if equiv is not None:
return super().equals(other, equiv)
# otherwise use the native index equals, rather than looking at _data
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and self._data_equals(other)
except (TypeError, AttributeError):
return False
def _data_equals(self, other):
return self.to_index().equals(other.to_index())
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return self
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
# n.b. creating a new pandas.Index from an old pandas.Index is
# basically free as pandas.Index objects are immutable
assert self.ndim == 1
index = self._data.array
if isinstance(index, pd.MultiIndex):
# set default names for multi-index unnamed levels so that
# we can safely rename dimension / coordinate later
valid_level_names = [
name or "{}_level_{}".format(self.dims[0], i)
for i, name in enumerate(index.names)
]
index = index.set_names(valid_level_names)
else:
index = index.set_names(self.name)
return index
@property
def level_names(self):
"""Return MultiIndex level names or None if this IndexVariable has no
MultiIndex.
"""
index = self.to_index()
if isinstance(index, pd.MultiIndex):
return index.names
else:
return None
def get_level_variable(self, level):
"""Return a new IndexVariable from a given MultiIndex level."""
if self.level_names is None:
raise ValueError("IndexVariable %r has no MultiIndex" % self.name)
index = self.to_index()
return type(self)(self.dims, index.get_level_values(level))
@property
def name(self):
return self.dims[0]
@name.setter
def name(self, value):
raise AttributeError("cannot modify name of IndexVariable in-place")
# for backwards compatibility
Coordinate = utils.alias(IndexVariable, "Coordinate")
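# Added illustrative sketch (not part of the original module): a hypothetical
# helper showing that IndexVariable keeps its values as a pandas.Index and
# rejects in-place modification.
def _example_index_variable_basics():
    iv = IndexVariable("x", pd.Index([10, 20, 30]))
    assert isinstance(iv.to_index(), pd.Index)
    assert iv.name == "x"
    try:
        iv[0] = 99
    except TypeError:
        pass  # IndexVariable values cannot be modified
    return iv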
def _unified_dims(variables):
# validate dimensions
all_dims = {}
for var in variables:
var_dims = var.dims
if len(set(var_dims)) < len(var_dims):
raise ValueError(
"broadcasting cannot handle duplicate "
"dimensions: %r" % list(var_dims)
)
for d, s in zip(var_dims, var.shape):
if d not in all_dims:
all_dims[d] = s
elif all_dims[d] != s:
raise ValueError(
"operands cannot be broadcast together "
"with mismatched lengths for dimension %r: %s"
% (d, (all_dims[d], s))
)
return all_dims
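# Added illustrative sketch (not part of the original module): a hypothetical
# helper showing the dimension/size mapping produced by _unified_dims and the
# error raised for mismatched lengths.
def _example_unified_dims():
    a = Variable(("x",), np.zeros(2))
    b = Variable(("x", "y"), np.zeros((2, 3)))
    assert _unified_dims([a, b]) == {"x": 2, "y": 3}
    try:
        _unified_dims([a, Variable(("x",), np.zeros(4))])
    except ValueError:
        pass  # dimension "x" has mismatched lengths 2 and 4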
def _broadcast_compat_variables(*variables):
"""Create broadcast compatible variables, with the same dimensions.
Unlike the result of broadcast_variables(), some variables may have
    dimensions of size 1 instead of the size of the broadcast dimension.
"""
dims = tuple(_unified_dims(variables))
return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables)
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(
var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables
)
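# Added illustrative sketch (not part of the original module): a hypothetical
# helper contrasting broadcast_variables with _broadcast_compat_variables. The
# former broadcasts data to the full shape, while the latter may leave size-1
# placeholder dimensions.
def _example_broadcasting_helpers():
    t = Variable(("time",), np.arange(3))
    s = Variable(("space",), np.arange(2))
    bt, bs = broadcast_variables(t, s)
    assert bt.dims == bs.dims == ("time", "space")
    assert bt.shape == bs.shape == (3, 2)
    ct, cs = _broadcast_compat_variables(t, s)
    assert ct.shape == (3, 1) and cs.shape == (1, 2)  # size-1 placeholders
    return bt, bs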
def _broadcast_compat_data(self, other):
if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]):
# `other` satisfies the necessary Variable API for broadcast_variables
new_self, new_other = _broadcast_compat_variables(self, other)
self_data = new_self.data
other_data = new_other.data
dims = new_self.dims
else:
# rely on numpy broadcasting rules
self_data = self.data
other_data = other
dims = self.dims
return self_data, other_data, dims
def concat(variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Variable
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of array-like, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
        This option is used internally to speed up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
variables = list(variables)
if all(isinstance(v, IndexVariable) for v in variables):
return IndexVariable.concat(variables, dim, positions, shortcut)
else:
return Variable.concat(variables, dim, positions, shortcut)
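# Added illustrative sketch (not part of the original module): a hypothetical
# helper showing how the module-level ``concat`` dispatches between
# Variable.concat and IndexVariable.concat.
def _example_concat_dispatch():
    v1 = Variable(("x",), np.array([1, 2]))
    v2 = Variable(("x",), np.array([3, 4]))
    along_existing = concat([v1, v2], dim="x")  # existing dim -> shape (4,)
    along_new = concat([v1, v2], dim="batch")   # new dim -> dims ("batch", "x")
    assert along_existing.shape == (4,)
    assert along_new.dims == ("batch", "x")
    iv = concat(
        [IndexVariable("x", pd.Index([0, 1])), IndexVariable("x", pd.Index([2, 3]))],
        dim="x",
    )
    assert isinstance(iv, IndexVariable)  # pandas indexes are appended, not converted
    return along_existing, along_new, iv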
def assert_unique_multiindex_level_names(variables):
"""Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
"""
level_names = defaultdict(list)
all_level_names = set()
for var_name, var in variables.items():
if isinstance(var._data, PandasIndexAdapter):
idx_level_names = var.to_index_variable().level_names
if idx_level_names is not None:
for n in idx_level_names:
level_names[n].append(f"{n!r} ({var_name})")
if idx_level_names:
all_level_names.update(idx_level_names)
for k, v in level_names.items():
if k in variables:
v.append("(%s)" % k)
duplicate_names = [v for v in level_names.values() if len(v) > 1]
if duplicate_names:
conflict_str = "\n".join(", ".join(v) for v in duplicate_names)
raise ValueError("conflicting MultiIndex level name(s):\n%s" % conflict_str)
    # Check for conflicts between level names and dimensions GH:2299
for k, v in variables.items():
for d in v.dims:
if d in all_level_names:
raise ValueError(
"conflicting level / dimension names. {} "
"already exists as a level name.".format(d)
)
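# Added illustrative sketch (not part of the original module): a hypothetical
# helper showing the error raised when two MultiIndex coordinates share a
# level name.
def _example_conflicting_level_names():
    midx = pd.MultiIndex.from_arrays([[0, 1], ["a", "b"]], names=["lvl", "other"])
    variables = {
        "x": IndexVariable("x", midx),
        "y": IndexVariable("y", midx.set_names(["lvl", "second"])),
    }
    try:
        assert_unique_multiindex_level_names(variables)
    except ValueError as err:
        return str(err)  # reports the duplicated level name 'lvl'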
| en | 0.742175 | # only for Dataset and DataArray # https://github.com/python/mypy/issues/224 # type: ignore Type annotation to be used when methods of Variable return self or a copy of self. When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the output as an instance of the subclass. Usage:: class Variable: def f(self: VariableType, ...) -> VariableType: ... Error class used when we can't safely guess a dimension name. # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? Convert an object into a Variable. Parameters ---------- obj : object Object to convert into a Variable. - If the object is already a Variable, return a shallow copy. - Otherwise, if the object has 'dims' and 'data' attributes, convert it into a new Variable. - If all else fails, attempt to convert the object into a Variable by unpacking it into the arguments for creating a new Variable. name : str, optional If provided: - `obj` can be a 1D array, which is assumed to label coordinate values along a dimension of this given name. - Variables with name matching one of their dimensions are converted into `IndexVariable` objects. Returns ------- var : Variable The newly created variable. # TODO: consider extending this method to automatically handle Iris and # extract the primary Variable from DataArrays # use .format() instead of % because it handles tuples consistently # convert the Variable into an Index Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure they can be indexed properly. NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should all pass through unmodified. Convert arrays of datetime.datetime and datetime.timedelta objects into datetime64 and timedelta64, according to the pandas convention. Also used for validating that datetime64 and timedelta64 objects are within the valid date range for ns precision, as pandas will raise an error if they are not. Prepare and wrap data to put in a Variable. - If data does not have the necessary attributes, convert it to ndarray. - If data has dtype=datetime64, ensure that it has ns precision. If it's a pandas.Timestamp, convert it to datetime64. - If data is already a pandas or xarray object (other than an Index), just use the values. Finally, wrap it up with an adapter if necessary. # can't use fastpath (yet) for scalars # TODO: convert, handle datetime objects, too # we don't want nested self-described arrays # validate whether the data is valid data types. Return the given values as a numpy array, or as an individual item if it's a 0d datetime64 or timedelta64 array. Importantly, this function does not copy data if it is already an ndarray - otherwise, it will not be possible to update Variable values in place. This function mostly exists because 0-dimensional ndarrays with dtype=datetime64 are broken :( https://github.com/numpy/numpy/issues/4337 https://github.com/numpy/numpy/issues/7619 TODO: remove this (replace with np.asarray) once these issues are fixed A netcdf-like variable consisting of dimensions, data and attributes which describe a single Array. A single Variable object is not fully described outside the context of its parent Dataset (if you want such a fully described object, use a DataArray instead). The main functional difference between Variables and numpy arrays is that numerical operations on Variables implement array broadcasting by dimension name. 
For example, adding an Variable with dimensions `('time',)` to another Variable with dimensions `('space',)` results in a new Variable with dimensions `('time', 'space')`. Furthermore, numpy reduce operations like ``mean`` or ``sum`` are overwritten to take a "dimension" argument instead of an "axis". Variables are light-weight objects used as the building block for datasets. They are more primitive objects, so operations with them provide marginally higher performance than using DataArrays. However, manipulating data in the form of a Dataset or DataArray should almost always be preferred, because they can use more complete metadata in context of coordinate labels. Parameters ---------- dims : str or sequence of str Name(s) of the the data dimension(s). Must be either a string (only for 1D data) or a sequence of strings with length equal to the number of dimensions. data : array_like Data array which supports numpy-like data access. attrs : dict_like or None, optional Attributes to assign to the new variable. If None (default), an empty attribute dictionary is initialized. encoding : dict_like or None, optional Dictionary specifying how to encode this array's data into a serialized format like netCDF4. Currently used keys (for netCDF) include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'. Well-behaved code to serialize a Variable should ignore unrecognized encoding items. Copy of the Variable object, with data cast to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. ‘C’ means C order, ‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to the order the array elements appear in memory as possible. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array. copy : bool, optional By default, astype always returns a newly allocated array. If this is set to False and the `dtype` requirement is satisfied, the input array is returned instead of a copy. keep_attrs : bool, optional By default, astype keeps attributes. Set to False to remove attributes in the returned object. Returns ------- out : same as object New object with data cast to the specified type. Notes ----- The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed through to the ``astype`` method of the underlying array when a value different than ``None`` is supplied. Make sure to only supply these arguments if the underlying array class supports them. See also -------- numpy.ndarray.astype dask.array.Array.astype sparse.COO.astype Manually trigger loading of this variable's data from disk or a remote source into memory and return this variable. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. 
Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. See Also -------- dask.array.compute Manually trigger loading of this variable's data from disk or a remote source into memory and return a new variable. The original is left unaltered. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. See Also -------- dask.array.compute # Use v.data, instead of v._data, in order to cope with the wrappers # around NetCDF and the like The variable's data as a numpy.ndarray Return this variable as a base xarray.Variable Return this variable as an xarray.IndexVariable Convert this variable to a pandas.Index Dictionary representation of variable. Tuple of dimension names with which this variable is associated. Prepare an indexing key for an indexing operation. Parameters ----------- key: int, slice, array-like, dict or tuple of integer, slice and array-like Any valid input for indexing. Returns ------- dims : tuple Dimension of the resultant variable. indexers : IndexingTuple subclass Tuple of integer, array-like, or slices to use when indexing self._data. The type of this argument indicates the type of indexing to perform, either basic, outer or vectorized. new_order : Optional[Sequence[int]] Optional reordering to do on the result of indexing. If not None, the first len(new_order) indexing should be moved to these positions. # key is a tuple # key is a tuple of full size # Convert a scalar Variable to an integer # Convert a 0d-array to an integer # Detect it can be mapped as an outer indexer # If all key is unlabeled, or # key can be mapped as an OuterIndexer. # If all key is 1-dimensional and there are no duplicate labels, # key can be mapped as an OuterIndexer. Make sanity checks # Slice by empty list; numpy could not infer the dtype Equivalent numpy's nonzero but returns a tuple of Varibles. # TODO we should replace dask's native nonzero # after https://github.com/dask/dask/issues/1076 is implemented. # boolean indexing case # We only convert slice objects to variables if they share # a dimension with at least one other variable. Otherwise, # we can equivalently leave them as slices aknd transpose # the result. This is significantly faster/more efficient # for most array backends. Return a new Variable object whose contents are consistent with getting the provided key from the underlying data. NB. __getitem__ and __setitem__ implement xarray-style indexing, where if keys are unlabeled arrays, we index the array orthogonally with them. If keys are labeled array (such as Variables), they are broadcasted with our usual scheme and then the array is indexed with the broadcasted key, like numpy's fancy indexing. If you really want to do indexing like `x[x > 0]`, manipulate the numpy array `x.values` directly. Used by IndexVariable to return IndexVariable objects when possible. Index this Variable with -1 remapped to fill_value. # TODO(shoyer): expose this method in public API somewhere (isel?) and # use it for reindex. # TODO(shoyer): add a sanity check that all other integers are # non-negative # TODO(shoyer): add an optimization, remapping -1 to an adjacent value # that is actually indexed rather than mapping it to the last value # along each axis. 
# dask's indexing is faster this way; also vindex does not # support negative indices yet: # https://github.com/dask/dask/pull/2967 # we need to invert the mask in order to pass data first. This helps # pint to choose the correct unit # TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed # array cannot be indexed along dimensions of size 0, so just # build the mask directly instead. __setitem__ is overloaded to access the underlying numpy values with orthogonal indexing. See __getitem__ for more details. # broadcast to become assignable Dictionary of local attributes on this variable. Dictionary of encodings on this variable. Returns a copy of this object. If `deep=True`, the data array is loaded into memory and copied onto the new object. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. When `data` is used, `deep` is ignored. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original. Examples -------- Shallow copy versus deep copy >>> var = xr.Variable(data=[1, 2, 3], dims="x") >>> var.copy() <xarray.Variable (x: 3)> array([1, 2, 3]) >>> var_0 = var.copy(deep=False) >>> var_0[0] = 7 >>> var_0 <xarray.Variable (x: 3)> array([7, 2, 3]) >>> var <xarray.Variable (x: 3)> array([7, 2, 3]) Changing the data using the ``data`` argument maintains the structure of the original object, but with the new data. Original object is unaffected. >>> var.copy(data=[0.1, 0.2, 0.3]) <xarray.Variable (x: 3)> array([0.1, 0.2, 0.3]) >>> var <xarray.Variable (x: 3)> array([7, 2, 3]) See Also -------- pandas.DataFrame.copy # don't share caching between copies # note: # dims is already an immutable tuple # attributes and encoding will be copied when the new Array is created # memo does nothing but is required for compatibility with # copy.deepcopy # mutable objects should not be hashable # https://github.com/python/mypy/issues/4266 # type: ignore Block dimensions for this array's data or None if it's not a dask array. Coerce this array's data into a dask arrays with the given chunks. If this variable is a non-dask array, it will be converted to dask array. If it's a dask array, it will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Parameters ---------- chunks : int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. name : str, optional Used to generate the name for this array in the internal dask graph. Does not need not be unique. lock : optional Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. Returns ------- chunked : xarray.Variable # Unambiguously handle array storage backends (like NetCDF4 and h5py) # that can't handle general array indexing. For example, in netCDF4 you # can do "outer" indexing along two dimensions independent, which works # differently from how NumPy handles it. # da.from_array works by using lazy indexing with a tuple of slices. 
# Using OuterIndexer is a pragmatic choice: dask does not yet handle # different indexing types in an explicit way: # https://github.com/dask/dask/issues/2883 # All of our lazily loaded backend array classes should use NumPy # array operations. use sparse-array as backend. # TODO: what to do if dask-backended? Change backend from sparse to np.array Return a new array indexed along the specified dimension(s). Parameters ---------- **indexers : {dim: indexer, ...} Keyword arguments with names matching dimensions and values given by integers, slice objects or arrays. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warning": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- obj : Array object A new Array with the selected data and dimensions. In general, the new variable's data will be a view of this variable's data, unless numpy fancy indexing was triggered by using an array indexer, in which case the data will be a copy. Return a new object with squeezed data. Parameters ---------- dim : None or str or tuple of str, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. Returns ------- squeezed : same type as caller This object, but with with all or a subset of the dimensions of length 1 removed. See Also -------- numpy.squeeze # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks Return a new Variable with shifted data. Parameters ---------- shifts : mapping of the form {dim: offset} Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value: scalar, optional Value to use for newly missing values **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but shifted data. Return a new Variable with padded data. Parameters ---------- pad_width : mapping of hashable to tuple of int Mapping with the form of {dim: (pad_before, pad_after)} describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad mode : str, default: "constant" See numpy / Dask docs stat_length : int, tuple or mapping of hashable to tuple Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. constant_values : scalar, tuple or mapping of hashable to tuple Used in 'constant'. The values to set the padded values for each axis. end_values : scalar, tuple or mapping of hashable to tuple Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. reflect_type : {"even", "odd"}, optional Used in "reflect", and "symmetric". The "even" style is the default with an unaltered reflection around the edge value. For the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. **pad_width_kwargs One of pad_width or pad_width_kwargs must be provided. 
Returns ------- padded : Variable Variable with the same dimensions and attributes but padded data. # change default behaviour of pad with mode constant # create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty # workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303 # type: ignore # change integer values to a tuple of two of those values and change pad_width to index # create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty # type: ignore # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks Return a new Variable with rolld data. Parameters ---------- shifts : mapping of hashable to int Integer offset to roll along each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but rolled data. Return a new Variable object with transposed dimensions. Parameters ---------- *dims : str, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. Returns ------- transposed : Variable The returned object has transposed data and dimensions with the same attributes as the original. Notes ----- This operation returns a view of this variable's data. It is lazy for dask-backed Variables but not for numpy-backed Variables. See Also -------- numpy.transpose # no need to transpose if only one dimension # or dims are in same order Return a new variable with given set of dimensions. This method might be used to attach new dimension(s) to variable. When possible, this operation does not copy this variable's data. Parameters ---------- dims : str or sequence of str or dict Dimensions to include on the new variable. If a dict, values are used to provide the sizes of new dimensions; otherwise, new dimensions are inserted with length 1. Returns ------- Variable # don't use broadcast_to unless necessary so the result remains # writeable if possible # don't stack Stack any number of existing dimensions into a single new dimension. New dimensions will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Parameters ---------- dimensions : mapping of hashable to tuple of hashable Mapping of form new_name=(dim1, dim2, ...) describing the names of new dimensions, and the existing dimensions that they replace. **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. Returns ------- stacked : Variable Variable with the same attributes but stacked data. See also -------- Variable.unstack Unstack an existing dimension into multiple new dimensions. New dimensions will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Parameters ---------- dimensions : mapping of hashable to mapping of hashable to int Mapping of the form old_dim={dim1: size1, ...} describing the names of existing dimensions, and the new dimensions and sizes that they map to. **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. Returns ------- unstacked : Variable Variable with the same attributes but unstacked data. 
See also -------- Variable.stack Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `func(x)` without an axis argument). keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. # Insert np.newaxis for removed dims # Reduce has produced a scalar value, not an array-like Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. # can't do this lazily: we need to loop through variables at least # twice # TODO: deprecate this option -- we don't need it for groupby # any more. True if two Variables have the same dimensions and values; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. This method is necessary because `v1 == v2` for Variables does element-wise comparisons (like numpy.ndarrays). True if two Variables have the values after being broadcast against each other; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. Like equals, but also checks attributes. True if the intersection of two Variable's non-null data is equal; otherwise false. Variables can thus still be equal if there are locations where either, or both, contain NaN values. Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements. Parameters ---------- q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply quantile. 
interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: * linear: ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. * lower: ``i``. * higher: ``j``. * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- quantiles : Variable If `q` is a single quantile, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return array. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, pandas.Series.quantile, Dataset.quantile, DataArray.quantile # move quantile axis to end. required for apply_ufunc # for backward compatibility Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks. NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : str Dimension over which to compute rank. pct : bool, optional If True, compute percentage ranks, otherwise compute integer ranks. Returns ------- ranked : Variable See Also -------- Dataset.rank, DataArray.rank Make a rolling_window along dim and add a new_dim to the last place. Parameters ---------- dim : str Dimension over which to compute rolling_window. For nd-rolling, should be list of dimensions. window : int Window size of the rolling For nd-rolling, should be list of integers. window_dim : str New name of the window dimension. For nd-rolling, should be list of integers. center : bool, default: False If True, pad fill_value for both ends. Otherwise, pad in the head of the axis. fill_value value to be filled. Returns ------- Variable that is a view of the original array with a added dimension of size w. The return dim: self.dims + (window_dim, ) The return shape: self.shape + (window, ) Examples -------- >>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4))) >>> v.rolling_window("b", 3, "window_dim") <xarray.Variable (a: 2, b: 4, window_dim: 3)> array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.]], <BLANKLINE> [[nan, nan, 4.], [nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.]]]) >>> v.rolling_window("b", 3, "window_dim", center=True) <xarray.Variable (a: 2, b: 4, window_dim: 3)> array([[[nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.], [ 2., 3., nan]], <BLANKLINE> [[nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.], [ 6., 7., nan]]]) # np.nan is passed Apply reduction function. Construct a reshaped-array for coarsen # remove unrelated dimensions # trim or pad the object # pad Test each value in the array for whether it is a missing value. Returns ------- isnull : Variable Same type and shape as object, but the dtype of the data is bool. 
See Also -------- pandas.isnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var <xarray.Variable (x: 3)> array([ 1., nan, 3.]) >>> var.isnull() <xarray.Variable (x: 3)> array([False, True, False]) Test each value in the array for whether it is not a missing value. Returns ------- notnull : Variable Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.notnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var <xarray.Variable (x: 3)> array([ 1., nan, 3.]) >>> var.notnull() <xarray.Variable (x: 3)> array([ True, False, True]) A (private) method to convert datetime array to numeric dtype See duck_array_ops.datetime_to_numeric Apply argmin or argmax over one or more dimensions, returning the result as a dict of DataArray that can be passed directly to isel. # In future, should do this also when (dim is None and axis is None) # Return int index if single dimension is passed, and is not part of a # sequence # Get a name for the new dimension that does not conflict with any existing # dimension Index or indices of the minimum of the Variable over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple minima, the indices of the first one found will be returned. Parameters ---------- dim : hashable, sequence of hashable or ..., optional The dimensions over which to find the minimum. By default, finds minimum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Variable or dict of Variable See also -------- DataArray.argmin, DataArray.idxmin Index or indices of the maximum of the Variable over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple maxima, the indices of the first one found will be returned. Parameters ---------- dim : hashable, sequence of hashable or ..., optional The dimensions over which to find the maximum. By default, finds maximum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. 
skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Variable or dict of Variable See also -------- DataArray.argmax, DataArray.idxmax Wrapper for accommodating a pandas.Index in an xarray.Variable. IndexVariable preserve loaded values in the form of a pandas.Index instead of a NumPy array. Hence, their values are immutable and must always be one- dimensional. They also have a name property, which is the name of their sole dimension unless another name is given. # Unlike in Variable, always eagerly load values into memory # Don't waste time converting pd.Index to np.ndarray # data is already loaded into memory for IndexVariable # https://github.com/python/mypy/issues/1465 # type: ignore # type: ignore # Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk() # Dummy # Dummy # returns Variable rather than IndexVariable if multi-dimensional Specialized version of Variable.concat for IndexVariable objects. This exists because we want to avoid converting Index objects to NumPy arrays, if possible. Returns a copy of this object. `deep` is ignored since data is stored in the form of pandas.Index, which is already immutable. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Deep is ignored when data is given. Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original. # if equiv is specified, super up # otherwise use the native index equals, rather than looking at _data Return this variable as an xarray.IndexVariable Convert this variable to a pandas.Index # n.b. creating a new pandas.Index from an old pandas.Index is # basically free as pandas.Index objects are immutable # set default names for multi-index unnamed levels so that # we can safely rename dimension / coordinate later Return MultiIndex level names or None if this IndexVariable has no MultiIndex. Return a new IndexVariable from a given MultiIndex level. # for backwards compatibility # validate dimensions Create broadcast compatible variables, with the same dimensions. Unlike the result of broadcast_variables(), some variables may have dimensions of size 1 instead of the the size of the broadcast dimension. Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. # `other` satisfies the necessary Variable API for broadcast_variables # rely on numpy broadcasting rules Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. 
dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. Check for uniqueness of MultiIndex level names in all given variables. Not public API. Used for checking consistency of DataArray and Dataset objects. # Check confliction between level names and dimensions GH:2299 | 2.126207 | 2 |
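A minimal usage sketch of the Variable methods documented in the record above (an illustration only, assuming numpy and xarray are installed; the small variable `v` and its sizes are hypothetical):

import numpy as np
import xarray as xr
# Build a small 2-D Variable to exercise the documented methods.
v = xr.Variable(("x", "y"), np.arange(6).reshape(2, 3))
# stack: collapse existing dimensions into one new dimension, in C order.
stacked = v.stack(z=("x", "y"))
assert stacked.dims == ("z",) and stacked.shape == (6,)
# unstack: reverse the operation by supplying the original sizes.
roundtrip = stacked.unstack(z={"x": 2, "y": 3})
assert roundtrip.shape == v.shape
# rolling_window: append a window dimension, padding with NaN.
windows = v.rolling_window("y", 3, "win", center=True)
assert windows.shape == (2, 3, 3)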
codeforces.com/1186A/solution.py | zubtsov/competitive-programming | 0 | 712 | number_of_participants, number_of_pens, number_of_notebooks = map(int, input().split())
if number_of_pens >= number_of_participants and number_of_notebooks >= number_of_participants:
print('Yes')
else:
print('No')
| number_of_participants, number_of_pens, number_of_notebooks = map(int, input().split())
if number_of_pens >= number_of_participants and number_of_notebooks >= number_of_participants:
print('Yes')
else:
print('No')
| none | 1 | 3.748929 | 4 |
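A quick smoke test of the same Yes/No condition (hypothetical helper and sample values, not part of the submitted solution):

# Hypothetical helper mirroring the check in the solution above.
def enough_supplies(participants, pens, notebooks):
    return 'Yes' if pens >= participants and notebooks >= participants else 'No'
assert enough_supplies(5, 8, 6) == 'Yes'
assert enough_supplies(5, 4, 10) == 'No'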
|
DLA/__main__.py | StanczakDominik/DLA | 0 | 713 | <reponame>StanczakDominik/DLA
from DLA import main_single
d = main_single(1, gotosize=[1e4, 5e4])
d.plot_particles()
d.plot_mass_distribution()
| from DLA import main_single
d = main_single(1, gotosize=[1e4, 5e4])
d.plot_particles()
d.plot_mass_distribution() | none | 1 | 1.568581 | 2 |
|
pyamf/tests/test_util.py | bulutistan/Py3AMF | 42 | 714 | # -*- coding: utf-8 -*-
#
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for AMF utilities.
@since: 0.1.0
"""
import unittest
from datetime import datetime
from io import BytesIO
import pyamf
from pyamf import util
from pyamf.tests.util import replace_dict
PosInf = 1e300000
NegInf = -1e300000
NaN = PosInf / PosInf
def isNaN(val):
return str(float(val)) == str(NaN)
def isPosInf(val):
return str(float(val)) == str(PosInf)
def isNegInf(val):
return str(float(val)) == str(NegInf)
class TimestampTestCase(unittest.TestCase):
"""
Test UTC timestamps.
"""
def test_get_timestamp(self):
self.assertEqual(
util.get_timestamp(datetime(2007, 11, 12)),
1194825600
)
def test_get_datetime(self):
self.assertEqual(util.get_datetime(1194825600), datetime(2007, 11, 12))
def test_get_negative_datetime(self):
self.assertEqual(util.get_datetime(-31536000), datetime(1969, 1, 1))
def test_preserved_microseconds(self):
dt = datetime(2009, 3, 8, 23, 30, 47, 770122)
ts = util.get_timestamp(dt)
self.assertEqual(util.get_datetime(ts), dt)
class StringIOTestCase(unittest.TestCase):
def test_create(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
self.assertEqual(sp.getvalue(), b'')
sp = util.BufferedByteStream(None)
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('')
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('spam')
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'spam')
self.assertEqual(len(sp), 4)
sp = util.BufferedByteStream(BytesIO('this is a test'.encode()))
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'this is a test')
self.assertEqual(len(sp), 14)
self.assertRaises(TypeError, util.BufferedByteStream, self)
def test_getvalue(self):
sp = util.BufferedByteStream()
sp.write('asdfasdf')
self.assertEqual(sp.getvalue(), b'asdfasdf')
sp.write('spam')
self.assertEqual(sp.getvalue(), b'asdfasdfspam')
def test_read(self):
sp = util.BufferedByteStream('this is a test')
self.assertEqual(len(sp), 14)
self.assertEqual(sp.read(1), b't')
self.assertEqual(sp.getvalue(), b'this is a test')
self.assertEqual(len(sp), 14)
self.assertEqual(sp.read(10), b'his is a t')
self.assertEqual(sp.read(), b'est')
def test_seek(self):
sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.tell(), 0)
# Relative to the beginning of the stream
sp.seek(0, 0)
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'a')
self.assertEqual(len(sp), 26)
sp.seek(10, 0)
self.assertEqual(sp.tell(), 10)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'k')
self.assertEqual(len(sp), 26)
sp.seek(-5, 1)
self.assertEqual(sp.tell(), 6)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'g')
self.assertEqual(len(sp), 26)
sp.seek(-3, 2)
self.assertEqual(sp.tell(), 23)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'x')
self.assertEqual(len(sp), 26)
def test_tell(self):
sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(len(sp), 26)
self.assertEqual(sp.tell(), 0)
sp.read(1)
self.assertEqual(sp.tell(), 1)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(len(sp), 26)
sp.read(5)
self.assertEqual(sp.tell(), 6)
def test_truncate(self):
sp = util.BufferedByteStream('abcdef')
self.assertEqual(sp.getvalue(), b'abcdef')
self.assertEqual(len(sp), 6)
sp.truncate()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('hello')
self.assertEqual(sp.getvalue(), b'hello')
self.assertEqual(len(sp), 5)
sp.truncate(3)
self.assertEqual(sp.getvalue(), b'hel')
self.assertEqual(len(sp), 3)
def test_write(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
self.assertEqual(sp.tell(), 0)
sp.write('hello')
self.assertEqual(sp.getvalue(), b'hello')
self.assertEqual(len(sp), 5)
self.assertEqual(sp.tell(), 5)
sp = util.BufferedByteStream(b'xyz')
self.assertEqual(sp.getvalue(), b'xyz')
self.assertEqual(len(sp), 3)
self.assertEqual(sp.tell(), 0)
sp.write('abc')
self.assertEqual(sp.getvalue(), b'abc')
self.assertEqual(len(sp), 3)
self.assertEqual(sp.tell(), 3)
def test_len(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
self.assertEqual(sp.tell(), 0)
sp.write('xyz')
self.assertEqual(len(sp), 3)
sp = util.BufferedByteStream('foo')
self.assertEqual(len(sp), 3)
sp.seek(0, 2)
sp.write('xyz')
self.assertEqual(len(sp), 6)
def test_consume(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(sp.tell(), 0)
sp.consume()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(sp.tell(), 0)
sp = util.BufferedByteStream('foobar')
self.assertEqual(sp.getvalue(), b'foobar')
self.assertEqual(sp.tell(), 0)
sp.seek(3)
self.assertEqual(sp.tell(), 3)
sp.consume()
self.assertEqual(sp.getvalue(), b'bar')
self.assertEqual(sp.tell(), 0)
# from ticket 451 - http://pyamf.org/ticket/451
sp = util.BufferedByteStream('abcdef')
# move the stream pos to the end
sp.read()
self.assertEqual(len(sp), 6)
sp.consume()
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('abcdef')
sp.seek(6)
sp.consume()
self.assertEqual(sp.getvalue(), b'')
class DataTypeMixInTestCase(unittest.TestCase):
endians = ('>', '<') # big, little
def _write_endian(self, obj, func, args, expected):
old_endian = obj.endian
for x in range(2):
obj.truncate()
obj.endian = self.endians[x]
func(*args)
self.assertEqual(obj.getvalue(), expected[x])
obj.endian = old_endian
def _read_endian(self, data, func, args, expected):
for x in range(2):
obj = util.BufferedByteStream(data[x])
obj.endian = self.endians[x]
result = getattr(obj, func)(*args)
self.assertEqual(result, expected)
def test_read_uchar(self):
x = util.BufferedByteStream(b'\x00\xff')
self.assertEqual(x.read_uchar(), 0)
self.assertEqual(x.read_uchar(), 255)
def test_write_uchar(self):
x = util.BufferedByteStream()
x.write_uchar(0)
self.assertEqual(x.getvalue(), b'\x00')
x.write_uchar(255)
self.assertEqual(x.getvalue(), b'\x00\xff')
self.assertRaises(OverflowError, x.write_uchar, 256)
self.assertRaises(OverflowError, x.write_uchar, -1)
self.assertRaises(TypeError, x.write_uchar, 'f')
def test_read_char(self):
x = util.BufferedByteStream(b'\x00\x7f\xff\x80')
self.assertEqual(x.read_char(), 0)
self.assertEqual(x.read_char(), 127)
self.assertEqual(x.read_char(), -1)
self.assertEqual(x.read_char(), -128)
def test_write_char(self):
x = util.BufferedByteStream()
x.write_char(0)
x.write_char(-128)
x.write_char(127)
self.assertEqual(x.getvalue(), b'\x00\x80\x7f')
self.assertRaises(OverflowError, x.write_char, 128)
self.assertRaises(OverflowError, x.write_char, -129)
self.assertRaises(TypeError, x.write_char, 'f')
def test_write_ushort(self):
x = util.BufferedByteStream()
self._write_endian(x, x.write_ushort, (0,), (b'\x00\x00', b'\x00\x00'))
self._write_endian(x, x.write_ushort, (12345,), (b'09', b'90'))
self._write_endian(
x,
x.write_ushort,
(65535,),
(b'\xff\xff', b'\xff\xff')
)
self.assertRaises(OverflowError, x.write_ushort, 65536)
self.assertRaises(OverflowError, x.write_ushort, -1)
self.assertRaises(TypeError, x.write_ushort, 'aa')
def test_read_ushort(self):
self._read_endian([b'\x00\x00', b'\x00\x00'], 'read_ushort', (), 0)
self._read_endian(['09', '90'], 'read_ushort', (), 12345)
self._read_endian([b'\xff\xff', b'\xff\xff'], 'read_ushort', (), 65535)
def test_write_short(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_short,
(-5673,),
(b'\xe9\xd7', b'\xd7\xe9')
)
self._write_endian(
x, x.write_short,
(32767,),
(b'\x7f\xff', b'\xff\x7f')
)
self.assertRaises(OverflowError, x.write_short, 32768)
self.assertRaises(OverflowError, x.write_short, -32769)
self.assertRaises(TypeError, x.write_short, '\x00\x00')
def test_read_short(self):
self._read_endian([b'\xe9\xd7', b'\xd7\xe9'], 'read_short', (), -5673)
self._read_endian([b'\x7f\xff', b'\xff\x7f'], 'read_short', (), 32767)
def test_write_ulong(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_ulong,
(0,),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')
)
self._write_endian(
x,
x.write_ulong,
(16810049,),
(b'\x01\x00\x80A', b'A\x80\x00\x01')
)
self._write_endian(
x,
x.write_ulong,
(4294967295,),
(b'\xff\xff\xff\xff', b'\xff\xff\xff\xff')
)
self.assertRaises(OverflowError, x.write_ulong, 4294967296)
self.assertRaises(OverflowError, x.write_ulong, -1)
self.assertRaises(TypeError, x.write_ulong, '\x00\x00\x00\x00')
def test_read_ulong(self):
self._read_endian(
[b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'],
'read_ulong',
(),
0
)
self._read_endian(
[b'\x01\x00\x80A', b'A\x80\x00\x01'],
'read_ulong',
(),
16810049
)
self._read_endian(
[b'\xff\xff\xff\xff', b'\xff\xff\xff\xff'],
'read_ulong',
(),
4294967295
)
def test_write_long(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_long,
(0,),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')
)
self._write_endian(
x,
x.write_long,
(16810049,),
(b'\x01\x00\x80A', b'A\x80\x00\x01')
)
self._write_endian(
x,
x.write_long,
(2147483647,),
(b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f')
)
self._write_endian(
x,
x.write_long,
(-2147483648,),
(b'\x80\x00\x00\x00', b'\x00\x00\x00\x80')
)
self.assertRaises(OverflowError, x.write_long, 2147483648)
self.assertRaises(OverflowError, x.write_long, -2147483649)
self.assertRaises(TypeError, x.write_long, '\x00\x00\x00\x00')
def test_read_long(self):
self._read_endian(
[b'\xff\xff\xcf\xc7', b'\xc7\xcf\xff\xff'],
'read_long',
(),
-12345
)
self._read_endian(
[b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'],
'read_long',
(),
0
)
self._read_endian(
[b'\x01\x00\x80A', b'A\x80\x00\x01'],
'read_long',
(),
16810049
)
self._read_endian(
[b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f'],
'read_long',
(),
2147483647
)
def test_write_u24bit(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_24bit_uint,
(0,),
(b'\x00\x00\x00', b'\x00\x00\x00')
)
self._write_endian(
x,
x.write_24bit_uint,
(4292609,),
(b'A\x80\x01', b'\x01\x80A')
)
self._write_endian(
x,
x.write_24bit_uint,
(16777215,),
(b'\xff\xff\xff', b'\xff\xff\xff')
)
self.assertRaises(OverflowError, x.write_24bit_uint, 16777216)
self.assertRaises(OverflowError, x.write_24bit_uint, -1)
self.assertRaises(TypeError, x.write_24bit_uint, '\x00\x00\x00')
def test_read_u24bit(self):
self._read_endian(
[b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_uint', (), 0
)
self._read_endian(
[b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_uint', (), 128
)
self._read_endian(
[b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_uint', (), 8388608
)
self._read_endian(
[b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_uint', (), 16777087
)
self._read_endian(
[b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_uint', (), 8388607
)
def test_write_24bit(self):
x = util.BufferedByteStream()
self._write_endian(
x, x.write_24bit_int, (0,), (b'\x00\x00\x00', b'\x00\x00\x00')
)
self._write_endian(
x, x.write_24bit_int, (128,), (b'\x00\x00\x80', b'\x80\x00\x00')
)
self._write_endian(
x, x.write_24bit_int, (8388607,), (b'\x7f\xff\xff', b'\xff\xff\x7f')
)
self._write_endian(
x, x.write_24bit_int, (-1,), (b'\xff\xff\xff', b'\xff\xff\xff')
)
self._write_endian(
x, x.write_24bit_int, (-8388608,), (b'\x80\x00\x00', b'\x00\x00\x80')
)
self.assertRaises(OverflowError, x.write_24bit_int, 8388608)
self.assertRaises(OverflowError, x.write_24bit_int, -8388609)
self.assertRaises(TypeError, x.write_24bit_int, '\x00\x00\x00')
def test_read_24bit(self):
self._read_endian(
[b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_int', (), 0
)
self._read_endian(
[b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_int', (), 128
)
self._read_endian(
[b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_int', (), -8388608
)
self._read_endian(
[b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_int', (), -129
)
self._read_endian(
[b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_int', (), 8388607
)
def test_write_float(self):
x = util.BufferedByteStream()
self._write_endian(
x, x.write_float, (0.2,), (b'>L\xcc\xcd', b'\xcd\xccL>')
)
self.assertRaises(TypeError, x.write_float, 'foo')
def test_read_float(self):
self._read_endian(
[b'?\x00\x00\x00', b'\x00\x00\x00?'], 'read_float', (), 0.5
)
def test_write_double(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_double,
(0.2,),
(b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?')
)
self.assertRaises(TypeError, x.write_double, 'foo')
def test_read_double(self):
self._read_endian(
[b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?'],
'read_double',
(),
0.2
)
def test_write_utf8_string(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_utf8_string,
(u'ᚠᛇᚻ',),
[b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2
)
self.assertRaises(TypeError, x.write_utf8_string, 1)
self.assertRaises(TypeError, x.write_utf8_string, 1.0)
self.assertRaises(TypeError, x.write_utf8_string, object())
x.write_utf8_string('\xff')
def test_read_utf8_string(self):
self._read_endian(
[b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2,
'read_utf8_string',
(9,),
u'ᚠᛇᚻ'
)
def test_nan(self):
x = util.BufferedByteStream(b'\xff\xf8\x00\x00\x00\x00\x00\x00')
self.assertTrue(isNaN(x.read_double()))
x = util.BufferedByteStream(b'\xff\xf0\x00\x00\x00\x00\x00\x00')
self.assertTrue(isNegInf(x.read_double()))
x = util.BufferedByteStream(b'\x7f\xf0\x00\x00\x00\x00\x00\x00')
self.assertTrue(isPosInf(x.read_double()))
# now test little endian
x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf8\xff')
x.endian = '<'
self.assertTrue(isNaN(x.read_double()))
x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\xff')
x.endian = '<'
self.assertTrue(isNegInf(x.read_double()))
x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\x7f')
x.endian = '<'
self.assertTrue(isPosInf(x.read_double()))
def test_write_infinites(self):
x = util.BufferedByteStream()
self._write_endian(x, x.write_double, (NaN,), (
b'\xff\xf8\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf8\xff'
))
self._write_endian(x, x.write_double, (PosInf,), (
b'\x7f\xf0\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf0\x7f'
))
self._write_endian(x, x.write_double, (NegInf,), (
b'\xff\xf0\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf0\xff'
))
class BufferedByteStreamTestCase(unittest.TestCase):
"""
Tests for L{BufferedByteStream<util.BufferedByteStream>}
"""
def test_create(self):
x = util.BufferedByteStream()
self.assertEqual(x.getvalue(), b'')
self.assertEqual(x.tell(), 0)
x = util.BufferedByteStream('abc')
self.assertEqual(x.getvalue(), b'abc')
self.assertEqual(x.tell(), 0)
def test_read(self):
x = util.BufferedByteStream()
self.assertEqual(x.tell(), 0)
self.assertEqual(len(x), 0)
self.assertRaises(IOError, x.read)
self.assertRaises(IOError, x.read, 10)
x.write('hello')
x.seek(0)
self.assertRaises(IOError, x.read, 10)
self.assertEqual(x.read(), b'hello')
def test_read_negative(self):
"""
@see: #799
"""
x = util.BufferedByteStream()
x.write('*' * 6000)
x.seek(100)
self.assertRaises(IOError, x.read, -345)
def test_peek(self):
x = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz')
self.assertEqual(x.tell(), 0)
self.assertEqual(x.peek(), b'a')
self.assertEqual(x.peek(5), b'abcde')
self.assertEqual(x.peek(-1), b'abcdefghijklmnopqrstuvwxyz')
x.seek(10)
self.assertEqual(x.peek(50), b'klmnopqrstuvwxyz')
def test_eof(self):
x = util.BufferedByteStream()
self.assertTrue(x.at_eof())
x.write('hello')
x.seek(0)
self.assertFalse(x.at_eof())
x.seek(0, 2)
self.assertTrue(x.at_eof())
def test_remaining(self):
x = util.BufferedByteStream('spameggs')
self.assertEqual(x.tell(), 0)
self.assertEqual(x.remaining(), 8)
x.seek(2)
self.assertEqual(x.tell(), 2)
self.assertEqual(x.remaining(), 6)
def test_add(self):
a = util.BufferedByteStream('a')
b = util.BufferedByteStream('b')
c = a + b
self.assertTrue(isinstance(c, util.BufferedByteStream))
self.assertEqual(c.getvalue(), b'ab')
self.assertEqual(c.tell(), 0)
def test_add_pos(self):
a = util.BufferedByteStream(b'abc')
b = util.BufferedByteStream(b'def')
a.seek(1)
b.seek(0, 2)
self.assertEqual(a.tell(), 1)
self.assertEqual(b.tell(), 3)
self.assertEqual(a.tell(), 1)
self.assertEqual(b.tell(), 3)
def test_append_types(self):
# test non string types
a = util.BufferedByteStream()
self.assertRaises(TypeError, a.append, 234234)
self.assertRaises(TypeError, a.append, 234.0)
self.assertRaises(TypeError, a.append, 234234)
self.assertRaises(TypeError, a.append, [])
self.assertRaises(TypeError, a.append, {})
self.assertRaises(TypeError, a.append, lambda _: None)
self.assertRaises(TypeError, a.append, ())
self.assertRaises(TypeError, a.append, object())
def test_append_string(self):
"""
Test L{util.BufferedByteStream.append} with C{str} objects.
"""
# test empty
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append('foo')
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 3)
# test pointer beginning, some data
a = util.BufferedByteStream('bar')
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer middle, some data
a = util.BufferedByteStream('bar')
a.seek(2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 2)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer end, some data
a = util.BufferedByteStream('bar')
a.seek(0, 2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 3)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
class Foo(object):
def getvalue(self):
return b'foo'
def __str__(self):
raise AttributeError()
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append(Foo())
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
def test_append_unicode(self):
"""
Test L{util.BufferedByteStream.append} with C{unicode} objects.
"""
# test empty
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append('foo')
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 3)
# test pointer beginning, some data
a = util.BufferedByteStream('bar')
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer middle, some data
a = util.BufferedByteStream('bar')
a.seek(2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 2)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer end, some data
a = util.BufferedByteStream('bar')
a.seek(0, 2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 3)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
class Foo(object):
def getvalue(self):
return u'foo'
def __str__(self):
raise AttributeError()
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append(Foo())
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
class DummyAlias(pyamf.ClassAlias):
pass
class AnotherDummyAlias(pyamf.ClassAlias):
pass
class YADummyAlias(pyamf.ClassAlias):
pass
class ClassAliasTestCase(unittest.TestCase):
def setUp(self):
self.old_aliases = pyamf.ALIAS_TYPES.copy()
def tearDown(self):
replace_dict(self.old_aliases, pyamf.ALIAS_TYPES)
def test_simple(self):
class A(object):
pass
pyamf.register_alias_type(DummyAlias, A)
self.assertEqual(util.get_class_alias(A), DummyAlias)
def test_nested(self):
class A(object):
pass
class B(object):
pass
class C(object):
pass
pyamf.register_alias_type(DummyAlias, A, B, C)
self.assertEqual(util.get_class_alias(B), DummyAlias)
def test_multiple(self):
class A(object):
pass
class B(object):
pass
class C(object):
pass
pyamf.register_alias_type(DummyAlias, A)
pyamf.register_alias_type(AnotherDummyAlias, B)
pyamf.register_alias_type(YADummyAlias, C)
self.assertEqual(util.get_class_alias(B), AnotherDummyAlias)
self.assertEqual(util.get_class_alias(C), YADummyAlias)
self.assertEqual(util.get_class_alias(A), DummyAlias)
def test_none_existant(self):
self.assertEqual(util.get_class_alias(self.__class__), None)
def test_subclass(self):
class A(object):
pass
class B(A):
pass
pyamf.register_alias_type(DummyAlias, A)
self.assertEqual(util.get_class_alias(B), DummyAlias)
class IsClassSealedTestCase(unittest.TestCase):
"""
Tests for L{util.is_class_sealed}
"""
def test_new_mixed(self):
class A(object):
__slots__ = ['foo', 'bar']
class B(A):
pass
class C(B):
__slots__ = ('spam', 'eggs')
self.assertTrue(util.is_class_sealed(A))
self.assertFalse(util.is_class_sealed(B))
self.assertFalse(util.is_class_sealed(C))
def test_deep(self):
class A(object):
__slots__ = ['foo', 'bar']
class B(A):
__slots__ = ('gak',)
class C(B):
pass
self.assertTrue(util.is_class_sealed(A))
self.assertTrue(util.is_class_sealed(B))
self.assertFalse(util.is_class_sealed(C))
class GetClassMetaTestCase(unittest.TestCase):
"""
Tests for L{util.get_class_meta}
"""
def test_types(self):
class A:
pass
class B(object):
pass
for t in ['', u'', 1, 1.0, 1, [], {}, object, object(), A(), B()]:
self.assertRaises(TypeError, util.get_class_meta, t)
def test_no_meta(self):
class A:
pass
class B(object):
pass
empty = {
'readonly_attrs': None,
'static_attrs': None,
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'exclude_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), empty)
self.assertEqual(util.get_class_meta(B), empty)
def test_alias(self):
class A:
class __amf__:
alias = 'foo.bar.Spam'
class B(object):
class __amf__:
alias = 'foo.bar.Spam'
meta = {
'readonly_attrs': None,
'static_attrs': None,
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': 'foo.bar.Spam',
'amf3': None,
'exclude_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_static(self):
class A:
class __amf__:
static = ['foo', 'bar']
class B(object):
class __amf__:
static = ['foo', 'bar']
meta = {
'readonly_attrs': None,
'static_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'exclude_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_exclude(self):
class A:
class __amf__:
exclude = ['foo', 'bar']
class B(object):
class __amf__:
exclude = ['foo', 'bar']
meta = {
'readonly_attrs': None,
'exclude_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_readonly(self):
class A:
class __amf__:
readonly = ['foo', 'bar']
class B(object):
class __amf__:
readonly = ['foo', 'bar']
meta = {
'exclude_attrs': None,
'readonly_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None,
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_amf3(self):
class A:
class __amf__:
amf3 = True
class B(object):
class __amf__:
amf3 = True
meta = {
'exclude_attrs': None,
'proxy_attrs': None,
'synonym_attrs': None,
'readonly_attrs': None,
'dynamic': None,
'alias': None,
'amf3': True,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_dynamic(self):
class A:
class __amf__:
dynamic = False
class B(object):
class __amf__:
dynamic = False
meta = {
'exclude_attrs': None,
'proxy_attrs': None,
'synonym_attrs': None,
'readonly_attrs': None,
'dynamic': False,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_external(self):
class A:
class __amf__:
external = True
class B(object):
class __amf__:
external = True
meta = {
'exclude_attrs': None,
'proxy_attrs': None,
'synonym_attrs': None,
'readonly_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': True
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_dict(self):
meta = {
'exclude': ['foo'],
'readonly': ['bar'],
'dynamic': False,
'alias': 'spam.eggs',
'proxy_attrs': None,
'synonym_attrs': None,
'amf3': True,
'static': ['baz'],
'external': True
}
class A:
__amf__ = meta
class B(object):
__amf__ = meta
ret = {
'readonly_attrs': ['bar'],
'static_attrs': ['baz'],
'proxy_attrs': None,
'dynamic': False,
'alias': 'spam.eggs',
'amf3': True,
'exclude_attrs': ['foo'],
'synonym_attrs': None,
'external': True
}
self.assertEqual(util.get_class_meta(A), ret)
self.assertEqual(util.get_class_meta(B), ret)
def test_proxy(self):
class A:
class __amf__:
proxy = ['foo', 'bar']
class B(object):
class __amf__:
proxy = ['foo', 'bar']
meta = {
'exclude_attrs': None,
'readonly_attrs': None,
'proxy_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_synonym(self):
class A:
class __amf__:
synonym = {'foo': 'bar'}
class B(object):
class __amf__:
synonym = {'foo': 'bar'}
meta = {
'exclude_attrs': None,
'readonly_attrs': None,
'proxy_attrs': None,
'synonym_attrs': {'foo': 'bar'},
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
| # -*- coding: utf-8 -*-
#
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for AMF utilities.
@since: 0.1.0
"""
import unittest
from datetime import datetime
from io import BytesIO
import pyamf
from pyamf import util
from pyamf.tests.util import replace_dict
PosInf = 1e300000
NegInf = -1e300000
NaN = PosInf / PosInf
def isNaN(val):
return str(float(val)) == str(NaN)
def isPosInf(val):
return str(float(val)) == str(PosInf)
def isNegInf(val):
return str(float(val)) == str(NegInf)
class TimestampTestCase(unittest.TestCase):
"""
Test UTC timestamps.
"""
def test_get_timestamp(self):
self.assertEqual(
util.get_timestamp(datetime(2007, 11, 12)),
1194825600
)
def test_get_datetime(self):
self.assertEqual(util.get_datetime(1194825600), datetime(2007, 11, 12))
def test_get_negative_datetime(self):
self.assertEqual(util.get_datetime(-31536000), datetime(1969, 1, 1))
def test_preserved_microseconds(self):
dt = datetime(2009, 3, 8, 23, 30, 47, 770122)
ts = util.get_timestamp(dt)
self.assertEqual(util.get_datetime(ts), dt)
class StringIOTestCase(unittest.TestCase):
def test_create(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
self.assertEqual(sp.getvalue(), b'')
sp = util.BufferedByteStream(None)
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('')
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('spam')
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'spam')
self.assertEqual(len(sp), 4)
sp = util.BufferedByteStream(BytesIO('this is a test'.encode()))
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'this is a test')
self.assertEqual(len(sp), 14)
self.assertRaises(TypeError, util.BufferedByteStream, self)
def test_getvalue(self):
sp = util.BufferedByteStream()
sp.write('asdfasdf')
self.assertEqual(sp.getvalue(), b'asdfasdf')
sp.write('spam')
self.assertEqual(sp.getvalue(), b'asdfasdfspam')
def test_read(self):
sp = util.BufferedByteStream('this is a test')
self.assertEqual(len(sp), 14)
self.assertEqual(sp.read(1), b't')
self.assertEqual(sp.getvalue(), b'this is a test')
self.assertEqual(len(sp), 14)
self.assertEqual(sp.read(10), b'his is a t')
self.assertEqual(sp.read(), b'est')
def test_seek(self):
sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.tell(), 0)
# Relative to the beginning of the stream
sp.seek(0, 0)
self.assertEqual(sp.tell(), 0)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'a')
self.assertEqual(len(sp), 26)
sp.seek(10, 0)
self.assertEqual(sp.tell(), 10)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'k')
self.assertEqual(len(sp), 26)
sp.seek(-5, 1)
self.assertEqual(sp.tell(), 6)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'g')
self.assertEqual(len(sp), 26)
sp.seek(-3, 2)
self.assertEqual(sp.tell(), 23)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.read(1), b'x')
self.assertEqual(len(sp), 26)
def test_tell(self):
sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz')
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(len(sp), 26)
self.assertEqual(sp.tell(), 0)
sp.read(1)
self.assertEqual(sp.tell(), 1)
self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz')
self.assertEqual(len(sp), 26)
sp.read(5)
self.assertEqual(sp.tell(), 6)
def test_truncate(self):
sp = util.BufferedByteStream('abcdef')
self.assertEqual(sp.getvalue(), b'abcdef')
self.assertEqual(len(sp), 6)
sp.truncate()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('hello')
self.assertEqual(sp.getvalue(), b'hello')
self.assertEqual(len(sp), 5)
sp.truncate(3)
self.assertEqual(sp.getvalue(), b'hel')
self.assertEqual(len(sp), 3)
def test_write(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
self.assertEqual(sp.tell(), 0)
sp.write('hello')
self.assertEqual(sp.getvalue(), b'hello')
self.assertEqual(len(sp), 5)
self.assertEqual(sp.tell(), 5)
sp = util.BufferedByteStream(b'xyz')
self.assertEqual(sp.getvalue(), b'xyz')
self.assertEqual(len(sp), 3)
self.assertEqual(sp.tell(), 0)
sp.write('abc')
self.assertEqual(sp.getvalue(), b'abc')
self.assertEqual(len(sp), 3)
self.assertEqual(sp.tell(), 3)
def test_len(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(len(sp), 0)
self.assertEqual(sp.tell(), 0)
sp.write('xyz')
self.assertEqual(len(sp), 3)
sp = util.BufferedByteStream('foo')
self.assertEqual(len(sp), 3)
sp.seek(0, 2)
sp.write('xyz')
self.assertEqual(len(sp), 6)
def test_consume(self):
sp = util.BufferedByteStream()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(sp.tell(), 0)
sp.consume()
self.assertEqual(sp.getvalue(), b'')
self.assertEqual(sp.tell(), 0)
sp = util.BufferedByteStream('foobar')
self.assertEqual(sp.getvalue(), b'foobar')
self.assertEqual(sp.tell(), 0)
sp.seek(3)
self.assertEqual(sp.tell(), 3)
sp.consume()
self.assertEqual(sp.getvalue(), b'bar')
self.assertEqual(sp.tell(), 0)
# from ticket 451 - http://pyamf.org/ticket/451
sp = util.BufferedByteStream('abcdef')
# move the stream pos to the end
sp.read()
self.assertEqual(len(sp), 6)
sp.consume()
self.assertEqual(len(sp), 0)
sp = util.BufferedByteStream('abcdef')
sp.seek(6)
sp.consume()
self.assertEqual(sp.getvalue(), b'')
class DataTypeMixInTestCase(unittest.TestCase):
endians = ('>', '<') # big, little
def _write_endian(self, obj, func, args, expected):
old_endian = obj.endian
for x in range(2):
obj.truncate()
obj.endian = self.endians[x]
func(*args)
self.assertEqual(obj.getvalue(), expected[x])
obj.endian = old_endian
def _read_endian(self, data, func, args, expected):
for x in range(2):
obj = util.BufferedByteStream(data[x])
obj.endian = self.endians[x]
result = getattr(obj, func)(*args)
self.assertEqual(result, expected)
def test_read_uchar(self):
x = util.BufferedByteStream(b'\x00\xff')
self.assertEqual(x.read_uchar(), 0)
self.assertEqual(x.read_uchar(), 255)
def test_write_uchar(self):
x = util.BufferedByteStream()
x.write_uchar(0)
self.assertEqual(x.getvalue(), b'\x00')
x.write_uchar(255)
self.assertEqual(x.getvalue(), b'\x00\xff')
self.assertRaises(OverflowError, x.write_uchar, 256)
self.assertRaises(OverflowError, x.write_uchar, -1)
self.assertRaises(TypeError, x.write_uchar, 'f')
def test_read_char(self):
x = util.BufferedByteStream(b'\x00\x7f\xff\x80')
self.assertEqual(x.read_char(), 0)
self.assertEqual(x.read_char(), 127)
self.assertEqual(x.read_char(), -1)
self.assertEqual(x.read_char(), -128)
def test_write_char(self):
x = util.BufferedByteStream()
x.write_char(0)
x.write_char(-128)
x.write_char(127)
self.assertEqual(x.getvalue(), b'\x00\x80\x7f')
self.assertRaises(OverflowError, x.write_char, 128)
self.assertRaises(OverflowError, x.write_char, -129)
self.assertRaises(TypeError, x.write_char, 'f')
def test_write_ushort(self):
x = util.BufferedByteStream()
self._write_endian(x, x.write_ushort, (0,), (b'\x00\x00', b'\x00\x00'))
self._write_endian(x, x.write_ushort, (12345,), (b'09', b'90'))
self._write_endian(
x,
x.write_ushort,
(65535,),
(b'\xff\xff', b'\xff\xff')
)
self.assertRaises(OverflowError, x.write_ushort, 65536)
self.assertRaises(OverflowError, x.write_ushort, -1)
self.assertRaises(TypeError, x.write_ushort, 'aa')
def test_read_ushort(self):
self._read_endian([b'\x00\x00', b'\x00\x00'], 'read_ushort', (), 0)
self._read_endian(['09', '90'], 'read_ushort', (), 12345)
self._read_endian([b'\xff\xff', b'\xff\xff'], 'read_ushort', (), 65535)
def test_write_short(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_short,
(-5673,),
(b'\xe9\xd7', b'\xd7\xe9')
)
self._write_endian(
x, x.write_short,
(32767,),
(b'\x7f\xff', b'\xff\x7f')
)
self.assertRaises(OverflowError, x.write_short, 32768)
self.assertRaises(OverflowError, x.write_short, -32769)
self.assertRaises(TypeError, x.write_short, '\x00\x00')
def test_read_short(self):
self._read_endian([b'\xe9\xd7', b'\xd7\xe9'], 'read_short', (), -5673)
self._read_endian([b'\x7f\xff', b'\xff\x7f'], 'read_short', (), 32767)
def test_write_ulong(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_ulong,
(0,),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')
)
self._write_endian(
x,
x.write_ulong,
(16810049,),
(b'\x01\x00\x80A', b'A\x80\x00\x01')
)
self._write_endian(
x,
x.write_ulong,
(4294967295,),
(b'\xff\xff\xff\xff', b'\xff\xff\xff\xff')
)
self.assertRaises(OverflowError, x.write_ulong, 4294967296)
self.assertRaises(OverflowError, x.write_ulong, -1)
self.assertRaises(TypeError, x.write_ulong, '\x00\x00\x00\x00')
def test_read_ulong(self):
self._read_endian(
[b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'],
'read_ulong',
(),
0
)
self._read_endian(
[b'\x01\x00\x80A', b'A\x80\x00\x01'],
'read_ulong',
(),
16810049
)
self._read_endian(
[b'\xff\xff\xff\xff', b'\xff\xff\xff\xff'],
'read_ulong',
(),
4294967295
)
def test_write_long(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_long,
(0,),
(b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')
)
self._write_endian(
x,
x.write_long,
(16810049,),
(b'\x01\x00\x80A', b'A\x80\x00\x01')
)
self._write_endian(
x,
x.write_long,
(2147483647,),
(b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f')
)
self._write_endian(
x,
x.write_long,
(-2147483648,),
(b'\x80\x00\x00\x00', b'\x00\x00\x00\x80')
)
self.assertRaises(OverflowError, x.write_long, 2147483648)
self.assertRaises(OverflowError, x.write_long, -2147483649)
self.assertRaises(TypeError, x.write_long, '\x00\x00\x00\x00')
def test_read_long(self):
self._read_endian(
[b'\xff\xff\xcf\xc7', b'\xc7\xcf\xff\xff'],
'read_long',
(),
-12345
)
self._read_endian(
[b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'],
'read_long',
(),
0
)
self._read_endian(
[b'\x01\x00\x80A', b'A\x80\x00\x01'],
'read_long',
(),
16810049
)
self._read_endian(
[b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f'],
'read_long',
(),
2147483647
)
def test_write_u24bit(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_24bit_uint,
(0,),
(b'\x00\x00\x00', b'\x00\x00\x00')
)
self._write_endian(
x,
x.write_24bit_uint,
(4292609,),
(b'A\x80\x01', b'\x01\x80A')
)
self._write_endian(
x,
x.write_24bit_uint,
(16777215,),
(b'\xff\xff\xff', b'\xff\xff\xff')
)
self.assertRaises(OverflowError, x.write_24bit_uint, 16777216)
self.assertRaises(OverflowError, x.write_24bit_uint, -1)
self.assertRaises(TypeError, x.write_24bit_uint, '\x00\x00\x00')
def test_read_u24bit(self):
self._read_endian(
[b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_uint', (), 0
)
self._read_endian(
[b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_uint', (), 128
)
self._read_endian(
[b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_uint', (), 8388608
)
self._read_endian(
[b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_uint', (), 16777087
)
self._read_endian(
[b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_uint', (), 8388607
)
def test_write_24bit(self):
x = util.BufferedByteStream()
self._write_endian(
x, x.write_24bit_int, (0,), (b'\x00\x00\x00', b'\x00\x00\x00')
)
self._write_endian(
x, x.write_24bit_int, (128,), (b'\x00\x00\x80', b'\x80\x00\x00')
)
self._write_endian(
x, x.write_24bit_int, (8388607,), (b'\x7f\xff\xff', b'\xff\xff\x7f')
)
self._write_endian(
x, x.write_24bit_int, (-1,), (b'\xff\xff\xff', b'\xff\xff\xff')
)
self._write_endian(
x, x.write_24bit_int, (-8388608,), (b'\x80\x00\x00', b'\x00\x00\x80')
)
self.assertRaises(OverflowError, x.write_24bit_int, 8388608)
self.assertRaises(OverflowError, x.write_24bit_int, -8388609)
self.assertRaises(TypeError, x.write_24bit_int, '\x00\x00\x00')
def test_read_24bit(self):
self._read_endian(
[b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_int', (), 0
)
self._read_endian(
[b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_int', (), 128
)
self._read_endian(
[b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_int', (), -8388608
)
self._read_endian(
[b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_int', (), -129
)
self._read_endian(
[b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_int', (), 8388607
)
def test_write_float(self):
x = util.BufferedByteStream()
self._write_endian(
x, x.write_float, (0.2,), (b'>L\xcc\xcd', b'\xcd\xccL>')
)
self.assertRaises(TypeError, x.write_float, 'foo')
def test_read_float(self):
self._read_endian(
[b'?\x00\x00\x00', b'\x00\x00\x00?'], 'read_float', (), 0.5
)
def test_write_double(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_double,
(0.2,),
(b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?')
)
self.assertRaises(TypeError, x.write_double, 'foo')
def test_read_double(self):
self._read_endian(
[b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?'],
'read_double',
(),
0.2
)
def test_write_utf8_string(self):
x = util.BufferedByteStream()
self._write_endian(
x,
x.write_utf8_string,
(u'ᚠᛇᚻ',),
[b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2
)
self.assertRaises(TypeError, x.write_utf8_string, 1)
self.assertRaises(TypeError, x.write_utf8_string, 1.0)
self.assertRaises(TypeError, x.write_utf8_string, object())
x.write_utf8_string('\xff')
def test_read_utf8_string(self):
self._read_endian(
[b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2,
'read_utf8_string',
(9,),
u'ᚠᛇᚻ'
)
def test_nan(self):
x = util.BufferedByteStream(b'\xff\xf8\x00\x00\x00\x00\x00\x00')
self.assertTrue(isNaN(x.read_double()))
x = util.BufferedByteStream(b'\xff\xf0\x00\x00\x00\x00\x00\x00')
self.assertTrue(isNegInf(x.read_double()))
x = util.BufferedByteStream(b'\x7f\xf0\x00\x00\x00\x00\x00\x00')
self.assertTrue(isPosInf(x.read_double()))
# now test little endian
x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf8\xff')
x.endian = '<'
self.assertTrue(isNaN(x.read_double()))
x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\xff')
x.endian = '<'
self.assertTrue(isNegInf(x.read_double()))
x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\x7f')
x.endian = '<'
self.assertTrue(isPosInf(x.read_double()))
def test_write_infinites(self):
x = util.BufferedByteStream()
self._write_endian(x, x.write_double, (NaN,), (
b'\xff\xf8\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf8\xff'
))
self._write_endian(x, x.write_double, (PosInf,), (
b'\x7f\xf0\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf0\x7f'
))
self._write_endian(x, x.write_double, (NegInf,), (
b'\xff\xf0\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x00\xf0\xff'
))
class BufferedByteStreamTestCase(unittest.TestCase):
"""
Tests for L{BufferedByteStream<util.BufferedByteStream>}
"""
def test_create(self):
x = util.BufferedByteStream()
self.assertEqual(x.getvalue(), b'')
self.assertEqual(x.tell(), 0)
x = util.BufferedByteStream('abc')
self.assertEqual(x.getvalue(), b'abc')
self.assertEqual(x.tell(), 0)
def test_read(self):
x = util.BufferedByteStream()
self.assertEqual(x.tell(), 0)
self.assertEqual(len(x), 0)
self.assertRaises(IOError, x.read)
self.assertRaises(IOError, x.read, 10)
x.write('hello')
x.seek(0)
self.assertRaises(IOError, x.read, 10)
self.assertEqual(x.read(), b'hello')
def test_read_negative(self):
"""
@see: #799
"""
x = util.BufferedByteStream()
x.write('*' * 6000)
x.seek(100)
self.assertRaises(IOError, x.read, -345)
def test_peek(self):
x = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz')
self.assertEqual(x.tell(), 0)
self.assertEqual(x.peek(), b'a')
self.assertEqual(x.peek(5), b'abcde')
self.assertEqual(x.peek(-1), b'abcdefghijklmnopqrstuvwxyz')
x.seek(10)
self.assertEqual(x.peek(50), b'klmnopqrstuvwxyz')
def test_eof(self):
x = util.BufferedByteStream()
self.assertTrue(x.at_eof())
x.write('hello')
x.seek(0)
self.assertFalse(x.at_eof())
x.seek(0, 2)
self.assertTrue(x.at_eof())
def test_remaining(self):
x = util.BufferedByteStream('spameggs')
self.assertEqual(x.tell(), 0)
self.assertEqual(x.remaining(), 8)
x.seek(2)
self.assertEqual(x.tell(), 2)
self.assertEqual(x.remaining(), 6)
def test_add(self):
a = util.BufferedByteStream('a')
b = util.BufferedByteStream('b')
c = a + b
self.assertTrue(isinstance(c, util.BufferedByteStream))
self.assertEqual(c.getvalue(), b'ab')
self.assertEqual(c.tell(), 0)
def test_add_pos(self):
a = util.BufferedByteStream(b'abc')
b = util.BufferedByteStream(b'def')
a.seek(1)
b.seek(0, 2)
self.assertEqual(a.tell(), 1)
self.assertEqual(b.tell(), 3)
self.assertEqual(a.tell(), 1)
self.assertEqual(b.tell(), 3)
def test_append_types(self):
# test non string types
a = util.BufferedByteStream()
self.assertRaises(TypeError, a.append, 234234)
self.assertRaises(TypeError, a.append, 234.0)
self.assertRaises(TypeError, a.append, 234234)
self.assertRaises(TypeError, a.append, [])
self.assertRaises(TypeError, a.append, {})
self.assertRaises(TypeError, a.append, lambda _: None)
self.assertRaises(TypeError, a.append, ())
self.assertRaises(TypeError, a.append, object())
def test_append_string(self):
"""
Test L{util.BufferedByteStream.append} with C{str} objects.
"""
# test empty
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append('foo')
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 3)
# test pointer beginning, some data
a = util.BufferedByteStream('bar')
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer middle, some data
a = util.BufferedByteStream('bar')
a.seek(2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 2)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer end, some data
a = util.BufferedByteStream('bar')
a.seek(0, 2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 3)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
class Foo(object):
def getvalue(self):
return b'foo'
def __str__(self):
raise AttributeError()
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append(Foo())
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
def test_append_unicode(self):
"""
Test L{util.BufferedByteStream.append} with C{unicode} objects.
"""
# test empty
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append('foo')
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 3)
# test pointer beginning, some data
a = util.BufferedByteStream('bar')
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer middle, some data
a = util.BufferedByteStream('bar')
a.seek(2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 2)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
# test pointer end, some data
a = util.BufferedByteStream('bar')
a.seek(0, 2)
self.assertEqual(a.getvalue(), b'bar')
self.assertEqual(a.tell(), 3)
self.assertEqual(len(a), 3)
a.append('gak')
self.assertEqual(a.getvalue(), b'bargak')
self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved
self.assertEqual(len(a), 6)
class Foo(object):
def getvalue(self):
return u'foo'
def __str__(self):
raise AttributeError()
a = util.BufferedByteStream()
self.assertEqual(a.getvalue(), b'')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 0)
a.append(Foo())
self.assertEqual(a.getvalue(), b'foo')
self.assertEqual(a.tell(), 0)
self.assertEqual(len(a), 3)
class DummyAlias(pyamf.ClassAlias):
pass
class AnotherDummyAlias(pyamf.ClassAlias):
pass
class YADummyAlias(pyamf.ClassAlias):
pass
class ClassAliasTestCase(unittest.TestCase):
def setUp(self):
self.old_aliases = pyamf.ALIAS_TYPES.copy()
def tearDown(self):
replace_dict(self.old_aliases, pyamf.ALIAS_TYPES)
def test_simple(self):
class A(object):
pass
pyamf.register_alias_type(DummyAlias, A)
self.assertEqual(util.get_class_alias(A), DummyAlias)
def test_nested(self):
class A(object):
pass
class B(object):
pass
class C(object):
pass
pyamf.register_alias_type(DummyAlias, A, B, C)
self.assertEqual(util.get_class_alias(B), DummyAlias)
def test_multiple(self):
class A(object):
pass
class B(object):
pass
class C(object):
pass
pyamf.register_alias_type(DummyAlias, A)
pyamf.register_alias_type(AnotherDummyAlias, B)
pyamf.register_alias_type(YADummyAlias, C)
self.assertEqual(util.get_class_alias(B), AnotherDummyAlias)
self.assertEqual(util.get_class_alias(C), YADummyAlias)
self.assertEqual(util.get_class_alias(A), DummyAlias)
def test_none_existant(self):
self.assertEqual(util.get_class_alias(self.__class__), None)
def test_subclass(self):
class A(object):
pass
class B(A):
pass
pyamf.register_alias_type(DummyAlias, A)
self.assertEqual(util.get_class_alias(B), DummyAlias)
class IsClassSealedTestCase(unittest.TestCase):
"""
Tests for L{util.is_class_sealed}
"""
def test_new_mixed(self):
class A(object):
__slots__ = ['foo', 'bar']
class B(A):
pass
class C(B):
__slots__ = ('spam', 'eggs')
self.assertTrue(util.is_class_sealed(A))
self.assertFalse(util.is_class_sealed(B))
self.assertFalse(util.is_class_sealed(C))
def test_deep(self):
class A(object):
__slots__ = ['foo', 'bar']
class B(A):
__slots__ = ('gak',)
class C(B):
pass
self.assertTrue(util.is_class_sealed(A))
self.assertTrue(util.is_class_sealed(B))
self.assertFalse(util.is_class_sealed(C))
class GetClassMetaTestCase(unittest.TestCase):
"""
Tests for L{util.get_class_meta}
"""
def test_types(self):
class A:
pass
class B(object):
pass
for t in ['', u'', 1, 1.0, 1, [], {}, object, object(), A(), B()]:
self.assertRaises(TypeError, util.get_class_meta, t)
def test_no_meta(self):
class A:
pass
class B(object):
pass
empty = {
'readonly_attrs': None,
'static_attrs': None,
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'exclude_attrs': None,
'proxy_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), empty)
self.assertEqual(util.get_class_meta(B), empty)
def test_alias(self):
class A:
class __amf__:
alias = 'foo.bar.Spam'
class B(object):
class __amf__:
alias = 'foo.bar.Spam'
meta = {
'readonly_attrs': None,
'static_attrs': None,
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': 'foo.bar.Spam',
'amf3': None,
'proxy_attrs': None,
'exclude_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_static(self):
class A:
class __amf__:
static = ['foo', 'bar']
class B(object):
class __amf__:
static = ['foo', 'bar']
meta = {
'readonly_attrs': None,
'static_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'exclude_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_exclude(self):
class A:
class __amf__:
exclude = ['foo', 'bar']
class B(object):
class __amf__:
exclude = ['foo', 'bar']
meta = {
'readonly_attrs': None,
'exclude_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'proxy_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_readonly(self):
class A:
class __amf__:
readonly = ['foo', 'bar']
class B(object):
class __amf__:
readonly = ['foo', 'bar']
meta = {
'exclude_attrs': None,
'readonly_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None,
'proxy_attrs': None,
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_amf3(self):
class A:
class __amf__:
amf3 = True
class B(object):
class __amf__:
amf3 = True
meta = {
'exclude_attrs': None,
'proxy_attrs': None,
'synonym_attrs': None,
'readonly_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': True,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_dynamic(self):
class A:
class __amf__:
dynamic = False
class B(object):
class __amf__:
dynamic = False
meta = {
'exclude_attrs': None,
'proxy_attrs': None,
'synonym_attrs': None,
'readonly_attrs': None,
'proxy_attrs': None,
'dynamic': False,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_external(self):
class A:
class __amf__:
external = True
class B(object):
class __amf__:
external = True
meta = {
'exclude_attrs': None,
'proxy_attrs': None,
'synonym_attrs': None,
'readonly_attrs': None,
'proxy_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': True
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_dict(self):
meta = {
'exclude': ['foo'],
'readonly': ['bar'],
'dynamic': False,
'alias': 'spam.eggs',
'proxy_attrs': None,
'synonym_attrs': None,
'amf3': True,
'static': ['baz'],
'external': True
}
class A:
__amf__ = meta
class B(object):
__amf__ = meta
ret = {
'readonly_attrs': ['bar'],
'static_attrs': ['baz'],
'proxy_attrs': None,
'dynamic': False,
'alias': 'spam.eggs',
'amf3': True,
'exclude_attrs': ['foo'],
'synonym_attrs': None,
'proxy_attrs': None,
'external': True
}
self.assertEqual(util.get_class_meta(A), ret)
self.assertEqual(util.get_class_meta(B), ret)
def test_proxy(self):
class A:
class __amf__:
proxy = ['foo', 'bar']
class B(object):
class __amf__:
proxy = ['foo', 'bar']
meta = {
'exclude_attrs': None,
'readonly_attrs': None,
'proxy_attrs': ['foo', 'bar'],
'synonym_attrs': None,
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
def test_synonym(self):
class A:
class __amf__:
synonym = {'foo': 'bar'}
class B(object):
class __amf__:
synonym = {'foo': 'bar'}
meta = {
'exclude_attrs': None,
'readonly_attrs': None,
'proxy_attrs': None,
'synonym_attrs': {'foo': 'bar'},
'dynamic': None,
'alias': None,
'amf3': None,
'static_attrs': None,
'external': None
}
self.assertEqual(util.get_class_meta(A), meta)
self.assertEqual(util.get_class_meta(B), meta)
| en | 0.776291 | # -*- coding: utf-8 -*- # # Copyright (c) The PyAMF Project. # See LICENSE.txt for details. Tests for AMF utilities. @since: 0.1.0 Test UTC timestamps. # Relative to the beginning of the stream # from ticket 451 - http://pyamf.org/ticket/451 # move the stream pos to the end # big, little # now test little endian Tests for L{BufferedByteStream<util.BufferedByteStream>} @see: #799 # test non string types Test L{util.BufferedByteStream.append} with C{str} objects. # test empty # <-- pointer hasn't moved # test pointer beginning, some data # <-- pointer hasn't moved # test pointer middle, some data # <-- pointer hasn't moved # test pointer end, some data # <-- pointer hasn't moved Test L{util.BufferedByteStream.append} with C{unicode} objects. # test empty # <-- pointer hasn't moved # test pointer beginning, some data # <-- pointer hasn't moved # test pointer middle, some data # <-- pointer hasn't moved # test pointer end, some data # <-- pointer hasn't moved Tests for L{util.is_class_sealed} Tests for L{util.get_class_meta} | 2.445636 | 2 |
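A quick, hedged way to sanity-check the IEEE-754 byte patterns asserted by the endian double tests in the row above is Python's standard struct module (illustrative snippet, not part of the test suite):

import math
import struct

# Unpacking the big-endian byte strings from the assertions reproduces the
# special values the tests look for.
assert math.isinf(struct.unpack('>d', b'\x7f\xf0\x00\x00\x00\x00\x00\x00')[0])  # +Inf
assert math.isinf(struct.unpack('>d', b'\xff\xf0\x00\x00\x00\x00\x00\x00')[0])  # -Inf
assert math.isnan(struct.unpack('>d', b'\xff\xf8\x00\x00\x00\x00\x00\x00')[0])  # NaN
# The little-endian encodings are the same eight bytes in reverse order.
assert struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xf0\x7f')[0] == float('inf')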
e-valuator.py | keocol/e-valuator | 0 | 715 | <filename>e-valuator.py
import dns.resolver
import sys
import colorama
import platform
from colorama import init, Fore, Back, Style
import re
# pip install -r requirements.txt (colorama)
os = platform.platform()
if os.find('Windows')!= (-1):
init(convert=True)
print("""
███████╗░░░░░░██╗░░░██╗░█████╗░██╗░░░░░██╗░░░██╗░█████╗░████████╗░█████╗░██████╗░
██╔════╝░░░░░░██║░░░██║██╔══██╗██║░░░░░██║░░░██║██╔══██╗╚══██╔══╝██╔══██╗██╔══██╗
█████╗░░█████╗╚██╗░██╔╝███████║██║░░░░░██║░░░██║███████║░░░██║░░░██║░░██║██████╔╝
██╔══╝░░╚════╝░╚████╔╝░██╔══██║██║░░░░░██║░░░██║██╔══██║░░░██║░░░██║░░██║██╔══██╗
███████╗░░░░░░░░╚██╔╝░░██║░░██║███████╗╚██████╔╝██║░░██║░░░██║░░░╚█████╔╝██║░░██║
╚══════╝░░░░░░░░░╚═╝░░░╚═╝░░╚═╝╚══════╝░╚═════╝░╚═╝░░╚═╝░░░╚═╝░░░░╚════╝░╚═╝░░╚═╝
\x1B[3mSimple Python3 Script for Checking SPF & DMARC Records.\x1B[0m
""" + '\n')
Domain = input('Domain: ')
# Checking SPF
print ('\n[+] Checking SPF Record...')
try:
obj_answer = dns.resolver.resolve(Domain, 'TXT')
except:
sys.exit(Fore.RED + "\n[+] Domain can't be resolved! Check the domain name and try again..")
answer = str(obj_answer.response)
cond = answer.find("v=spf")
if cond != -1:
print ('[+] SPF Record Found!')
spf_pos= answer.find("v=spf")
spf_end_tmp= (answer[spf_pos:].find("\n"))-1
spf_end= answer[spf_pos:spf_pos+spf_end_tmp]
print (Fore.GREEN + '[+] Domain: ' + Domain)
print (Fore.GREEN + '[+] SPF Record: ' +spf_end)
neutral_check = answer.find('?all')
fail_check = answer.find('-all')
soft_check = answer.find('~all')
pass_check = answer.find('+all')
if neutral_check != -1:
print (Fore.RED +'[+] Result: ?all IS FOUND!! Domain emails can be spoofed!')
elif fail_check != -1:
print (Fore.GREEN +'[+] Result: -all is found. SPF is correctly configured.')
elif soft_check != -1:
print (Fore.GREEN +'[+] Result: ~all is found. SPF is correctly configured.')
elif pass_check != -1:
print (Fore.RED +'[+] Result: +all DOMAIN IS VERY BADLY CONFIGURED! Domain emails can be spoofed!')
else:
print (Fore.RED +'[+] Result: No condition is set for "all"! Domain emails can be spoofed!')
else:
print (Fore.RED +'[+] No SPF Record Found!!')
# Checking DMARC
print (Fore.WHITE + '\n\n[+] Checking DMARC Policy..')
try:
obj2_answer = dns.resolver.resolve('_dmarc.'+ Domain, 'TXT')
except:
sys.exit(Fore.RED + "[+] The domain doesn't have DMARC policy configured!")
answer2 = str(obj2_answer.response)
print (Fore.WHITE + '[+] DMARC Policy Found!')
none_check = re.search("[\;\s]p\=none\;", answer2)
reject_check = re.search("[\;\s]p\=reject\;", answer2)
quarantine_check = re.search("[\;\s]p\=quarantine\;", answer2)
if none_check:
print (Fore.RED + '[+] Result: DMARC Policy is set as none! Domain emails can be spoofed!')
if reject_check:
print (Fore.GREEN + '[+] Result: DMARC Policy is set as reject! Domain emails are safe from spoofing.')
if quarantine_check:
print (Fore.GREEN + '[+] Result: DMARC Policy is set as quarantine! Domain emails are safe from spoofing.') | <filename>e-valuator.py
import dns.resolver
import sys
import colorama
import platform
from colorama import init, Fore, Back, Style
import re
# pip install -r requirements.txt (colorama)
os = platform.platform()
if os.find('Windows')!= (-1):
init(convert=True)
print("""
███████╗░░░░░░██╗░░░██╗░█████╗░██╗░░░░░██╗░░░██╗░█████╗░████████╗░█████╗░██████╗░
██╔════╝░░░░░░██║░░░██║██╔══██╗██║░░░░░██║░░░██║██╔══██╗╚══██╔══╝██╔══██╗██╔══██╗
█████╗░░█████╗╚██╗░██╔╝███████║██║░░░░░██║░░░██║███████║░░░██║░░░██║░░██║██████╔╝
██╔══╝░░╚════╝░╚████╔╝░██╔══██║██║░░░░░██║░░░██║██╔══██║░░░██║░░░██║░░██║██╔══██╗
███████╗░░░░░░░░╚██╔╝░░██║░░██║███████╗╚██████╔╝██║░░██║░░░██║░░░╚█████╔╝██║░░██║
╚══════╝░░░░░░░░░╚═╝░░░╚═╝░░╚═╝╚══════╝░╚═════╝░╚═╝░░╚═╝░░░╚═╝░░░░╚════╝░╚═╝░░╚═╝
\x1B[3mSimple Python3 Script for Checking SPF & DMARC Records.\x1B[0m
""" + '\n')
Domain = input('Domain: ')
# Checking SPF
print ('\n[+] Checking SPF Record...')
try:
obj_answer = dns.resolver.resolve(Domain, 'TXT')
except:
sys.exit(Fore.RED + "\n[+] Domain can't be resolved! Check the domain name and try again..")
answer = str(obj_answer.response)
cond = answer.find("v=spf")
if cond != -1:
print ('[+] SPF Record Found!')
spf_pos= answer.find("v=spf")
spf_end_tmp= (answer[spf_pos:].find("\n"))-1
spf_end= answer[spf_pos:spf_pos+spf_end_tmp]
print (Fore.GREEN + '[+] Domain: ' + Domain)
print (Fore.GREEN + '[+] SPF Record: ' +spf_end)
neutral_check = answer.find('?all')
fail_check = answer.find('-all')
soft_check = answer.find('~all')
pass_check = answer.find('+all')
if neutral_check != -1:
print (Fore.RED +'[+] Result: ?all IS FOUND!! Domain emails can be spoofed!')
elif fail_check != -1:
print (Fore.GREEN +'[+] Result: -all is found. SPF is correctly configured.')
elif soft_check != -1:
print (Fore.GREEN +'[+] Result: ~all is found. SPF is correctly configured.')
elif pass_check != -1:
print (Fore.RED +'[+] Result: +all DOMAIN IS VERY BADLY CONFIGURED! Domain emails can be spoofed!')
else:
print (Fore.RED +'[+] Result: No condition is set for "all"! Domain emails can be spoofed!')
else:
print (Fore.RED +'[+] No SPF Record Found!!')
# Checking DMARC
print (Fore.WHITE + '\n\n[+] Checking DMARC Policy..')
try:
obj2_answer = dns.resolver.resolve('_dmarc.'+ Domain, 'TXT')
except:
sys.exit(Fore.RED + "[+] The domain doesn't have DMARC policy configured!")
answer2 = str(obj2_answer.response)
print (Fore.WHITE + '[+] DMARC Policy Found!')
none_check = re.search("[\;\s]p\=none\;", answer2)
reject_check = re.search("[\;\s]p\=reject\;", answer2)
quarantine_check = re.search("[\;\s]p\=quarantine\;", answer2)
if none_check:
print (Fore.RED + '[+] Result: DMARC Policy is set as none! Domain emails can be spoofed!')
if reject_check:
print (Fore.GREEN + '[+] Result: DMARC Policy is set as reject! Domain emails are safe from spoofing.')
if quarantine_check:
print (Fore.GREEN + '[+] Result: DMARC Policy is set as quarantine! Domain emails are safe from spoofing.') | ru | 0.117607 | # pip install -r requirements.txt (colorama) ███████╗░░░░░░██╗░░░██╗░█████╗░██╗░░░░░██╗░░░██╗░█████╗░████████╗░█████╗░██████╗░
██╔════╝░░░░░░██║░░░██║██╔══██╗██║░░░░░██║░░░██║██╔══██╗╚══██╔══╝██╔══██╗██╔══██╗
█████╗░░█████╗╚██╗░██╔╝███████║██║░░░░░██║░░░██║███████║░░░██║░░░██║░░██║██████╔╝
██╔══╝░░╚════╝░╚████╔╝░██╔══██║██║░░░░░██║░░░██║██╔══██║░░░██║░░░██║░░██║██╔══██╗
███████╗░░░░░░░░╚██╔╝░░██║░░██║███████╗╚██████╔╝██║░░██║░░░██║░░░╚█████╔╝██║░░██║
╚══════╝░░░░░░░░░╚═╝░░░╚═╝░░╚═╝╚══════╝░╚═════╝░╚═╝░░╚═╝░░░╚═╝░░░░╚════╝░╚═╝░░╚═╝
\x1B[3mSimple Python3 Script for Checking SPF & DMARC Records.\x1B[0m # Checking SPF # Checking DMARC | 2.419026 | 2 |
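As a hedged aside on the row above: the same TXT lookups can also be made against dnspython's record objects instead of scanning the stringified DNS response. The sketch below assumes dnspython 2.x and uses "example.com" purely as a placeholder domain.

import dns.exception
import dns.resolver

def get_txt_records(name):
    """Return all TXT strings for a name, or an empty list on lookup failure."""
    try:
        answer = dns.resolver.resolve(name, 'TXT')
    except dns.exception.DNSException:
        return []
    return [b''.join(rdata.strings).decode('utf-8', 'replace') for rdata in answer]

spf = [r for r in get_txt_records('example.com') if r.startswith('v=spf1')]
dmarc = [r for r in get_txt_records('_dmarc.example.com') if r.startswith('v=DMARC1')]
print('SPF:', spf, 'DMARC:', dmarc)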
api/server.py | qh73xe/HowAboutNatume | 0 | 716 | <reponame>qh73xe/HowAboutNatume
# -*- coding: utf-8 -*
"""トルネードを使用した ask.api を作成します."""
from json import dumps
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import parse_command_line
from tornado.web import Application, RequestHandler
from tornado.options import define, options
from tokenizer import get_entity
from logger import getLogger
LOGGER = getLogger('API_MODULE')
define("port", default=8000, help="run on the given port", type=int)
class AskHandler(RequestHandler):
"""question に get された文章と親密度の高い語を返します."""
def get(self):
"""Question に答えます."""
from ask import ask
author = self.get_argument('author')
question = self.get_argument('question')
answers = {
'answers': ask(author, get_entity(question))
}
self.finish(
dumps(
answers,
ensure_ascii=False,
indent=4,
sort_keys=True,
separators=(',', ': ')
)
)
def post(self):
"""Action on google の web フック用レスポンス"""
from ask import ask
import json
data = json.loads(self.request.body)
LOGGER.info('input: {data}'.format(data=data))
author = data.get('author', '夏目漱石')
question = data.get('question')
answers = ask(author, get_entity(question))
if answers:
adjective = answers.get('adjective', None)
nouns = answers.get('nouns')
if adjective:
speech = '。'.join([
'それは {adjective} 質問ですね'.format(adjective=adjective[0]),
'きっと, {0} や {1} あるいは {2} のことです'.format(*nouns)
])
else:
speech = 'それはきっと, {0} や {1} あるいは {2} のことです'.format(*nouns)
else:
speech = '。'.join([
'{q} についてですか'.format(q=question),
'難しいことを聞きますね',
'私にはわからないです'
])
displayText = speech
respose = {
'speech': speech,
'displayText': displayText,
'data': answers,
'contextOut': [answers],
'source': 'how-about-natume'
}
self.finish(
dumps(
respose,
ensure_ascii=False,
indent=4,
sort_keys=True,
separators=(',', ': ')
)
)
if __name__ == "__main__":
parse_command_line()
app = Application(handlers=[(r"/", AskHandler)])
http_server = HTTPServer(app)
http_server.listen(options.port)
IOLoop.instance().start()
| # -*- coding: utf-8 -*
"""トルネードを使用した ask.api を作成します."""
from json import dumps
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import parse_command_line
from tornado.web import Application, RequestHandler
from tornado.options import define, options
from tokenizer import get_entity
from logger import getLogger
LOGGER = getLogger('API_MODULE')
define("port", default=8000, help="run on the given port", type=int)
class AskHandler(RequestHandler):
"""question に get された文章と親密度の高い語を返します."""
def get(self):
"""Question に答えます."""
from ask import ask
author = self.get_argument('author')
question = self.get_argument('question')
answers = {
'answers': ask(author, get_entity(question))
}
self.finish(
dumps(
answers,
ensure_ascii=False,
indent=4,
sort_keys=True,
separators=(',', ': ')
)
)
def post(self):
"""Action on google の web フック用レスポンス"""
from ask import ask
import json
data = json.loads(self.request.body)
LOGGER.info('input: {data}'.format(data=data))
author = data.get('author', '夏目漱石')
question = data.get('question')
answers = ask(author, get_entity(question))
if answers:
adjective = answers.get('adjective', None)
nouns = answers.get('nouns')
if adjective:
speech = '。'.join([
'それは {adjective} 質問ですね'.format(adjective=adjective[0]),
'きっと, {0} や {1} あるいは {2} のことです'.format(*nouns)
])
else:
speech = 'それはきっと, {0} や {1} あるいは {2} のことです'.format(*nouns)
else:
speech = '。'.join([
'{q} についてですか'.format(q=question),
'難しいことを聞きますね',
'私にはわからないです'
])
displayText = speech
respose = {
'speech': speech,
'displayText': displayText,
'data': answers,
'contextOut': [answers],
'source': 'how-about-natume'
}
self.finish(
dumps(
respose,
ensure_ascii=False,
indent=4,
sort_keys=True,
separators=(',', ': ')
)
)
if __name__ == "__main__":
parse_command_line()
app = Application(handlers=[(r"/", AskHandler)])
http_server = HTTPServer(app)
http_server.listen(options.port)
IOLoop.instance().start() | ja | 0.992165 | # -*- coding: utf-8 -* トルネードを使用した ask.api を作成します. question に get された文章と親密度の高い語を返します. Question に答えます. Action on google の web フック用レスポンス | 2.816839 | 3 |
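A hedged usage sketch for the webhook handler in the row above: POST a JSON body carrying "author" and "question" to a locally running instance (port 8000 is the module's default; a running server and its ask() backend are assumed, and the question text is only an example).

import json
from urllib import request as urlrequest

payload = json.dumps(
    {'author': '夏目漱石', 'question': '猫とは何ですか'}
).encode('utf-8')
req = urlrequest.Request(
    'http://localhost:8000/',
    data=payload,
    headers={'Content-Type': 'application/json'},
)
with urlrequest.urlopen(req) as resp:
    print(json.loads(resp.read().decode('utf-8'))['speech'])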
proxy/http/chunk_parser.py | GDGSNF/proxy.py | 0 | 717 | <filename>proxy/http/chunk_parser.py
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by <NAME> and contributors.
:license: BSD, see LICENSE for more details.
"""
from typing import NamedTuple, Tuple, List, Optional
from ..common.utils import bytes_, find_http_line
from ..common.constants import CRLF, DEFAULT_BUFFER_SIZE
ChunkParserStates = NamedTuple(
'ChunkParserStates', [
('WAITING_FOR_SIZE', int),
('WAITING_FOR_DATA', int),
('COMPLETE', int),
],
)
chunkParserStates = ChunkParserStates(1, 2, 3)
class ChunkParser:
"""HTTP chunked encoding response parser."""
def __init__(self) -> None:
self.state = chunkParserStates.WAITING_FOR_SIZE
self.body: bytes = b'' # Parsed chunks
self.chunk: bytes = b'' # Partial chunk received
# Expected size of next following chunk
self.size: Optional[int] = None
def parse(self, raw: bytes) -> bytes:
more = len(raw) > 0
while more and self.state != chunkParserStates.COMPLETE:
more, raw = self.process(raw)
return raw
def process(self, raw: bytes) -> Tuple[bool, bytes]:
if self.state == chunkParserStates.WAITING_FOR_SIZE:
# Consume prior chunk in buffer
# in case chunk size without CRLF was received
raw = self.chunk + raw
self.chunk = b''
# Extract following chunk data size
line, raw = find_http_line(raw)
# CRLF not received or Blank line was received.
if line is None or line.strip() == b'':
self.chunk = raw
raw = b''
else:
self.size = int(line, 16)
self.state = chunkParserStates.WAITING_FOR_DATA
elif self.state == chunkParserStates.WAITING_FOR_DATA:
assert self.size is not None
remaining = self.size - len(self.chunk)
self.chunk += raw[:remaining]
raw = raw[remaining:]
if len(self.chunk) == self.size:
raw = raw[len(CRLF):]
self.body += self.chunk
if self.size == 0:
self.state = chunkParserStates.COMPLETE
else:
self.state = chunkParserStates.WAITING_FOR_SIZE
self.chunk = b''
self.size = None
return len(raw) > 0, raw
@staticmethod
def to_chunks(raw: bytes, chunk_size: int = DEFAULT_BUFFER_SIZE) -> bytes:
chunks: List[bytes] = []
for i in range(0, len(raw), chunk_size):
chunk = raw[i: i + chunk_size]
chunks.append(bytes_('{:x}'.format(len(chunk))))
chunks.append(chunk)
chunks.append(bytes_('{:x}'.format(0)))
chunks.append(b'')
return CRLF.join(chunks) + CRLF
| <filename>proxy/http/chunk_parser.py
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by <NAME> and contributors.
:license: BSD, see LICENSE for more details.
"""
from typing import NamedTuple, Tuple, List, Optional
from ..common.utils import bytes_, find_http_line
from ..common.constants import CRLF, DEFAULT_BUFFER_SIZE
ChunkParserStates = NamedTuple(
'ChunkParserStates', [
('WAITING_FOR_SIZE', int),
('WAITING_FOR_DATA', int),
('COMPLETE', int),
],
)
chunkParserStates = ChunkParserStates(1, 2, 3)
class ChunkParser:
"""HTTP chunked encoding response parser."""
def __init__(self) -> None:
self.state = chunkParserStates.WAITING_FOR_SIZE
self.body: bytes = b'' # Parsed chunks
self.chunk: bytes = b'' # Partial chunk received
# Expected size of next following chunk
self.size: Optional[int] = None
def parse(self, raw: bytes) -> bytes:
more = len(raw) > 0
while more and self.state != chunkParserStates.COMPLETE:
more, raw = self.process(raw)
return raw
def process(self, raw: bytes) -> Tuple[bool, bytes]:
if self.state == chunkParserStates.WAITING_FOR_SIZE:
# Consume prior chunk in buffer
# in case chunk size without CRLF was received
raw = self.chunk + raw
self.chunk = b''
# Extract following chunk data size
line, raw = find_http_line(raw)
# CRLF not received or Blank line was received.
if line is None or line.strip() == b'':
self.chunk = raw
raw = b''
else:
self.size = int(line, 16)
self.state = chunkParserStates.WAITING_FOR_DATA
elif self.state == chunkParserStates.WAITING_FOR_DATA:
assert self.size is not None
remaining = self.size - len(self.chunk)
self.chunk += raw[:remaining]
raw = raw[remaining:]
if len(self.chunk) == self.size:
raw = raw[len(CRLF):]
self.body += self.chunk
if self.size == 0:
self.state = chunkParserStates.COMPLETE
else:
self.state = chunkParserStates.WAITING_FOR_SIZE
self.chunk = b''
self.size = None
return len(raw) > 0, raw
@staticmethod
def to_chunks(raw: bytes, chunk_size: int = DEFAULT_BUFFER_SIZE) -> bytes:
chunks: List[bytes] = []
for i in range(0, len(raw), chunk_size):
chunk = raw[i: i + chunk_size]
chunks.append(bytes_('{:x}'.format(len(chunk))))
chunks.append(chunk)
chunks.append(bytes_('{:x}'.format(0)))
chunks.append(b'')
return CRLF.join(chunks) + CRLF
| en | 0.933057 | # -*- coding: utf-8 -*- proxy.py ~~~~~~~~ ⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on Network monitoring, controls & Application development, testing, debugging. :copyright: (c) 2013-present by <NAME> and contributors. :license: BSD, see LICENSE for more details. HTTP chunked encoding response parser. # Parsed chunks # Partial chunk received # Expected size of next following chunk # Consume prior chunk in buffer # in case chunk size without CRLF was received # Extract following chunk data size # CRLF not received or Blank line was received. | 2.677183 | 3 |
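A hedged round-trip sketch for the parser in the row above; the import path assumes the proxy.py package layout shown in the file header.

from proxy.http.chunk_parser import ChunkParser

# Encode a payload as HTTP/1.1 chunked transfer coding, then parse it back.
wire = ChunkParser.to_chunks(b'hello world', chunk_size=5)
# wire == b'5\r\nhello\r\n5\r\n worl\r\n1\r\nd\r\n0\r\n\r\n'
parser = ChunkParser()
leftover = parser.parse(wire)
assert parser.body == b'hello world'
assert leftover == b''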
nova/pci/stats.py | 10088/nova | 0 | 718 | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import typing as ty
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova.pci import utils
from nova.pci import whitelist
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(stephenfin): We might want to use TypedDict here. Refer to
# https://mypy.readthedocs.io/en/latest/kinds_of_types.html#typeddict for
# more information.
Pool = ty.Dict[str, ty.Any]
class PciDeviceStats(object):
"""PCI devices summary information.
According to the PCI SR-IOV spec, a PCI physical function can have up to
256 PCI virtual functions, thus the number of assignable PCI functions in
a cloud can be big. The scheduler needs to know all device availability
information in order to determine which compute hosts can support a PCI
request. Passing individual virtual device information to the scheduler
does not scale, so we provide summary information.
Usually the virtual functions provided by a host PCI device have the same
value for most properties, like vendor_id, product_id and class type.
The PCI stats class summarizes this information for the scheduler.
The pci stats information is maintained exclusively by compute node
resource tracker and updated to database. The scheduler fetches the
information and selects the compute node accordingly. If a compute
node is selected, the resource tracker allocates the devices to the
instance and updates the pci stats information.
This summary information will be helpful for cloud management also.
"""
pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type']
def __init__(
self,
numa_topology: 'objects.NUMATopology',
stats: 'objects.PCIDevicePoolList' = None,
dev_filter: whitelist.Whitelist = None,
) -> None:
self.numa_topology = numa_topology
self.pools = (
[pci_pool.to_dict() for pci_pool in stats] if stats else []
)
self.pools.sort(key=lambda item: len(item))
self.dev_filter = dev_filter or whitelist.Whitelist(
CONF.pci.passthrough_whitelist)
def _equal_properties(
self, dev: Pool, entry: Pool, matching_keys: ty.List[str],
) -> bool:
return all(dev.get(prop) == entry.get(prop)
for prop in matching_keys)
def _find_pool(self, dev_pool: Pool) -> ty.Optional[Pool]:
"""Return the first pool that matches dev."""
for pool in self.pools:
pool_keys = pool.copy()
del pool_keys['count']
del pool_keys['devices']
if (len(pool_keys.keys()) == len(dev_pool.keys()) and
self._equal_properties(dev_pool, pool_keys, list(dev_pool))):
return pool
return None
@staticmethod
def _ensure_remote_managed_tag(
dev: 'objects.PciDevice', pool: Pool):
"""Add a remote_managed tag depending on a device type if needed.
Network devices may be managed remotely, e.g. by a SmartNIC DPU. If
a tag has not been explicitly provided, populate it by assuming that
a device is not remote managed by default.
"""
if dev.dev_type not in (fields.PciDeviceType.SRIOV_VF,
fields.PciDeviceType.SRIOV_PF,
fields.PciDeviceType.VDPA):
return
# A tag is added here rather than at the client side to avoid an
# issue with having objects without this tag specified during an
# upgrade to the first version that supports handling this tag.
if pool.get(PCI_REMOTE_MANAGED_TAG) is None:
# NOTE: tags are compared as strings case-insensitively, see
# pci_device_prop_match in nova/pci/utils.py.
pool[PCI_REMOTE_MANAGED_TAG] = 'false'
def _create_pool_keys_from_dev(
self, dev: 'objects.PciDevice',
) -> ty.Optional[Pool]:
"""Create a stats pool dict that this dev is supposed to be part of
Note that this pool dict contains the stats pool's keys and their
values. 'count' and 'devices' are not included.
"""
# Don't add a device that doesn't have a matching device spec.
# This can happen during initial sync up with the controller
devspec = self.dev_filter.get_devspec(dev)
if not devspec:
return None
tags = devspec.get_tags()
pool = {k: getattr(dev, k) for k in self.pool_keys}
if tags:
pool.update(tags)
# NOTE(gibi): parent_ifname acts like a tag during pci claim but
# not provided as part of the whitelist spec as it is auto detected
# by the virt driver.
        # This key is used to match InstancePciRequests backed by neutron
        # ports that have a resource_request and therefore already have a
        # resource allocation in placement.
if dev.extra_info.get('parent_ifname'):
pool['parent_ifname'] = dev.extra_info['parent_ifname']
self._ensure_remote_managed_tag(dev, pool)
return pool
def _get_pool_with_device_type_mismatch(
self, dev: 'objects.PciDevice',
) -> ty.Optional[ty.Tuple[Pool, 'objects.PciDevice']]:
"""Check for device type mismatch in the pools for a given device.
Return (pool, device) if device type does not match or a single None
if the device type matches.
"""
for pool in self.pools:
for device in pool['devices']:
if device.address == dev.address:
if dev.dev_type != pool["dev_type"]:
return pool, device
return None
return None
def update_device(self, dev: 'objects.PciDevice') -> None:
"""Update a device to its matching pool."""
pool_device_info = self._get_pool_with_device_type_mismatch(dev)
if pool_device_info is None:
return None
pool, device = pool_device_info
pool['devices'].remove(device)
self._decrease_pool_count(self.pools, pool)
self.add_device(dev)
def add_device(self, dev: 'objects.PciDevice') -> None:
"""Add a device to its matching pool."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
dev_pool['count'] = 0
dev_pool['devices'] = []
self.pools.append(dev_pool)
self.pools.sort(key=lambda item: len(item))
pool = dev_pool
pool['count'] += 1
pool['devices'].append(dev)
@staticmethod
def _decrease_pool_count(
pool_list: ty.List[Pool], pool: Pool, count: int = 1,
) -> int:
"""Decrement pool's size by count.
If pool becomes empty, remove pool from pool_list.
"""
if pool['count'] > count:
pool['count'] -= count
count = 0
else:
count -= pool['count']
pool_list.remove(pool)
return count
def remove_device(self, dev: 'objects.PciDevice') -> None:
"""Remove one device from the first pool that it matches."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
raise exception.PciDevicePoolEmpty(
compute_node_id=dev.compute_node_id, address=dev.address)
pool['devices'].remove(dev)
self._decrease_pool_count(self.pools, pool)
def get_free_devs(self) -> ty.List['objects.PciDevice']:
free_devs: ty.List[objects.PciDevice] = []
for pool in self.pools:
free_devs.extend(pool['devices'])
return free_devs
def consume_requests(
self,
pci_requests: 'objects.InstancePCIRequests',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> ty.Optional[ty.List['objects.PciDevice']]:
alloc_devices: ty.List[objects.PciDevice] = []
for request in pci_requests:
count = request.count
pools = self._filter_pools(self.pools, request, numa_cells)
# Failed to allocate the required number of devices. Return the
# devices already allocated during previous iterations back to
# their pools
if not pools:
LOG.error("Failed to allocate PCI devices for instance. "
"Unassigning devices back to pools. "
"This should not happen, since the scheduler "
"should have accurate information, and allocation "
"during claims is controlled via a hold "
"on the compute node semaphore.")
for d in range(len(alloc_devices)):
self.add_device(alloc_devices.pop())
return None
for pool in pools:
if pool['count'] >= count:
num_alloc = count
else:
num_alloc = pool['count']
count -= num_alloc
pool['count'] -= num_alloc
for d in range(num_alloc):
pci_dev = pool['devices'].pop()
self._handle_device_dependents(pci_dev)
pci_dev.request_id = request.request_id
alloc_devices.append(pci_dev)
if count == 0:
break
return alloc_devices
def _handle_device_dependents(self, pci_dev: 'objects.PciDevice') -> None:
"""Remove device dependents or a parent from pools.
In case the device is a PF, all of it's dependent VFs should
be removed from pools count, if these are present.
        When the device is a VF, or a VDPA device, its parent PF
pool count should be decreased, unless it is no longer in a pool.
"""
if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF:
vfs_list = pci_dev.child_devices
if vfs_list:
free_devs = self.get_free_devs()
for vf in vfs_list:
# NOTE(gibi): do not try to remove a device that are
# already removed
if vf in free_devs:
self.remove_device(vf)
elif pci_dev.dev_type in (
fields.PciDeviceType.SRIOV_VF,
fields.PciDeviceType.VDPA,
):
try:
parent = pci_dev.parent_device
# Make sure not to decrease PF pool count if this parent has
# been already removed from pools
if parent in self.get_free_devs():
self.remove_device(parent)
except exception.PciDeviceNotFound:
return
def _filter_pools_for_spec(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools that don't match the request's device spec.
Exclude pools that do not match the specified ``vendor_id``,
``product_id`` and/or ``device_type`` field, or any of the other
arbitrary tags such as ``physical_network``, specified in the request.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
request_specs = request.spec
return [
pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)
]
def _filter_pools_for_numa_cells(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
) -> ty.List[Pool]:
"""Filter out pools with the wrong NUMA affinity, if required.
Exclude pools that do not have *suitable* PCI NUMA affinity.
``numa_policy`` determines what *suitable* means, being one of
PREFERRED (nice-to-have), LEGACY (must-have-if-available) and REQUIRED
(must-have). We iterate through the various policies in order of
strictness. This means that even if we only *prefer* PCI-NUMA affinity,
we will still attempt to provide it if possible.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells.
:returns: A list of pools that can, together, provide at least
``requested_count`` PCI devices with the level of NUMA affinity
required by ``numa_policy``, else all pools that can satisfy this
policy even if it's not enough.
"""
if not numa_cells:
return pools
# we default to the 'legacy' policy for...of course...legacy reasons
requested_policy = fields.PCINUMAAffinityPolicy.LEGACY
if 'numa_policy' in request:
requested_policy = request.numa_policy or requested_policy
requested_count = request.count
numa_cell_ids = [cell.id for cell in numa_cells]
# filter out pools which numa_node is not included in numa_cell_ids
filtered_pools = [
pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}]) for cell in numa_cell_ids)]
# we can't apply a less strict policy than the one requested, so we
# need to return if we've demanded a NUMA affinity of REQUIRED.
# However, NUMA affinity is a good thing. If we can get enough devices
# with the stricter policy then we will use them.
if requested_policy == fields.PCINUMAAffinityPolicy.REQUIRED or sum(
pool['count'] for pool in filtered_pools) >= requested_count:
return filtered_pools
# the SOCKET policy is a bit of a special case. It's less strict than
# REQUIRED (so REQUIRED will automatically fulfil SOCKET, at least
# with our assumption of never having multiple sockets per NUMA node),
# but not always more strict than LEGACY: a PCI device with no NUMA
# affinity will fulfil LEGACY but not SOCKET. If we have SOCKET,
# process it here and don't continue.
if requested_policy == fields.PCINUMAAffinityPolicy.SOCKET:
return self._filter_pools_for_socket_affinity(pools, numa_cells)
# some systems don't report NUMA node info for PCI devices, in which
# case None is reported in 'pci_device.numa_node'. The LEGACY policy
# allows us to use these devices so we include None in the list of
# suitable NUMA cells.
numa_cell_ids.append(None)
# filter out pools which numa_node is not included in numa_cell_ids
filtered_pools = [
pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}]) for cell in numa_cell_ids)]
# once again, we can't apply a less strict policy than the one
# requested, so we need to return if we've demanded a NUMA affinity of
# LEGACY. Similarly, we will also return if we have enough devices to
# satisfy this somewhat strict policy.
if requested_policy == fields.PCINUMAAffinityPolicy.LEGACY or sum(
pool['count'] for pool in filtered_pools) >= requested_count:
return filtered_pools
# if we've got here, we're using the PREFERRED policy and weren't able
# to provide anything with stricter affinity. Use whatever devices you
# can, folks.
return sorted(
pools, key=lambda pool: pool.get('numa_node') not in numa_cell_ids)
def _filter_pools_for_socket_affinity(
self,
pools: ty.List[Pool],
numa_cells: ty.List['objects.InstanceNUMACell'],
) -> ty.List[Pool]:
host_cells = self.numa_topology.cells
# bail early if we don't have socket information for all host_cells.
        # This could happen if we're running on a weird older system with
# multiple sockets per NUMA node, which is a configuration that we
# explicitly chose not to support.
if any(cell.socket is None for cell in host_cells):
LOG.debug('No socket information in host NUMA cell(s).')
return []
# get a set of host sockets that the guest cells are in. Since guest
# cell IDs map to host cell IDs, we can just lookup the latter's
# socket.
socket_ids = set()
for guest_cell in numa_cells:
for host_cell in host_cells:
if guest_cell.id == host_cell.id:
socket_ids.add(host_cell.socket)
# now get a set of host NUMA nodes that are in the above sockets
allowed_numa_nodes = set()
for host_cell in host_cells:
if host_cell.socket in socket_ids:
allowed_numa_nodes.add(host_cell.id)
# filter out pools that are not in one of the correct host NUMA nodes.
return [
pool for pool in pools if any(
utils.pci_device_prop_match(pool, [{'numa_node': numa_node}])
for numa_node in allowed_numa_nodes
)
]
def _filter_pools_for_unrequested_pfs(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with PFs, unless these are required.
This is necessary in cases where PFs and VFs have the same product_id
and generally useful elsewhere.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(
spec.get('dev_type') != fields.PciDeviceType.SRIOV_PF
for spec in request.spec
):
pools = [
pool for pool in pools
if not pool.get('dev_type') == fields.PciDeviceType.SRIOV_PF
]
return pools
def _filter_pools_for_unrequested_vdpa_devices(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with VDPA devices, unless these are required.
This is necessary as vdpa devices require special handling and
should not be allocated to generic pci device requests.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(
spec.get('dev_type') != fields.PciDeviceType.VDPA
for spec in request.spec
):
pools = [
pool for pool in pools
if not pool.get('dev_type') == fields.PciDeviceType.VDPA
]
return pools
def _filter_pools_for_unrequested_remote_managed_devices(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with remote_managed devices, unless requested.
Remote-managed devices are not usable for legacy SR-IOV or hardware
offload scenarios and must be excluded from allocation.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(not strutils.bool_from_string(spec.get(PCI_REMOTE_MANAGED_TAG))
for spec in request.spec):
pools = [pool for pool in pools
if not strutils.bool_from_string(
pool.get(PCI_REMOTE_MANAGED_TAG))]
return pools
def _filter_pools(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
) -> ty.Optional[ty.List[Pool]]:
"""Determine if an individual PCI request can be met.
Filter pools, which are collections of devices with similar traits, to
identify those that can support the provided PCI request.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``request.numa_policy``.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: A list of pools that can be used to support the request if
this is possible, else None.
"""
# NOTE(vladikr): This code may be open to race conditions.
# Two concurrent requests may succeed when called support_requests
# because this method does not remove related devices from the pools
# Firstly, let's exclude all devices that don't match our spec (e.g.
# they've got different PCI IDs or something)
before_count = sum([pool['count'] for pool in pools])
pools = self._filter_pools_for_spec(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) due to mismatched PCI attribute(s)',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# Next, let's exclude all devices that aren't on the correct NUMA node
# or socket, *assuming* we have devices and care about that, as
# determined by policy
before_count = after_count
pools = self._filter_pools_for_numa_cells(pools, request, numa_cells)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) as they are on the wrong NUMA node(s)',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# If we're not requesting PFs then we should not use these.
# Exclude them.
before_count = after_count
pools = self._filter_pools_for_unrequested_pfs(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) as they are PFs which we have not '
'requested',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# If we're not requesting VDPA devices then we should not use these
# either. Exclude them.
before_count = after_count
pools = self._filter_pools_for_unrequested_vdpa_devices(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) as they are VDPA devices which we have '
'not requested',
before_count - after_count
)
# If we're not requesting remote_managed devices then we should not
# use these either. Exclude them.
before_count = after_count
pools = self._filter_pools_for_unrequested_remote_managed_devices(
pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
                'Dropped %d device(s) as they are remote-managed devices '
                'which we have not requested',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
return pools
def support_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Determine if the PCI requests can be met.
Determine, based on a compute node's PCI stats, if an instance can be
scheduled on the node. **Support does not mean real allocation**.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``numa_policy``.
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:returns: Whether this compute node can satisfy the given request.
"""
# NOTE(yjiang5): this function has high possibility to fail,
# so no exception should be triggered for performance reason.
return all(
self._filter_pools(self.pools, r, numa_cells) for r in requests
)
def _apply_request(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Apply an individual PCI request.
Apply a PCI request against a given set of PCI device pools, which are
collections of devices with similar traits.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``request.numa_policy``.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: True if the request was applied against the provided pools
successfully, else False.
"""
        # NOTE(vladikr): This code may be open to race conditions.
# Two concurrent requests may succeed when called support_requests
# because this method does not remove related devices from the pools
filtered_pools = self._filter_pools(pools, request, numa_cells)
if not filtered_pools:
return False
count = request.count
for pool in filtered_pools:
count = self._decrease_pool_count(pools, pool, count)
if not count:
break
return True
def apply_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> None:
"""Apply PCI requests to the PCI stats.
This is used in multiple instance creation, when the scheduler has to
maintain how the resources are consumed by the instances.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``numa_policy``.
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:raises: exception.PciDeviceRequestFailed if this compute node cannot
satisfy the given request.
"""
if not all(
self._apply_request(self.pools, r, numa_cells) for r in requests
):
raise exception.PciDeviceRequestFailed(requests=requests)
def __iter__(self) -> ty.Iterator[Pool]:
pools: ty.List[Pool] = []
for pool in self.pools:
pool = copy.deepcopy(pool)
# 'devices' shouldn't be part of stats
if 'devices' in pool:
del pool['devices']
pools.append(pool)
return iter(pools)
def clear(self) -> None:
"""Clear all the stats maintained."""
self.pools = []
def __eq__(self, other: object) -> bool:
if not isinstance(other, PciDeviceStats):
return NotImplemented
return self.pools == other.pools
def to_device_pools_obj(self) -> 'objects.PciDevicePoolList':
"""Return the contents of the pools as a PciDevicePoolList object."""
stats = [x for x in self]
return pci_device_pool.from_pci_stats(stats)
def has_remote_managed_device_pools(self) -> bool:
"""Determine whether remote managed device pools are present on a host.
The check is pool-based, not free device-based and is NUMA cell
agnostic.
"""
dummy_req = objects.InstancePCIRequest(
count=0,
spec=[{'remote_managed': True}]
)
pools = self._filter_pools_for_spec(self.pools, dummy_req)
return bool(pools)
| # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import typing as ty
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova.pci import utils
from nova.pci import whitelist
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(stephenfin): We might want to use TypedDict here. Refer to
# https://mypy.readthedocs.io/en/latest/kinds_of_types.html#typeddict for
# more information.
Pool = ty.Dict[str, ty.Any]
class PciDeviceStats(object):
"""PCI devices summary information.
According to the PCI SR-IOV spec, a PCI physical function can have up to
256 PCI virtual functions, thus the number of assignable PCI functions in
a cloud can be big. The scheduler needs to know all device availability
information in order to determine which compute hosts can support a PCI
request. Passing individual virtual device information to the scheduler
does not scale, so we provide summary information.
Usually the virtual functions provided by a host PCI device have the same
value for most properties, like vendor_id, product_id and class type.
The PCI stats class summarizes this information for the scheduler.
The pci stats information is maintained exclusively by compute node
resource tracker and updated to database. The scheduler fetches the
information and selects the compute node accordingly. If a compute
node is selected, the resource tracker allocates the devices to the
instance and updates the pci stats information.
This summary information will be helpful for cloud management also.
"""
pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type']
def __init__(
self,
numa_topology: 'objects.NUMATopology',
stats: 'objects.PCIDevicePoolList' = None,
dev_filter: whitelist.Whitelist = None,
) -> None:
self.numa_topology = numa_topology
self.pools = (
[pci_pool.to_dict() for pci_pool in stats] if stats else []
)
self.pools.sort(key=lambda item: len(item))
self.dev_filter = dev_filter or whitelist.Whitelist(
CONF.pci.passthrough_whitelist)
def _equal_properties(
self, dev: Pool, entry: Pool, matching_keys: ty.List[str],
) -> bool:
return all(dev.get(prop) == entry.get(prop)
for prop in matching_keys)
def _find_pool(self, dev_pool: Pool) -> ty.Optional[Pool]:
"""Return the first pool that matches dev."""
for pool in self.pools:
pool_keys = pool.copy()
del pool_keys['count']
del pool_keys['devices']
if (len(pool_keys.keys()) == len(dev_pool.keys()) and
self._equal_properties(dev_pool, pool_keys, list(dev_pool))):
return pool
return None
@staticmethod
def _ensure_remote_managed_tag(
dev: 'objects.PciDevice', pool: Pool):
"""Add a remote_managed tag depending on a device type if needed.
Network devices may be managed remotely, e.g. by a SmartNIC DPU. If
a tag has not been explicitly provided, populate it by assuming that
a device is not remote managed by default.
"""
if dev.dev_type not in (fields.PciDeviceType.SRIOV_VF,
fields.PciDeviceType.SRIOV_PF,
fields.PciDeviceType.VDPA):
return
# A tag is added here rather than at the client side to avoid an
# issue with having objects without this tag specified during an
# upgrade to the first version that supports handling this tag.
if pool.get(PCI_REMOTE_MANAGED_TAG) is None:
# NOTE: tags are compared as strings case-insensitively, see
# pci_device_prop_match in nova/pci/utils.py.
pool[PCI_REMOTE_MANAGED_TAG] = 'false'
def _create_pool_keys_from_dev(
self, dev: 'objects.PciDevice',
) -> ty.Optional[Pool]:
"""Create a stats pool dict that this dev is supposed to be part of
Note that this pool dict contains the stats pool's keys and their
values. 'count' and 'devices' are not included.
"""
# Don't add a device that doesn't have a matching device spec.
# This can happen during initial sync up with the controller
devspec = self.dev_filter.get_devspec(dev)
if not devspec:
return None
tags = devspec.get_tags()
pool = {k: getattr(dev, k) for k in self.pool_keys}
if tags:
pool.update(tags)
        # NOTE(gibi): parent_ifname acts like a tag during pci claim but is
        # not provided as part of the whitelist spec as it is auto detected
        # by the virt driver.
        # This key is used to match InstancePciRequests backed by neutron
        # ports that have a resource_request and therefore already have a
        # resource allocation in placement.
if dev.extra_info.get('parent_ifname'):
pool['parent_ifname'] = dev.extra_info['parent_ifname']
self._ensure_remote_managed_tag(dev, pool)
return pool
def _get_pool_with_device_type_mismatch(
self, dev: 'objects.PciDevice',
) -> ty.Optional[ty.Tuple[Pool, 'objects.PciDevice']]:
"""Check for device type mismatch in the pools for a given device.
Return (pool, device) if device type does not match or a single None
if the device type matches.
"""
for pool in self.pools:
for device in pool['devices']:
if device.address == dev.address:
if dev.dev_type != pool["dev_type"]:
return pool, device
return None
return None
def update_device(self, dev: 'objects.PciDevice') -> None:
"""Update a device to its matching pool."""
pool_device_info = self._get_pool_with_device_type_mismatch(dev)
if pool_device_info is None:
return None
pool, device = pool_device_info
pool['devices'].remove(device)
self._decrease_pool_count(self.pools, pool)
self.add_device(dev)
def add_device(self, dev: 'objects.PciDevice') -> None:
"""Add a device to its matching pool."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
dev_pool['count'] = 0
dev_pool['devices'] = []
self.pools.append(dev_pool)
self.pools.sort(key=lambda item: len(item))
pool = dev_pool
pool['count'] += 1
pool['devices'].append(dev)
@staticmethod
def _decrease_pool_count(
pool_list: ty.List[Pool], pool: Pool, count: int = 1,
) -> int:
"""Decrement pool's size by count.
If pool becomes empty, remove pool from pool_list.
"""
if pool['count'] > count:
pool['count'] -= count
count = 0
else:
count -= pool['count']
pool_list.remove(pool)
return count
def remove_device(self, dev: 'objects.PciDevice') -> None:
"""Remove one device from the first pool that it matches."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
raise exception.PciDevicePoolEmpty(
compute_node_id=dev.compute_node_id, address=dev.address)
pool['devices'].remove(dev)
self._decrease_pool_count(self.pools, pool)
def get_free_devs(self) -> ty.List['objects.PciDevice']:
free_devs: ty.List[objects.PciDevice] = []
for pool in self.pools:
free_devs.extend(pool['devices'])
return free_devs
def consume_requests(
self,
pci_requests: 'objects.InstancePCIRequests',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> ty.Optional[ty.List['objects.PciDevice']]:
alloc_devices: ty.List[objects.PciDevice] = []
for request in pci_requests:
count = request.count
pools = self._filter_pools(self.pools, request, numa_cells)
# Failed to allocate the required number of devices. Return the
# devices already allocated during previous iterations back to
# their pools
if not pools:
LOG.error("Failed to allocate PCI devices for instance. "
"Unassigning devices back to pools. "
"This should not happen, since the scheduler "
"should have accurate information, and allocation "
"during claims is controlled via a hold "
"on the compute node semaphore.")
for d in range(len(alloc_devices)):
self.add_device(alloc_devices.pop())
return None
for pool in pools:
if pool['count'] >= count:
num_alloc = count
else:
num_alloc = pool['count']
count -= num_alloc
pool['count'] -= num_alloc
for d in range(num_alloc):
pci_dev = pool['devices'].pop()
self._handle_device_dependents(pci_dev)
pci_dev.request_id = request.request_id
alloc_devices.append(pci_dev)
if count == 0:
break
return alloc_devices
def _handle_device_dependents(self, pci_dev: 'objects.PciDevice') -> None:
"""Remove device dependents or a parent from pools.
        In case the device is a PF, all of its dependent VFs should
        be removed from the pool counts, if these are present.
        When the device is a VF or a VDPA device, its parent PF's
        pool count should be decreased, unless it is no longer in a pool.
"""
if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF:
vfs_list = pci_dev.child_devices
if vfs_list:
free_devs = self.get_free_devs()
for vf in vfs_list:
                    # NOTE(gibi): do not try to remove a device that is
                    # already removed
if vf in free_devs:
self.remove_device(vf)
elif pci_dev.dev_type in (
fields.PciDeviceType.SRIOV_VF,
fields.PciDeviceType.VDPA,
):
try:
parent = pci_dev.parent_device
# Make sure not to decrease PF pool count if this parent has
# been already removed from pools
if parent in self.get_free_devs():
self.remove_device(parent)
except exception.PciDeviceNotFound:
return
def _filter_pools_for_spec(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools that don't match the request's device spec.
Exclude pools that do not match the specified ``vendor_id``,
``product_id`` and/or ``device_type`` field, or any of the other
arbitrary tags such as ``physical_network``, specified in the request.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
request_specs = request.spec
return [
pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)
]
def _filter_pools_for_numa_cells(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
) -> ty.List[Pool]:
"""Filter out pools with the wrong NUMA affinity, if required.
Exclude pools that do not have *suitable* PCI NUMA affinity.
``numa_policy`` determines what *suitable* means, being one of
PREFERRED (nice-to-have), LEGACY (must-have-if-available) and REQUIRED
(must-have). We iterate through the various policies in order of
strictness. This means that even if we only *prefer* PCI-NUMA affinity,
we will still attempt to provide it if possible.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells.
:returns: A list of pools that can, together, provide at least
``requested_count`` PCI devices with the level of NUMA affinity
required by ``numa_policy``, else all pools that can satisfy this
policy even if it's not enough.
"""
if not numa_cells:
return pools
# we default to the 'legacy' policy for...of course...legacy reasons
requested_policy = fields.PCINUMAAffinityPolicy.LEGACY
if 'numa_policy' in request:
requested_policy = request.numa_policy or requested_policy
requested_count = request.count
numa_cell_ids = [cell.id for cell in numa_cells]
# filter out pools which numa_node is not included in numa_cell_ids
filtered_pools = [
pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}]) for cell in numa_cell_ids)]
# we can't apply a less strict policy than the one requested, so we
# need to return if we've demanded a NUMA affinity of REQUIRED.
# However, NUMA affinity is a good thing. If we can get enough devices
# with the stricter policy then we will use them.
if requested_policy == fields.PCINUMAAffinityPolicy.REQUIRED or sum(
pool['count'] for pool in filtered_pools) >= requested_count:
return filtered_pools
# the SOCKET policy is a bit of a special case. It's less strict than
# REQUIRED (so REQUIRED will automatically fulfil SOCKET, at least
# with our assumption of never having multiple sockets per NUMA node),
# but not always more strict than LEGACY: a PCI device with no NUMA
# affinity will fulfil LEGACY but not SOCKET. If we have SOCKET,
# process it here and don't continue.
if requested_policy == fields.PCINUMAAffinityPolicy.SOCKET:
return self._filter_pools_for_socket_affinity(pools, numa_cells)
# some systems don't report NUMA node info for PCI devices, in which
# case None is reported in 'pci_device.numa_node'. The LEGACY policy
# allows us to use these devices so we include None in the list of
# suitable NUMA cells.
numa_cell_ids.append(None)
# filter out pools which numa_node is not included in numa_cell_ids
filtered_pools = [
pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}]) for cell in numa_cell_ids)]
# once again, we can't apply a less strict policy than the one
# requested, so we need to return if we've demanded a NUMA affinity of
# LEGACY. Similarly, we will also return if we have enough devices to
# satisfy this somewhat strict policy.
if requested_policy == fields.PCINUMAAffinityPolicy.LEGACY or sum(
pool['count'] for pool in filtered_pools) >= requested_count:
return filtered_pools
# if we've got here, we're using the PREFERRED policy and weren't able
# to provide anything with stricter affinity. Use whatever devices you
# can, folks.
return sorted(
pools, key=lambda pool: pool.get('numa_node') not in numa_cell_ids)
def _filter_pools_for_socket_affinity(
self,
pools: ty.List[Pool],
numa_cells: ty.List['objects.InstanceNUMACell'],
) -> ty.List[Pool]:
host_cells = self.numa_topology.cells
# bail early if we don't have socket information for all host_cells.
        # This could happen if we're running on a weird older system with
# multiple sockets per NUMA node, which is a configuration that we
# explicitly chose not to support.
if any(cell.socket is None for cell in host_cells):
LOG.debug('No socket information in host NUMA cell(s).')
return []
# get a set of host sockets that the guest cells are in. Since guest
# cell IDs map to host cell IDs, we can just lookup the latter's
# socket.
socket_ids = set()
for guest_cell in numa_cells:
for host_cell in host_cells:
if guest_cell.id == host_cell.id:
socket_ids.add(host_cell.socket)
# now get a set of host NUMA nodes that are in the above sockets
allowed_numa_nodes = set()
for host_cell in host_cells:
if host_cell.socket in socket_ids:
allowed_numa_nodes.add(host_cell.id)
# filter out pools that are not in one of the correct host NUMA nodes.
return [
pool for pool in pools if any(
utils.pci_device_prop_match(pool, [{'numa_node': numa_node}])
for numa_node in allowed_numa_nodes
)
]
def _filter_pools_for_unrequested_pfs(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with PFs, unless these are required.
This is necessary in cases where PFs and VFs have the same product_id
and generally useful elsewhere.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(
spec.get('dev_type') != fields.PciDeviceType.SRIOV_PF
for spec in request.spec
):
pools = [
pool for pool in pools
if not pool.get('dev_type') == fields.PciDeviceType.SRIOV_PF
]
return pools
def _filter_pools_for_unrequested_vdpa_devices(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with VDPA devices, unless these are required.
This is necessary as vdpa devices require special handling and
should not be allocated to generic pci device requests.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(
spec.get('dev_type') != fields.PciDeviceType.VDPA
for spec in request.spec
):
pools = [
pool for pool in pools
if not pool.get('dev_type') == fields.PciDeviceType.VDPA
]
return pools
def _filter_pools_for_unrequested_remote_managed_devices(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with remote_managed devices, unless requested.
Remote-managed devices are not usable for legacy SR-IOV or hardware
offload scenarios and must be excluded from allocation.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(not strutils.bool_from_string(spec.get(PCI_REMOTE_MANAGED_TAG))
for spec in request.spec):
pools = [pool for pool in pools
if not strutils.bool_from_string(
pool.get(PCI_REMOTE_MANAGED_TAG))]
return pools
def _filter_pools(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
) -> ty.Optional[ty.List[Pool]]:
"""Determine if an individual PCI request can be met.
Filter pools, which are collections of devices with similar traits, to
identify those that can support the provided PCI request.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``request.numa_policy``.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: A list of pools that can be used to support the request if
this is possible, else None.
"""
# NOTE(vladikr): This code may be open to race conditions.
# Two concurrent requests may succeed when called support_requests
# because this method does not remove related devices from the pools
# Firstly, let's exclude all devices that don't match our spec (e.g.
# they've got different PCI IDs or something)
before_count = sum([pool['count'] for pool in pools])
pools = self._filter_pools_for_spec(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) due to mismatched PCI attribute(s)',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# Next, let's exclude all devices that aren't on the correct NUMA node
# or socket, *assuming* we have devices and care about that, as
# determined by policy
before_count = after_count
pools = self._filter_pools_for_numa_cells(pools, request, numa_cells)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) as they are on the wrong NUMA node(s)',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# If we're not requesting PFs then we should not use these.
# Exclude them.
before_count = after_count
pools = self._filter_pools_for_unrequested_pfs(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) as they are PFs which we have not '
'requested',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# If we're not requesting VDPA devices then we should not use these
# either. Exclude them.
before_count = after_count
pools = self._filter_pools_for_unrequested_vdpa_devices(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) as they are VDPA devices which we have '
'not requested',
before_count - after_count
)
# If we're not requesting remote_managed devices then we should not
# use these either. Exclude them.
before_count = after_count
pools = self._filter_pools_for_unrequested_remote_managed_devices(
pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
                'Dropped %d device(s) as they are remote-managed devices '
                'which we have not requested',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
return pools
def support_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Determine if the PCI requests can be met.
Determine, based on a compute node's PCI stats, if an instance can be
scheduled on the node. **Support does not mean real allocation**.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``numa_policy``.
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:returns: Whether this compute node can satisfy the given request.
"""
        # NOTE(yjiang5): this function has a high possibility of failing,
        # so no exception should be triggered, for performance reasons.
return all(
self._filter_pools(self.pools, r, numa_cells) for r in requests
)
def _apply_request(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Apply an individual PCI request.
Apply a PCI request against a given set of PCI device pools, which are
collections of devices with similar traits.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``request.numa_policy``.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: True if the request was applied against the provided pools
successfully, else False.
"""
        # NOTE(vladikr): This code may be open to race conditions.
# Two concurrent requests may succeed when called support_requests
# because this method does not remove related devices from the pools
filtered_pools = self._filter_pools(pools, request, numa_cells)
if not filtered_pools:
return False
count = request.count
for pool in filtered_pools:
count = self._decrease_pool_count(pools, pool, count)
if not count:
break
return True
def apply_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> None:
"""Apply PCI requests to the PCI stats.
This is used in multiple instance creation, when the scheduler has to
maintain how the resources are consumed by the instances.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``numa_policy``.
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:raises: exception.PciDeviceRequestFailed if this compute node cannot
satisfy the given request.
"""
if not all(
self._apply_request(self.pools, r, numa_cells) for r in requests
):
raise exception.PciDeviceRequestFailed(requests=requests)
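    # Rough usage sketch (hypothetical caller objects, for illustration only):
    #   stats = PciDeviceStats(numa_topology, stats=node.pci_device_pools)
    #   if stats.support_requests(instance_requests):    # scheduling check
    #       stats.apply_requests(instance_requests)       # track consumption
    #   devs = stats.consume_requests(instance_requests)  # claim real devices
    # support_requests() only tests feasibility, apply_requests() updates the
    # pool counts, and consume_requests() hands back actual PciDevice objects.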
def __iter__(self) -> ty.Iterator[Pool]:
pools: ty.List[Pool] = []
for pool in self.pools:
pool = copy.deepcopy(pool)
# 'devices' shouldn't be part of stats
if 'devices' in pool:
del pool['devices']
pools.append(pool)
return iter(pools)
def clear(self) -> None:
"""Clear all the stats maintained."""
self.pools = []
def __eq__(self, other: object) -> bool:
if not isinstance(other, PciDeviceStats):
return NotImplemented
return self.pools == other.pools
def to_device_pools_obj(self) -> 'objects.PciDevicePoolList':
"""Return the contents of the pools as a PciDevicePoolList object."""
stats = [x for x in self]
return pci_device_pool.from_pci_stats(stats)
def has_remote_managed_device_pools(self) -> bool:
"""Determine whether remote managed device pools are present on a host.
The check is pool-based, not free device-based and is NUMA cell
agnostic.
"""
dummy_req = objects.InstancePCIRequest(
count=0,
spec=[{'remote_managed': True}]
)
pools = self._filter_pools_for_spec(self.pools, dummy_req)
return bool(pools)
| en | 0.887211 | # Copyright (c) 2013 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(stephenfin): We might want to use TypedDict here. Refer to # https://mypy.readthedocs.io/en/latest/kinds_of_types.html#typeddict for # more information. PCI devices summary information. According to the PCI SR-IOV spec, a PCI physical function can have up to 256 PCI virtual functions, thus the number of assignable PCI functions in a cloud can be big. The scheduler needs to know all device availability information in order to determine which compute hosts can support a PCI request. Passing individual virtual device information to the scheduler does not scale, so we provide summary information. Usually the virtual functions provided by a host PCI device have the same value for most properties, like vendor_id, product_id and class type. The PCI stats class summarizes this information for the scheduler. The pci stats information is maintained exclusively by compute node resource tracker and updated to database. The scheduler fetches the information and selects the compute node accordingly. If a compute node is selected, the resource tracker allocates the devices to the instance and updates the pci stats information. This summary information will be helpful for cloud management also. Return the first pool that matches dev. Add a remote_managed tag depending on a device type if needed. Network devices may be managed remotely, e.g. by a SmartNIC DPU. If a tag has not been explicitly provided, populate it by assuming that a device is not remote managed by default. # A tag is added here rather than at the client side to avoid an # issue with having objects without this tag specified during an # upgrade to the first version that supports handling this tag. # NOTE: tags are compared as strings case-insensitively, see # pci_device_prop_match in nova/pci/utils.py. Create a stats pool dict that this dev is supposed to be part of Note that this pool dict contains the stats pool's keys and their values. 'count' and 'devices' are not included. # Don't add a device that doesn't have a matching device spec. # This can happen during initial sync up with the controller # NOTE(gibi): parent_ifname acts like a tag during pci claim but # not provided as part of the whitelist spec as it is auto detected # by the virt driver. # This key is used for match InstancePciRequest backed by neutron ports # that has resource_request and therefore that has resource allocation # already in placement. Check for device type mismatch in the pools for a given device. Return (pool, device) if device type does not match or a single None if the device type matches. Update a device to its matching pool. Add a device to its matching pool. Decrement pool's size by count. If pool becomes empty, remove pool from pool_list. Remove one device from the first pool that it matches. # Failed to allocate the required number of devices. 
Return the # devices already allocated during previous iterations back to # their pools Remove device dependents or a parent from pools. In case the device is a PF, all of it's dependent VFs should be removed from pools count, if these are present. When the device is a VF, or a VDPA device, it's parent PF pool count should be decreased, unless it is no longer in a pool. # NOTE(gibi): do not try to remove a device that are # already removed # Make sure not to decrease PF pool count if this parent has # been already removed from pools Filter out pools that don't match the request's device spec. Exclude pools that do not match the specified ``vendor_id``, ``product_id`` and/or ``device_type`` field, or any of the other arbitrary tags such as ``physical_network``, specified in the request. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. Filter out pools with the wrong NUMA affinity, if required. Exclude pools that do not have *suitable* PCI NUMA affinity. ``numa_policy`` determines what *suitable* means, being one of PREFERRED (nice-to-have), LEGACY (must-have-if-available) and REQUIRED (must-have). We iterate through the various policies in order of strictness. This means that even if we only *prefer* PCI-NUMA affinity, we will still attempt to provide it if possible. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells. :returns: A list of pools that can, together, provide at least ``requested_count`` PCI devices with the level of NUMA affinity required by ``numa_policy``, else all pools that can satisfy this policy even if it's not enough. # we default to the 'legacy' policy for...of course...legacy reasons # filter out pools which numa_node is not included in numa_cell_ids # we can't apply a less strict policy than the one requested, so we # need to return if we've demanded a NUMA affinity of REQUIRED. # However, NUMA affinity is a good thing. If we can get enough devices # with the stricter policy then we will use them. # the SOCKET policy is a bit of a special case. It's less strict than # REQUIRED (so REQUIRED will automatically fulfil SOCKET, at least # with our assumption of never having multiple sockets per NUMA node), # but not always more strict than LEGACY: a PCI device with no NUMA # affinity will fulfil LEGACY but not SOCKET. If we have SOCKET, # process it here and don't continue. # some systems don't report NUMA node info for PCI devices, in which # case None is reported in 'pci_device.numa_node'. The LEGACY policy # allows us to use these devices so we include None in the list of # suitable NUMA cells. # filter out pools which numa_node is not included in numa_cell_ids # once again, we can't apply a less strict policy than the one # requested, so we need to return if we've demanded a NUMA affinity of # LEGACY. Similarly, we will also return if we have enough devices to # satisfy this somewhat strict policy. # if we've got here, we're using the PREFERRED policy and weren't able # to provide anything with stricter affinity. Use whatever devices you # can, folks. # bail early if we don't have socket information for all host_cells. 
# This could happen if we're running on an weird older system with # multiple sockets per NUMA node, which is a configuration that we # explicitly chose not to support. # get a set of host sockets that the guest cells are in. Since guest # cell IDs map to host cell IDs, we can just lookup the latter's # socket. # now get a set of host NUMA nodes that are in the above sockets # filter out pools that are not in one of the correct host NUMA nodes. Filter out pools with PFs, unless these are required. This is necessary in cases where PFs and VFs have the same product_id and generally useful elsewhere. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. Filter out pools with VDPA devices, unless these are required. This is necessary as vdpa devices require special handling and should not be allocated to generic pci device requests. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. Filter out pools with remote_managed devices, unless requested. Remote-managed devices are not usable for legacy SR-IOV or hardware offload scenarios and must be excluded from allocation. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. Determine if an individual PCI request can be met. Filter pools, which are collections of devices with similar traits, to identify those that can support the provided PCI request. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``request.numa_policy``. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACell objects. :returns: A list of pools that can be used to support the request if this is possible, else None. # NOTE(vladikr): This code may be open to race conditions. # Two concurrent requests may succeed when called support_requests # because this method does not remove related devices from the pools # Firstly, let's exclude all devices that don't match our spec (e.g. # they've got different PCI IDs or something) # Next, let's exclude all devices that aren't on the correct NUMA node # or socket, *assuming* we have devices and care about that, as # determined by policy # If we're not requesting PFs then we should not use these. # Exclude them. # If we're not requesting VDPA devices then we should not use these # either. Exclude them. # If we're not requesting remote_managed devices then we should not # use these either. Exclude them. Determine if the PCI requests can be met. Determine, based on a compute node's PCI stats, if an instance can be scheduled on the node. **Support does not mean real allocation**. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``numa_policy``. 
:param requests: A list of InstancePCIRequest object describing the types, quantities and required NUMA affinities of devices we want. :type requests: nova.objects.InstancePCIRequests :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells, or None. :returns: Whether this compute node can satisfy the given request. # NOTE(yjiang5): this function has high possibility to fail, # so no exception should be triggered for performance reason. Apply an individual PCI request. Apply a PCI request against a given set of PCI device pools, which are collections of devices with similar traits. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``request.numa_policy``. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACell objects. :returns: True if the request was applied against the provided pools successfully, else False. # NOTE(vladikr): This code maybe open to race conditions. # Two concurrent requests may succeed when called support_requests # because this method does not remove related devices from the pools Apply PCI requests to the PCI stats. This is used in multiple instance creation, when the scheduler has to maintain how the resources are consumed by the instances. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``numa_policy``. :param requests: A list of InstancePCIRequest object describing the types, quantities and required NUMA affinities of devices we want. :type requests: nova.objects.InstancePCIRequests :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells, or None. :raises: exception.PciDeviceRequestFailed if this compute node cannot satisfy the given request. # 'devices' shouldn't be part of stats Clear all the stats maintained. Return the contents of the pools as a PciDevicePoolList object. Determine whether remote managed device pools are present on a host. The check is pool-based, not free device-based and is NUMA cell agnostic. | 1.720545 | 2 |
Use.py | Codingprivacy/Multiple-Rename | 2 | 719 | import multiple
multiple.rename("C:/Users/Username/Desktop",'new_name',33,'.exe')
"""this above lines renames all the files of the folder Desktop to 'new_name' and
count starts from 33 to further (we can also provide 1 to start it from 1) and
extension is given '.exe'
hence the files will be renamed like :
1. new_name33.exe
2. new_name34.exe and so on
""" | import multiple
multiple.rename("C:/Users/Username/Desktop",'new_name',33,'.exe')
"""this above lines renames all the files of the folder Desktop to 'new_name' and
count starts from 33 to further (we can also provide 1 to start it from 1) and
extension is given '.exe'
hence the files will be renamed like :
1. new_name33.exe
2. new_name34.exe and so on
""" | en | 0.90048 | this above lines renames all the files of the folder Desktop to 'new_name' and count starts from 33 to further (we can also provide 1 to start it from 1) and extension is given '.exe' hence the files will be renamed like : 1. new_name33.exe 2. new_name34.exe and so on | 3.325865 | 3 |
ReadSymLink.py | ohel/pyorbital-gizmod-tweaks | 0 | 720 | import os
def readlinkabs(l):
"""
Return an absolute path for the destination
of a symlink
"""
if not (os.path.islink(l)):
return None
p = os.readlink(l)
if os.path.isabs(p):
return p
return os.path.join(os.path.dirname(l), p)
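# Illustrative usage (paths are assumptions, not part of the original module):
#   readlinkabs('/usr/bin/python')   # -> e.g. '/usr/bin/python3' if a symlink
#   readlinkabs('/etc/hostname')     # -> None when the path is not a symlink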
| import os
def readlinkabs(l):
"""
Return an absolute path for the destination
of a symlink
"""
if not (os.path.islink(l)):
return None
p = os.readlink(l)
if os.path.isabs(p):
return p
return os.path.join(os.path.dirname(l), p)
| en | 0.602798 | Return an absolute path for the destination of a symlink | 2.87863 | 3 |
examples/capture_circular.py | IanTBlack/picamera2 | 71 | 721 | #!/usr/bin/python3
import time
import numpy as np
from picamera2.encoders import H264Encoder
from picamera2.outputs import CircularOutput
from picamera2 import Picamera2
lsize = (320, 240)
picam2 = Picamera2()
video_config = picam2.video_configuration(main={"size": (1280, 720), "format": "RGB888"},
lores={"size": lsize, "format": "YUV420"})
picam2.configure(video_config)
picam2.start_preview()
encoder = H264Encoder(1000000, repeat=True)
encoder.output = CircularOutput()
picam2.encoder = encoder
picam2.start()
picam2.start_encoder()
w, h = lsize
prev = None
encoding = False
ltime = 0
while True:
cur = picam2.capture_buffer("lores")
cur = cur[:w * h].reshape(h, w)
if prev is not None:
        # Measure the pixel difference between the current and
        # previous frame
mse = np.square(np.subtract(cur, prev)).mean()
if mse > 7:
if not encoding:
epoch = int(time.time())
encoder.output.fileoutput = "{}.h264".format(epoch)
encoder.output.start()
encoding = True
print("New Motion", mse)
ltime = time.time()
else:
if encoding and time.time() - ltime > 5.0:
encoder.output.stop()
encoding = False
prev = cur
picam2.stop_encoder()
 | #!/usr/bin/python3
import time
import numpy as np
from picamera2.encoders import H264Encoder
from picamera2.outputs import CircularOutput
from picamera2 import Picamera2
lsize = (320, 240)
picam2 = Picamera2()
video_config = picam2.video_configuration(main={"size": (1280, 720), "format": "RGB888"},
lores={"size": lsize, "format": "YUV420"})
picam2.configure(video_config)
picam2.start_preview()
encoder = H264Encoder(1000000, repeat=True)
encoder.output = CircularOutput()
picam2.encoder = encoder
picam2.start()
picam2.start_encoder()
w, h = lsize
prev = None
encoding = False
ltime = 0
while True:
cur = picam2.capture_buffer("lores")
cur = cur[:w * h].reshape(h, w)
if prev is not None:
        # Measure the pixel difference between the current and
        # previous frame
mse = np.square(np.subtract(cur, prev)).mean()
if mse > 7:
if not encoding:
epoch = int(time.time())
encoder.output.fileoutput = "{}.h264".format(epoch)
encoder.output.start()
encoding = True
print("New Motion", mse)
ltime = time.time()
else:
if encoding and time.time() - ltime > 5.0:
encoder.output.stop()
encoding = False
prev = cur
picam2.stop_encoder()
| en | 0.820865 | #!/usr/bin/python3 # Measure pixels differences between current and # previous frame | 2.820756 | 3 |
Bio/NeuralNetwork/Gene/Pattern.py | barendt/biopython | 3 | 722 | """Generic functionality useful for all gene representations.
This module contains classes which can be used for all the different
types of patterns available for representing gene information (ie. motifs,
signatures and schemas). These are the general classes which should be
handle any of the different specific patterns.
"""
# standard library
import random
# biopython
from Bio import utils
from Bio.Seq import Seq, MutableSeq
class PatternIO:
"""Allow reading and writing of patterns to files.
    This just defines a simple persistence class for patterns, making
it easy to write them to a file and read 'em back.
"""
def __init__(self, alphabet = None):
"""Intialize the reader and writer class.
Arguments:
o alphabet - An optional argument specifying the alphabet
which patterns should follow. If an alphabet is set it'll be used
to verify that all patterns follow it.
Attributes:
o separator - A character to use in separating items in a signature
when it is written to a file and read back. This character should
not be in the possible alphabet of the sequences, or there will
be trouble.
"""
self._alphabet = alphabet
self.separator = ";"
def write(self, pattern_list, output_handle):
"""Write a list of patterns to the given handle.
"""
for pattern in pattern_list:
            # deal with signatures, concatenate them with the separator
if (type(pattern) == type([]) or
type(pattern) == type(tuple([]))):
string_pattern = self.separator.join(pattern)
# deal with the normal cases
else:
string_pattern = pattern
output_handle.write("%s\n" % string_pattern)
def write_seq(self, seq_pattern_list, output_handle):
"""Convenience function to write Seq objects to a file.
This can take Seqs and MutableSeqs, and write them to a file
as strings.
"""
# convert the seq patterns into just string patterns
all_patterns = []
for seq_pattern in seq_pattern_list:
if isinstance(seq_pattern, MutableSeq):
seq = seq_pattern.toseq()
all_patterns.append(seq.data)
elif isinstance(seq_pattern, Seq):
all_patterns.append(seq_pattern.data)
else:
raise ValueError("Unexpected pattern type %r" % seq_pattern)
self.write(all_patterns, output_handle)
def read(self, input_handle):
"""Read patterns from the specified handle.
"""
all_patterns = []
while 1:
cur_line = input_handle.readline()
if not(cur_line):
break
cur_pattern = cur_line.rstrip()
# split up signatures
if cur_pattern.find(self.separator) >= 0:
cur_pattern = tuple(cur_pattern.split(self.separator))
if self._alphabet is not None:
# make single patterns (not signatures) into lists, so we
# can check signatures and single patterns the same
if type(cur_pattern) != type(tuple([])):
test_pattern = [cur_pattern]
else:
test_pattern = cur_pattern
for pattern_item in test_pattern:
pattern_seq = Seq(pattern_item, self._alphabet)
if not(utils.verify_alphabet(pattern_seq)):
raise ValueError("Pattern %s not matching alphabet %s"
% (cur_pattern, self._alphabet))
all_patterns.append(cur_pattern)
return all_patterns
class PatternRepository:
"""This holds a list of specific patterns found in sequences.
This is designed to be a general holder for a set of patterns and
should be subclassed for specific implementations (ie. holding Motifs
or Signatures.
"""
def __init__(self, pattern_info):
"""Initialize a repository with patterns,
Arguments:
o pattern_info - A representation of all of the patterns found in
a *Finder search. This should be a dictionary, where the keys
are patterns, and the values are the number of times a pattern is
found.
        The patterns are represented internally as a list of two
tuples, where the first element is the number of times a pattern
occurs, and the second is the pattern itself. This makes it easy
to sort the list and return the top N patterns.
"""
self._pattern_dict = pattern_info
# create the list representation
self._pattern_list = []
for pattern_name in self._pattern_dict.keys():
self._pattern_list.append((self._pattern_dict[pattern_name],
pattern_name))
self._pattern_list.sort()
self._pattern_list.reverse()
def get_all(self):
"""Retrieve all of the patterns in the repository.
"""
patterns = []
for pattern_info in self._pattern_list:
patterns.append(pattern_info[1])
return patterns
def get_random(self, num_patterns):
"""Retrieve the specified number of patterns randomly.
Randomly selects patterns from the list and returns them.
Arguments:
o num_patterns - The total number of patterns to return.
"""
all_patterns = []
while len(all_patterns) < num_patterns:
# pick a pattern, and only add it if it is not already present
new_pattern_info = random.choice(self._pattern_list)
if new_pattern_info[1] not in all_patterns:
all_patterns.append(new_pattern_info[1])
return all_patterns
def get_top_percentage(self, percent):
"""Return a percentage of the patterns.
This returns the top 'percent' percentage of the patterns in the
repository.
"""
all_patterns = self.get_all()
num_to_return = int(len(all_patterns) * percent)
return all_patterns[:num_to_return]
def get_top(self, num_patterns):
"""Return the specified number of most frequently occurring patterns
Arguments:
o num_patterns - The number of patterns to return.
"""
all_patterns = []
for pattern_info in self._pattern_list[:num_patterns]:
all_patterns.append(pattern_info[1])
return all_patterns
def get_differing(self, top_num, bottom_num):
"""Retrieve patterns that are at the extreme ranges.
This returns both patterns at the top of the list (ie. the same as
returned by get_top) and at the bottom of the list. This
is especially useful for patterns that are the differences between
two sets of patterns.
Arguments:
o top_num - The number of patterns to take from the top of the list.
o bottom_num - The number of patterns to take from the bottom of
the list.
"""
all_patterns = []
# first get from the top of the list
for pattern_info in self._pattern_list[:top_num]:
all_patterns.append(pattern_info[1])
# then from the bottom
for pattern_info in self._pattern_list[-bottom_num:]:
all_patterns.append(pattern_info[1])
return all_patterns
def remove_polyA(self, at_percentage = .9):
"""Remove patterns which are likely due to polyA tails from the lists.
        This is just a helper function to remove patterns which are likely
just due to polyA tails, and thus are not really great motifs.
This will also get rid of stuff like ATATAT, which might be a
useful motif, so use at your own discretion.
XXX Could we write a more general function, based on info content
or something like that?
Arguments:
o at_percentage - The percentage of A and T residues in a pattern
that qualifies it for being removed.
"""
remove_list = []
# find all of the really AT rich patterns
for pattern_info in self._pattern_list:
pattern_at = float(pattern_info[1].count('A') + pattern_info[1].count('T')) / len(pattern_info[1])
if pattern_at > at_percentage:
remove_list.append(pattern_info)
# now remove them from the master list
for to_remove in remove_list:
self._pattern_list.remove(to_remove)
def count(self, pattern):
"""Return the number of times the specified pattern is found.
"""
try:
return self._pattern_dict[pattern]
except KeyError:
return 0
| """Generic functionality useful for all gene representations.
This module contains classes which can be used for all the different
types of patterns available for representing gene information (ie. motifs,
signatures and schemas). These are the general classes which should be
handle any of the different specific patterns.
"""
# standard library
import random
# biopython
from Bio import utils
from Bio.Seq import Seq, MutableSeq
class PatternIO:
"""Allow reading and writing of patterns to files.
    This just defines a simple persistence class for patterns, making
it easy to write them to a file and read 'em back.
"""
def __init__(self, alphabet = None):
"""Intialize the reader and writer class.
Arguments:
o alphabet - An optional argument specifying the alphabet
which patterns should follow. If an alphabet is set it'll be used
to verify that all patterns follow it.
Attributes:
o separator - A character to use in separating items in a signature
when it is written to a file and read back. This character should
not be in the possible alphabet of the sequences, or there will
be trouble.
"""
self._alphabet = alphabet
self.separator = ";"
def write(self, pattern_list, output_handle):
"""Write a list of patterns to the given handle.
"""
for pattern in pattern_list:
            # deal with signatures, concatenate them with the separator
if (type(pattern) == type([]) or
type(pattern) == type(tuple([]))):
string_pattern = self.separator.join(pattern)
# deal with the normal cases
else:
string_pattern = pattern
output_handle.write("%s\n" % string_pattern)
def write_seq(self, seq_pattern_list, output_handle):
"""Convenience function to write Seq objects to a file.
This can take Seqs and MutableSeqs, and write them to a file
as strings.
"""
# convert the seq patterns into just string patterns
all_patterns = []
for seq_pattern in seq_pattern_list:
if isinstance(seq_pattern, MutableSeq):
seq = seq_pattern.toseq()
all_patterns.append(seq.data)
elif isinstance(seq_pattern, Seq):
all_patterns.append(seq_pattern.data)
else:
raise ValueError("Unexpected pattern type %r" % seq_pattern)
self.write(all_patterns, output_handle)
def read(self, input_handle):
"""Read patterns from the specified handle.
"""
all_patterns = []
while 1:
cur_line = input_handle.readline()
if not(cur_line):
break
cur_pattern = cur_line.rstrip()
# split up signatures
if cur_pattern.find(self.separator) >= 0:
cur_pattern = tuple(cur_pattern.split(self.separator))
if self._alphabet is not None:
# make single patterns (not signatures) into lists, so we
# can check signatures and single patterns the same
if type(cur_pattern) != type(tuple([])):
test_pattern = [cur_pattern]
else:
test_pattern = cur_pattern
for pattern_item in test_pattern:
pattern_seq = Seq(pattern_item, self._alphabet)
if not(utils.verify_alphabet(pattern_seq)):
raise ValueError("Pattern %s not matching alphabet %s"
% (cur_pattern, self._alphabet))
all_patterns.append(cur_pattern)
return all_patterns
class PatternRepository:
"""This holds a list of specific patterns found in sequences.
This is designed to be a general holder for a set of patterns and
should be subclassed for specific implementations (ie. holding Motifs
or Signatures.
"""
def __init__(self, pattern_info):
"""Initialize a repository with patterns,
Arguments:
o pattern_info - A representation of all of the patterns found in
a *Finder search. This should be a dictionary, where the keys
are patterns, and the values are the number of times a pattern is
found.
        The patterns are represented internally as a list of two
tuples, where the first element is the number of times a pattern
occurs, and the second is the pattern itself. This makes it easy
to sort the list and return the top N patterns.
"""
self._pattern_dict = pattern_info
# create the list representation
self._pattern_list = []
for pattern_name in self._pattern_dict.keys():
self._pattern_list.append((self._pattern_dict[pattern_name],
pattern_name))
self._pattern_list.sort()
self._pattern_list.reverse()
def get_all(self):
"""Retrieve all of the patterns in the repository.
"""
patterns = []
for pattern_info in self._pattern_list:
patterns.append(pattern_info[1])
return patterns
def get_random(self, num_patterns):
"""Retrieve the specified number of patterns randomly.
Randomly selects patterns from the list and returns them.
Arguments:
o num_patterns - The total number of patterns to return.
"""
all_patterns = []
while len(all_patterns) < num_patterns:
# pick a pattern, and only add it if it is not already present
new_pattern_info = random.choice(self._pattern_list)
if new_pattern_info[1] not in all_patterns:
all_patterns.append(new_pattern_info[1])
return all_patterns
def get_top_percentage(self, percent):
"""Return a percentage of the patterns.
This returns the top 'percent' percentage of the patterns in the
repository.
"""
all_patterns = self.get_all()
num_to_return = int(len(all_patterns) * percent)
return all_patterns[:num_to_return]
def get_top(self, num_patterns):
"""Return the specified number of most frequently occurring patterns
Arguments:
o num_patterns - The number of patterns to return.
"""
all_patterns = []
for pattern_info in self._pattern_list[:num_patterns]:
all_patterns.append(pattern_info[1])
return all_patterns
def get_differing(self, top_num, bottom_num):
"""Retrieve patterns that are at the extreme ranges.
This returns both patterns at the top of the list (ie. the same as
returned by get_top) and at the bottom of the list. This
is especially useful for patterns that are the differences between
two sets of patterns.
Arguments:
o top_num - The number of patterns to take from the top of the list.
o bottom_num - The number of patterns to take from the bottom of
the list.
"""
all_patterns = []
# first get from the top of the list
for pattern_info in self._pattern_list[:top_num]:
all_patterns.append(pattern_info[1])
# then from the bottom
for pattern_info in self._pattern_list[-bottom_num:]:
all_patterns.append(pattern_info[1])
return all_patterns
def remove_polyA(self, at_percentage = .9):
"""Remove patterns which are likely due to polyA tails from the lists.
        This is just a helper function to remove patterns which are likely
just due to polyA tails, and thus are not really great motifs.
This will also get rid of stuff like ATATAT, which might be a
useful motif, so use at your own discretion.
XXX Could we write a more general function, based on info content
or something like that?
Arguments:
o at_percentage - The percentage of A and T residues in a pattern
that qualifies it for being removed.
"""
remove_list = []
# find all of the really AT rich patterns
for pattern_info in self._pattern_list:
pattern_at = float(pattern_info[1].count('A') + pattern_info[1].count('T')) / len(pattern_info[1])
if pattern_at > at_percentage:
remove_list.append(pattern_info)
# now remove them from the master list
for to_remove in remove_list:
self._pattern_list.remove(to_remove)
def count(self, pattern):
"""Return the number of times the specified pattern is found.
"""
try:
return self._pattern_dict[pattern]
except KeyError:
return 0
| en | 0.88233 | Generic functionality useful for all gene representations. This module contains classes which can be used for all the different types of patterns available for representing gene information (ie. motifs, signatures and schemas). These are the general classes which should be handle any of the different specific patterns. # standard library # biopython Allow reading and writing of patterns to files. This just defines a simple persistance class for patterns, making it easy to write them to a file and read 'em back. Intialize the reader and writer class. Arguments: o alphabet - An optional argument specifying the alphabet which patterns should follow. If an alphabet is set it'll be used to verify that all patterns follow it. Attributes: o separator - A character to use in separating items in a signature when it is written to a file and read back. This character should not be in the possible alphabet of the sequences, or there will be trouble. Write a list of patterns to the given handle. # deal with signatures, concatentate them with the separator # deal with the normal cases Convenience function to write Seq objects to a file. This can take Seqs and MutableSeqs, and write them to a file as strings. # convert the seq patterns into just string patterns Read patterns from the specified handle. # split up signatures # make single patterns (not signatures) into lists, so we # can check signatures and single patterns the same This holds a list of specific patterns found in sequences. This is designed to be a general holder for a set of patterns and should be subclassed for specific implementations (ie. holding Motifs or Signatures. Initialize a repository with patterns, Arguments: o pattern_info - A representation of all of the patterns found in a *Finder search. This should be a dictionary, where the keys are patterns, and the values are the number of times a pattern is found. The patterns are represented interally as a list of two tuples, where the first element is the number of times a pattern occurs, and the second is the pattern itself. This makes it easy to sort the list and return the top N patterns. # create the list representation Retrieve all of the patterns in the repository. Retrieve the specified number of patterns randomly. Randomly selects patterns from the list and returns them. Arguments: o num_patterns - The total number of patterns to return. # pick a pattern, and only add it if it is not already present Return a percentage of the patterns. This returns the top 'percent' percentage of the patterns in the repository. Return the specified number of most frequently occurring patterns Arguments: o num_patterns - The number of patterns to return. Retrieve patterns that are at the extreme ranges. This returns both patterns at the top of the list (ie. the same as returned by get_top) and at the bottom of the list. This is especially useful for patterns that are the differences between two sets of patterns. Arguments: o top_num - The number of patterns to take from the top of the list. o bottom_num - The number of patterns to take from the bottom of the list. # first get from the top of the list # then from the bottom Remove patterns which are likely due to polyA tails from the lists. This is just a helper function to remove pattenrs which are likely just due to polyA tails, and thus are not really great motifs. This will also get rid of stuff like ATATAT, which might be a useful motif, so use at your own discretion. 
XXX Could we write a more general function, based on info content or something like that? Arguments: o at_percentage - The percentage of A and T residues in a pattern that qualifies it for being removed. # find all of the really AT rich patterns # now remove them from the master list Return the number of times the specified pattern is found. | 3.342377 | 3 |
neslter/parsing/nut/__init__.py | WHOIGit/nes-lter-ims | 3 | 723 | from .nut import parse_nut, format_nut, merge_nut_bottles
| from .nut import parse_nut, format_nut, merge_nut_bottles | none | 1 | 1.038438 | 1 |
|
legtool/tabs/servo_tab.py | jpieper/legtool | 10 | 724 | # Copyright 2014 <NAME>, <EMAIL>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import trollius as asyncio
from trollius import Task, From, Return
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
from ..servo import selector
from .common import BoolContext
from . import gazebo_config_dialog
def spawn(callback):
def start():
Task(callback())
return start
class ServoTab(object):
def __init__(self, ui, status):
self.ui = ui
self.status = status
self.servo_controls = []
self.monitor_thread = None
self.servo_model = ''
self.servo_name_map = {}
self.ui.statusText.setText('not connected')
self.ui.connectButton.clicked.connect(
spawn(self.handle_connect_clicked))
self.ui.typeCombo.currentIndexChanged.connect(self.handle_type_change)
self.handle_type_change()
self.ui.configureGazeboButton.clicked.connect(
self.handle_configure_gazebo)
servo_layout = QtGui.QVBoxLayout()
servo_layout.setSpacing(0)
servo_layout.setContentsMargins(0, 0, 0, 0)
self.ui.scrollContents.setLayout(servo_layout)
self.ui.servoCountSpin.valueChanged.connect(self.handle_servo_count)
self.handle_servo_count()
self.ui.powerCombo.currentIndexChanged.connect(
spawn(self.handle_power))
self.ui.captureCurrentButton.clicked.connect(
spawn(self.handle_capture_current))
self.update_connected(False)
self.ui.addPoseButton.clicked.connect(self.handle_add_pose)
self.ui.removePoseButton.clicked.connect(self.handle_remove_pose)
self.ui.moveToPoseButton.clicked.connect(
spawn(self.handle_move_to_pose))
self.ui.updatePoseButton.clicked.connect(self.handle_update_pose)
self.ui.poseList.currentItemChanged.connect(
self.handle_poselist_current_changed)
self.controller = None
self.servo_update = BoolContext()
def resizeEvent(self, event):
pass
def poses(self):
result = []
for i in range(self.ui.poseList.count()):
result.append(self.ui.poseList.item(i).text())
return result
def pose(self, name):
for i in range(self.ui.poseList.count()):
if self.ui.poseList.item(i).text() == name:
return self.ui.poseList.item(i).data(QtCore.Qt.UserRole)
return dict([(i, 0.0) for i in range(self.ui.servoCountSpin.value())])
@asyncio.coroutine
def handle_connect_clicked(self):
val = self.ui.typeCombo.currentText().lower()
try:
self.controller = yield From(
selector.select_servo(
val,
serial_port=self.ui.serialPortCombo.currentText(),
model_name=self.servo_model,
servo_name_map=self.servo_name_map))
self.ui.statusText.setText('connected')
self.update_connected(True)
except Exception as e:
self.ui.statusText.setText('error: %s' % str(e))
self.update_connected(False)
def handle_type_change(self):
val = self.ui.typeCombo.currentText().lower()
self.ui.serialPortCombo.setEnabled(val == 'herkulex')
self.ui.configureGazeboButton.setEnabled(val == 'gazebo')
def handle_configure_gazebo(self):
servo_name_map = self.servo_name_map.copy()
for x in range(self.ui.servoCountSpin.value()):
if not x in servo_name_map:
servo_name_map[x] = ''
dialog = gazebo_config_dialog.GazeboConfigDialog(
self.servo_model, servo_name_map)
dialog.setModal(True)
result = dialog.exec_()
if result == QtGui.QDialog.Rejected:
return
self.servo_model = dialog.model_name()
self.servo_name_map = dialog.servo_name_map()
def handle_servo_count(self):
count = self.ui.servoCountSpin.value()
while len(self.servo_controls) > count:
# Remove the last one
last = self.servo_controls[-1]
widget = last['widget']
self.ui.scrollContents.layout().removeWidget(widget)
widget.deleteLater()
self.servo_controls = self.servo_controls[:-1]
while len(self.servo_controls) < count:
# Add a new one.
servo_id = len(self.servo_controls)
label = QtGui.QLabel()
label.setText('ID %d:' % servo_id)
slider = QtGui.QSlider(QtCore.Qt.Horizontal)
slider.setRange(-180, 180)
doublespin = QtGui.QDoubleSpinBox()
doublespin.setRange(-180, 180)
doublespin.setDecimals(1)
save = QtGui.QPushButton()
save.setText("Save")
move = QtGui.QPushButton()
move.setText("Move")
current = QtGui.QLabel()
current.setText('N/A')
current.setMinimumWidth(60)
widget = QtGui.QWidget()
layout = QtGui.QHBoxLayout(widget)
layout.addWidget(label)
layout.addWidget(slider)
layout.addWidget(doublespin)
layout.addWidget(save)
layout.addWidget(move)
layout.addWidget(current)
slider.valueChanged.connect(
functools.partial(self.handle_servo_slider, servo_id))
doublespin.valueChanged.connect(
functools.partial(self.handle_servo_spin, servo_id))
save.clicked.connect(
functools.partial(self.handle_servo_save, servo_id))
move.clicked.connect(
functools.partial(self.handle_servo_move, servo_id))
self.ui.scrollContents.layout().addWidget(widget)
self.servo_controls.append({
'widget': widget,
'label': label,
'slider': slider,
'doublespin': doublespin,
'save': save,
'move': move,
'current': current})
@asyncio.coroutine
def handle_power(self):
text = self.ui.powerCombo.currentText().lower()
value = None
if text == 'free':
value = selector.POWER_FREE
elif text == 'brake':
value = selector.POWER_BRAKE
elif text == 'drive':
value = selector.POWER_ENABLE
else:
raise NotImplementedError()
yield From(self.controller.enable_power(value))
def update_connected(self, value):
self.ui.controlGroup.setEnabled(value)
self.ui.posesGroup.setEnabled(value)
if self.monitor_thread is not None:
self.monitor_thread.cancel()
self.monitor_thread = None
if value:
            # handle_power is a coroutine; wrap it in a Task so it actually
            # runs (a bare call would only create the coroutine object).
            Task(self.handle_power())
self.monitor_thread = Task(self.monitor_status())
@asyncio.coroutine
def monitor_status(self):
voltages = {}
temperatures = {}
ident = 0
while True:
if (self.controller is not None and
hasattr(self.controller, 'get_voltage')):
try:
ident = (ident + 1) % len(self.servo_controls)
this_voltage = yield From(
self.controller.get_voltage([ident]))
voltages.update(this_voltage)
# Get all temperatures.
this_temp = yield From(
self.controller.get_temperature([ident]))
temperatures.update(this_temp)
def non_None(value):
return [x for x in value if x is not None]
message = "Servo status: "
if len(non_None(voltages.values())):
message += "%.1f/%.1fV" % (
min(non_None(voltages.values())),
max(non_None(voltages.values())))
if len(non_None(temperatures.values())):
message += " %.1f/%.1fC" % (
min(non_None(temperatures.values())),
max(non_None(temperatures.values())))
self.status.showMessage(message, 10000)
except Exception as e:
traceback.print_exc()
print "Error reading servo:", type(e), e
yield From(asyncio.sleep(2.0))
@asyncio.coroutine
def set_single_pose(self, servo_id, value):
yield From(
self.controller.set_single_pose(servo_id, value, pose_time=0.2))
def handle_servo_slider(self, servo_id, event):
if self.servo_update.value:
return
with self.servo_update:
control = self.servo_controls[servo_id]
value = control['slider'].value()
control['doublespin'].setValue(value)
Task(self.set_single_pose(servo_id, value))
def handle_servo_spin(self, servo_id, event):
if self.servo_update.value:
return
with self.servo_update:
control = self.servo_controls[servo_id]
value = control['doublespin'].value()
control['slider'].setSliderPosition(int(value))
Task(self.set_single_pose(servo_id, value))
def handle_servo_save(self, servo_id):
if self.ui.poseList.currentRow() < 0:
return
current_data = self.ui.poseList.currentItem().data(
QtCore.Qt.UserRole)
current_data[servo_id] = (
self.servo_controls[servo_id]['doublespin'].value())
self.ui.poseList.currentItem().setData(
QtCore.Qt.UserRole, current_data)
self.handle_poselist_current_changed(None, None)
def handle_servo_move(self, servo_id):
if self.ui.poseList.currentRow() < 0:
return
data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
self.servo_controls[servo_id]['doublespin'].setValue(data[servo_id])
@asyncio.coroutine
def handle_capture_current(self):
with self.servo_update:
results = yield From(
self.controller.get_pose(range(len(self.servo_controls))))
for ident, angle in results.iteritems():
if angle is None:
continue
control = self.servo_controls[ident]
control['slider'].setSliderPosition(int(angle))
control['doublespin'].setValue(angle)
def add_list_pose(self, name):
self.ui.poseList.addItem(name)
item = self.ui.poseList.item(self.ui.poseList.count() - 1)
item.setFlags(QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsEditable |
QtCore.Qt.ItemIsSelectable)
return item
def get_new_pose_name(self):
poses = set([self.ui.poseList.item(x).text()
for x in range(self.ui.poseList.count())])
count = 0
while True:
name = 'new_pose_%d' % count
if name not in poses:
return name
count += 1
def generate_pose_data(self):
return dict(
[ (i, control['doublespin'].value())
for i, control in enumerate(self.servo_controls) ])
def handle_add_pose(self):
pose_name = self.get_new_pose_name()
item = self.add_list_pose(pose_name)
item.setData(QtCore.Qt.UserRole, self.generate_pose_data())
self.ui.poseList.editItem(item)
def handle_remove_pose(self):
if self.ui.poseList.currentRow() < 0:
return
        # The pose data is stored on the QListWidget items themselves
        # (self.poses is a method, not a dict), so removing the item below
        # is sufficient.
self.ui.poseList.takeItem(self.ui.poseList.currentRow())
@asyncio.coroutine
def handle_move_to_pose(self):
if self.ui.poseList.currentRow() < 0:
return
values = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
yield From(self.controller.set_pose(values, pose_time=1.0))
with self.servo_update:
for ident, angle_deg in values.iteritems():
control = self.servo_controls[ident]
control['slider'].setSliderPosition(int(angle_deg))
control['doublespin'].setValue(angle_deg)
def handle_update_pose(self):
if self.ui.poseList.currentRow() < 0:
return
self.ui.poseList.currentItem().setData(
QtCore.Qt.UserRole, self.generate_pose_data())
self.handle_poselist_current_changed(None, None)
def handle_poselist_current_changed(self, current, previous):
if self.ui.poseList.currentRow() < 0:
return
data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
for i, control in enumerate(self.servo_controls):
control['current'].setText('%.1f' % data[i])
def read_settings(self, config):
if not config.has_section('servo'):
return
self.ui.typeCombo.setCurrentIndex(config.getint('servo', 'type'))
self.ui.serialPortCombo.setEditText(config.get('servo', 'port'))
self.ui.servoCountSpin.setValue(config.getint('servo', 'count'))
self.servo_model = config.get('servo', 'model')
if config.has_section('servo.names'):
self.servo_name_map = {}
for name, value in config.items('servo.names'):
self.servo_name_map[int(name)] = value
if config.has_section('servo.poses'):
for name, value in config.items('servo.poses'):
this_data = {}
for element in value.split(','):
ident, angle_deg = element.split('=')
this_data[int(ident)] = float(angle_deg)
item = self.add_list_pose(name)
item.setData(QtCore.Qt.UserRole, this_data)
def write_settings(self, config):
config.add_section('servo')
config.add_section('servo.poses')
config.add_section('servo.names')
config.set('servo', 'type', self.ui.typeCombo.currentIndex())
config.set('servo', 'port', self.ui.serialPortCombo.currentText())
config.set('servo', 'count', self.ui.servoCountSpin.value())
config.set('servo', 'model', self.servo_model)
for key, value in self.servo_name_map.iteritems():
config.set('servo.names', str(key), value)
for row in range(self.ui.poseList.count()):
item = self.ui.poseList.item(row)
pose_name = item.text()
values = item.data(QtCore.Qt.UserRole)
config.set(
'servo.poses', pose_name,
','.join(['%d=%.2f' % (ident, angle_deg)
for ident, angle_deg in values.iteritems()]))
| # Copyright 2014 <NAME>, <EMAIL>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import traceback  # used by monitor_status()'s error handler below
import trollius as asyncio
from trollius import Task, From, Return
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
from ..servo import selector
from .common import BoolContext
from . import gazebo_config_dialog
def spawn(callback):
def start():
Task(callback())
return start
class ServoTab(object):
def __init__(self, ui, status):
self.ui = ui
self.status = status
self.servo_controls = []
self.monitor_thread = None
self.servo_model = ''
self.servo_name_map = {}
self.ui.statusText.setText('not connected')
self.ui.connectButton.clicked.connect(
spawn(self.handle_connect_clicked))
self.ui.typeCombo.currentIndexChanged.connect(self.handle_type_change)
self.handle_type_change()
self.ui.configureGazeboButton.clicked.connect(
self.handle_configure_gazebo)
servo_layout = QtGui.QVBoxLayout()
servo_layout.setSpacing(0)
servo_layout.setContentsMargins(0, 0, 0, 0)
self.ui.scrollContents.setLayout(servo_layout)
self.ui.servoCountSpin.valueChanged.connect(self.handle_servo_count)
self.handle_servo_count()
self.ui.powerCombo.currentIndexChanged.connect(
spawn(self.handle_power))
self.ui.captureCurrentButton.clicked.connect(
spawn(self.handle_capture_current))
self.update_connected(False)
self.ui.addPoseButton.clicked.connect(self.handle_add_pose)
self.ui.removePoseButton.clicked.connect(self.handle_remove_pose)
self.ui.moveToPoseButton.clicked.connect(
spawn(self.handle_move_to_pose))
self.ui.updatePoseButton.clicked.connect(self.handle_update_pose)
self.ui.poseList.currentItemChanged.connect(
self.handle_poselist_current_changed)
self.controller = None
self.servo_update = BoolContext()
def resizeEvent(self, event):
pass
def poses(self):
result = []
for i in range(self.ui.poseList.count()):
result.append(self.ui.poseList.item(i).text())
return result
def pose(self, name):
for i in range(self.ui.poseList.count()):
if self.ui.poseList.item(i).text() == name:
return self.ui.poseList.item(i).data(QtCore.Qt.UserRole)
return dict([(i, 0.0) for i in range(self.ui.servoCountSpin.value())])
@asyncio.coroutine
def handle_connect_clicked(self):
val = self.ui.typeCombo.currentText().lower()
try:
self.controller = yield From(
selector.select_servo(
val,
serial_port=self.ui.serialPortCombo.currentText(),
model_name=self.servo_model,
servo_name_map=self.servo_name_map))
self.ui.statusText.setText('connected')
self.update_connected(True)
except Exception as e:
self.ui.statusText.setText('error: %s' % str(e))
self.update_connected(False)
def handle_type_change(self):
val = self.ui.typeCombo.currentText().lower()
self.ui.serialPortCombo.setEnabled(val == 'herkulex')
self.ui.configureGazeboButton.setEnabled(val == 'gazebo')
def handle_configure_gazebo(self):
servo_name_map = self.servo_name_map.copy()
for x in range(self.ui.servoCountSpin.value()):
if not x in servo_name_map:
servo_name_map[x] = ''
dialog = gazebo_config_dialog.GazeboConfigDialog(
self.servo_model, servo_name_map)
dialog.setModal(True)
result = dialog.exec_()
if result == QtGui.QDialog.Rejected:
return
self.servo_model = dialog.model_name()
self.servo_name_map = dialog.servo_name_map()
def handle_servo_count(self):
count = self.ui.servoCountSpin.value()
while len(self.servo_controls) > count:
# Remove the last one
last = self.servo_controls[-1]
widget = last['widget']
self.ui.scrollContents.layout().removeWidget(widget)
widget.deleteLater()
self.servo_controls = self.servo_controls[:-1]
while len(self.servo_controls) < count:
# Add a new one.
servo_id = len(self.servo_controls)
label = QtGui.QLabel()
label.setText('ID %d:' % servo_id)
slider = QtGui.QSlider(QtCore.Qt.Horizontal)
slider.setRange(-180, 180)
doublespin = QtGui.QDoubleSpinBox()
doublespin.setRange(-180, 180)
doublespin.setDecimals(1)
save = QtGui.QPushButton()
save.setText("Save")
move = QtGui.QPushButton()
move.setText("Move")
current = QtGui.QLabel()
current.setText('N/A')
current.setMinimumWidth(60)
widget = QtGui.QWidget()
layout = QtGui.QHBoxLayout(widget)
layout.addWidget(label)
layout.addWidget(slider)
layout.addWidget(doublespin)
layout.addWidget(save)
layout.addWidget(move)
layout.addWidget(current)
slider.valueChanged.connect(
functools.partial(self.handle_servo_slider, servo_id))
doublespin.valueChanged.connect(
functools.partial(self.handle_servo_spin, servo_id))
save.clicked.connect(
functools.partial(self.handle_servo_save, servo_id))
move.clicked.connect(
functools.partial(self.handle_servo_move, servo_id))
self.ui.scrollContents.layout().addWidget(widget)
self.servo_controls.append({
'widget': widget,
'label': label,
'slider': slider,
'doublespin': doublespin,
'save': save,
'move': move,
'current': current})
@asyncio.coroutine
def handle_power(self):
text = self.ui.powerCombo.currentText().lower()
value = None
if text == 'free':
value = selector.POWER_FREE
elif text == 'brake':
value = selector.POWER_BRAKE
elif text == 'drive':
value = selector.POWER_ENABLE
else:
raise NotImplementedError()
yield From(self.controller.enable_power(value))
def update_connected(self, value):
self.ui.controlGroup.setEnabled(value)
self.ui.posesGroup.setEnabled(value)
if self.monitor_thread is not None:
self.monitor_thread.cancel()
self.monitor_thread = None
if value:
            # handle_power is a coroutine; wrap it in a Task so it actually
            # runs (a bare call would only create the coroutine object).
            Task(self.handle_power())
self.monitor_thread = Task(self.monitor_status())
@asyncio.coroutine
def monitor_status(self):
voltages = {}
temperatures = {}
ident = 0
while True:
if (self.controller is not None and
hasattr(self.controller, 'get_voltage')):
try:
ident = (ident + 1) % len(self.servo_controls)
this_voltage = yield From(
self.controller.get_voltage([ident]))
voltages.update(this_voltage)
# Get all temperatures.
this_temp = yield From(
self.controller.get_temperature([ident]))
temperatures.update(this_temp)
def non_None(value):
return [x for x in value if x is not None]
message = "Servo status: "
if len(non_None(voltages.values())):
message += "%.1f/%.1fV" % (
min(non_None(voltages.values())),
max(non_None(voltages.values())))
if len(non_None(temperatures.values())):
message += " %.1f/%.1fC" % (
min(non_None(temperatures.values())),
max(non_None(temperatures.values())))
self.status.showMessage(message, 10000)
except Exception as e:
traceback.print_exc()
print "Error reading servo:", type(e), e
yield From(asyncio.sleep(2.0))
@asyncio.coroutine
def set_single_pose(self, servo_id, value):
yield From(
self.controller.set_single_pose(servo_id, value, pose_time=0.2))
def handle_servo_slider(self, servo_id, event):
if self.servo_update.value:
return
with self.servo_update:
control = self.servo_controls[servo_id]
value = control['slider'].value()
control['doublespin'].setValue(value)
Task(self.set_single_pose(servo_id, value))
def handle_servo_spin(self, servo_id, event):
if self.servo_update.value:
return
with self.servo_update:
control = self.servo_controls[servo_id]
value = control['doublespin'].value()
control['slider'].setSliderPosition(int(value))
Task(self.set_single_pose(servo_id, value))
def handle_servo_save(self, servo_id):
if self.ui.poseList.currentRow() < 0:
return
current_data = self.ui.poseList.currentItem().data(
QtCore.Qt.UserRole)
current_data[servo_id] = (
self.servo_controls[servo_id]['doublespin'].value())
self.ui.poseList.currentItem().setData(
QtCore.Qt.UserRole, current_data)
self.handle_poselist_current_changed(None, None)
def handle_servo_move(self, servo_id):
if self.ui.poseList.currentRow() < 0:
return
data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
self.servo_controls[servo_id]['doublespin'].setValue(data[servo_id])
@asyncio.coroutine
def handle_capture_current(self):
with self.servo_update:
results = yield From(
self.controller.get_pose(range(len(self.servo_controls))))
for ident, angle in results.iteritems():
if angle is None:
continue
control = self.servo_controls[ident]
control['slider'].setSliderPosition(int(angle))
control['doublespin'].setValue(angle)
def add_list_pose(self, name):
self.ui.poseList.addItem(name)
item = self.ui.poseList.item(self.ui.poseList.count() - 1)
item.setFlags(QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsEditable |
QtCore.Qt.ItemIsSelectable)
return item
def get_new_pose_name(self):
poses = set([self.ui.poseList.item(x).text()
for x in range(self.ui.poseList.count())])
count = 0
while True:
name = 'new_pose_%d' % count
if name not in poses:
return name
count += 1
def generate_pose_data(self):
return dict(
[ (i, control['doublespin'].value())
for i, control in enumerate(self.servo_controls) ])
def handle_add_pose(self):
pose_name = self.get_new_pose_name()
item = self.add_list_pose(pose_name)
item.setData(QtCore.Qt.UserRole, self.generate_pose_data())
self.ui.poseList.editItem(item)
def handle_remove_pose(self):
if self.ui.poseList.currentRow() < 0:
return
        # The pose data is stored on the QListWidget items themselves
        # (self.poses is a method, not a dict), so removing the item below
        # is sufficient.
self.ui.poseList.takeItem(self.ui.poseList.currentRow())
@asyncio.coroutine
def handle_move_to_pose(self):
if self.ui.poseList.currentRow() < 0:
return
values = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
yield From(self.controller.set_pose(values, pose_time=1.0))
with self.servo_update:
for ident, angle_deg in values.iteritems():
control = self.servo_controls[ident]
control['slider'].setSliderPosition(int(angle_deg))
control['doublespin'].setValue(angle_deg)
def handle_update_pose(self):
if self.ui.poseList.currentRow() < 0:
return
self.ui.poseList.currentItem().setData(
QtCore.Qt.UserRole, self.generate_pose_data())
self.handle_poselist_current_changed(None, None)
def handle_poselist_current_changed(self, current, previous):
if self.ui.poseList.currentRow() < 0:
return
data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
for i, control in enumerate(self.servo_controls):
control['current'].setText('%.1f' % data[i])
def read_settings(self, config):
if not config.has_section('servo'):
return
self.ui.typeCombo.setCurrentIndex(config.getint('servo', 'type'))
self.ui.serialPortCombo.setEditText(config.get('servo', 'port'))
self.ui.servoCountSpin.setValue(config.getint('servo', 'count'))
self.servo_model = config.get('servo', 'model')
if config.has_section('servo.names'):
self.servo_name_map = {}
for name, value in config.items('servo.names'):
self.servo_name_map[int(name)] = value
if config.has_section('servo.poses'):
for name, value in config.items('servo.poses'):
this_data = {}
for element in value.split(','):
ident, angle_deg = element.split('=')
this_data[int(ident)] = float(angle_deg)
item = self.add_list_pose(name)
item.setData(QtCore.Qt.UserRole, this_data)
def write_settings(self, config):
config.add_section('servo')
config.add_section('servo.poses')
config.add_section('servo.names')
config.set('servo', 'type', self.ui.typeCombo.currentIndex())
config.set('servo', 'port', self.ui.serialPortCombo.currentText())
config.set('servo', 'count', self.ui.servoCountSpin.value())
config.set('servo', 'model', self.servo_model)
for key, value in self.servo_name_map.iteritems():
config.set('servo.names', str(key), value)
for row in range(self.ui.poseList.count()):
item = self.ui.poseList.item(row)
pose_name = item.text()
values = item.data(QtCore.Qt.UserRole)
config.set(
'servo.poses', pose_name,
','.join(['%d=%.2f' % (ident, angle_deg)
for ident, angle_deg in values.iteritems()]))
| en | 0.829143 | # Copyright 2014 <NAME>, <EMAIL>. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Remove the last one # Add a new one. # Get all temperatures. | 2.27347 | 2 |
epsilon/juice.py | twisted/epsilon | 4 | 725 | # -*- test-case-name: epsilon.test.test_juice -*-
# Copyright 2005 Divmod, Inc. See LICENSE file for details
import warnings, pprint
import keyword
import io
import six
from twisted.internet.main import CONNECTION_LOST
from twisted.internet.defer import Deferred, maybeDeferred, fail
from twisted.internet.protocol import ServerFactory, ClientFactory
from twisted.internet.ssl import Certificate
from twisted.python.failure import Failure
from twisted.python import log, filepath
from epsilon.liner import LineReceiver
from epsilon.compat import long
from epsilon import extime
ASK = '_ask'
ANSWER = '_answer'
COMMAND = '_command'
ERROR = '_error'
ERROR_CODE = '_error_code'
ERROR_DESCRIPTION = '_error_description'
LENGTH = '_length'
BODY = 'body'
debug = False
class JuiceBox(dict):
""" I am a packet in the JUICE protocol. """
def __init__(self, __body='', **kw):
self.update(kw)
if __body:
assert isinstance(__body, str), "body must be a string: %r" % ( repr(__body),)
self['body'] = __body
def body():
def get(self):
warnings.warn("body attribute of boxes is now just a regular field",
stacklevel=2)
return self['body']
def set(self, newbody):
warnings.warn("body attribute of boxes is now just a regular field",
stacklevel=2)
self['body'] = newbody
return get,set
body = property(*body())
def copy(self):
newBox = self.__class__()
newBox.update(self)
return newBox
def serialize(self,
delimiter=b'\r\n',
escaped=b'\r\n '):
assert LENGTH not in self
delimiter = six.ensure_binary(delimiter)
escaped = six.ensure_binary(escaped)
L = []
for (k, v) in six.viewitems(self):
if k == BODY:
k = LENGTH
v = str(len(self[BODY]))
L.append(six.ensure_binary(k).replace(b'_', b'-').title())
L.append(b': ')
L.append(six.ensure_binary(v).replace(delimiter, escaped))
L.append(delimiter)
L.append(delimiter)
if BODY in self:
L.append(six.ensure_binary(self[BODY]))
return b''.join(L)
def sendTo(self, proto):
"""
Serialize and send this box to a Juice instance. By the time it is
being sent, several keys are required. I must have exactly ONE of::
-ask
-answer
-error
If the '-ask' header is set, then the '-command' header must also be
set.
"""
proto.sendPacket(self)
# juice.Box => JuiceBox
Box = JuiceBox
class TLSBox(JuiceBox):
def __repr__(self):
return 'TLS(**%s)' % (super(TLSBox, self).__repr__(),)
def __init__(self, __certificate, __verify=None, __sslstarted=None, **kw):
super(TLSBox, self).__init__(**kw)
self.certificate = __certificate
self.verify = __verify
self.sslstarted = __sslstarted
def sendTo(self, proto):
super(TLSBox, self).sendTo(proto)
if self.verify is None:
proto.startTLS(self.certificate)
else:
proto.startTLS(self.certificate, self.verify)
if self.sslstarted is not None:
self.sslstarted()
class QuitBox(JuiceBox):
def __repr__(self):
return 'Quit(**%s)' % (super(QuitBox, self).__repr__(),)
def sendTo(self, proto):
super(QuitBox, self).sendTo(proto)
proto.transport.loseConnection()
class _SwitchBox(JuiceBox):
def __repr__(self):
return 'Switch(**%s)' % (super(_SwitchBox, self).__repr__(),)
def __init__(self, __proto, **kw):
super(_SwitchBox, self).__init__(**kw)
self.innerProto = __proto
def sendTo(self, proto):
super(_SwitchBox, self).sendTo(proto)
proto._switchTo(self.innerProto)
class NegotiateBox(JuiceBox):
def __repr__(self):
return 'Negotiate(**%s)' % (super(NegotiateBox, self).__repr__(),)
def sendTo(self, proto):
super(NegotiateBox, self).sendTo(proto)
proto._setProtocolVersion(int(self['version']))
class JuiceError(Exception):
pass
class RemoteJuiceError(JuiceError):
"""
This error indicates that something went wrong on the remote end of the
connection, and the error was serialized and transmitted to you.
"""
def __init__(self, errorCode, description, fatal=False):
"""Create a remote error with an error code and description.
"""
Exception.__init__(self, "Remote[%s]: %s" % (errorCode, description))
self.errorCode = errorCode
self.description = description
self.fatal = fatal
class UnhandledRemoteJuiceError(RemoteJuiceError):
def __init__(self, description):
errorCode = b"UNHANDLED"
RemoteJuiceError.__init__(self, errorCode, description)
class JuiceBoxError(JuiceError):
pass
class MalformedJuiceBox(JuiceBoxError):
pass
class UnhandledCommand(JuiceError):
pass
class IncompatibleVersions(JuiceError):
pass
class _Transactor:
def __init__(self, store, callable):
self.store = store
self.callable = callable
def __call__(self, box):
return self.store.transact(self.callable, box)
def __repr__(self):
return '<Transaction in: %s of: %s>' % (self.store, self.callable)
class DispatchMixin:
baseDispatchPrefix = 'juice_'
autoDispatchPrefix = 'command_'
wrapper = None
def _auto(self, aCallable, proto, namespace=None):
if aCallable is None:
return None
command = aCallable.command
if namespace not in command.namespaces:
# if you're in the wrong namespace, you are very likely not allowed
# to invoke the command you are trying to invoke. some objects
# have commands exposed in a separate namespace for security
# reasons, since the security model is a role : namespace mapping.
log.msg('WRONG NAMESPACE: %r, %r' % (namespace, command.namespaces))
return None
def doit(box):
kw = stringsToObjects(box, command.arguments, proto)
for name, extraArg in command.extra:
kw[name] = extraArg.fromTransport(proto.transport)
# def checkIsDict(result):
# if not isinstance(result, dict):
# raise RuntimeError("%r returned %r, not dictionary" % (
# aCallable, result))
# return result
def checkKnownErrors(error):
key = error.trap(*command.allErrors)
code = command.allErrors[key]
desc = str(error.value)
return Failure(RemoteJuiceError(
code, desc, error in command.fatalErrors))
return maybeDeferred(aCallable, **kw).addCallback(
command.makeResponse, proto).addErrback(
checkKnownErrors)
return doit
def _wrap(self, aCallable):
if aCallable is None:
return None
wrap = self.wrapper
if wrap is not None:
return wrap(aCallable)
else:
return aCallable
def normalizeCommand(self, cmd):
"""Return the canonical form of a command.
"""
return cmd.upper().strip().replace('-', '_')
def lookupFunction(self, proto, name, namespace):
"""Return a callable to invoke when executing the named command.
"""
# Try to find a method to be invoked in a transaction first
# Otherwise fallback to a "regular" method
fName = self.autoDispatchPrefix + name
fObj = getattr(self, fName, None)
if fObj is not None:
# pass the namespace along
return self._auto(fObj, proto, namespace)
assert namespace is None, 'Old-style parsing'
# Fall back to simplistic command dispatching - we probably want to get
# rid of this eventually, there's no reason to do extra work and write
# fewer docs all the time.
fName = self.baseDispatchPrefix + name
return getattr(self, fName, None)
def dispatchCommand(self, proto, cmd, box, namespace=None):
fObj = self.lookupFunction(proto, self.normalizeCommand(cmd), namespace)
if fObj is None:
return fail(UnhandledCommand(cmd))
return maybeDeferred(self._wrap(fObj), box)
def normalizeKey(key):
lkey = six.ensure_str(key).lower().replace('-', '_')
if keyword.iskeyword(lkey):
return lkey.title()
return lkey
def parseJuiceHeaders(lines):
"""
Create a JuiceBox from a list of header lines.
@param lines: a list of lines.
@type lines: a list of L{bytes}
"""
b = JuiceBox()
key = None
for L in lines:
if L[0:1] == b' ':
# continuation
assert key is not None
b[key] += six.ensure_str(b'\r\n' + L[1:])
continue
parts = L.split(b': ', 1)
if len(parts) != 2:
raise MalformedJuiceBox("Wrong number of parts: %r" % (L,))
key, value = parts
key = normalizeKey(key)
b[key] = six.ensure_str(value)
return int(b.pop(LENGTH, 0)), b
class JuiceParserBase(DispatchMixin):
def __init__(self):
self._outstandingRequests = {}
def _puke(self, failure):
log.msg("Juice server or network failure "
"unhandled by client application:")
log.err(failure)
log.msg(
"Dropping connection! "
"To avoid, add errbacks to ALL remote commands!")
if self.transport is not None:
self.transport.loseConnection()
_counter = long(0)
def _nextTag(self):
self._counter += 1
return '%x' % (self._counter,)
def failAllOutgoing(self, reason):
OR = self._outstandingRequests.items()
self._outstandingRequests = None # we can never send another request
for key, value in OR:
value.errback(reason)
def juiceBoxReceived(self, box):
if debug:
log.msg("Juice receive: %s" % pprint.pformat(dict(six.viewitems(box))))
if ANSWER in box:
question = self._outstandingRequests.pop(box[ANSWER])
question.addErrback(self._puke)
self._wrap(question.callback)(box)
elif ERROR in box:
question = self._outstandingRequests.pop(box[ERROR])
question.addErrback(self._puke)
self._wrap(question.errback)(
Failure(RemoteJuiceError(box[ERROR_CODE],
box[ERROR_DESCRIPTION])))
elif COMMAND in box:
cmd = box[COMMAND]
def sendAnswer(answerBox):
if ASK not in box:
return
if self.transport is None:
return
answerBox[ANSWER] = box[ASK]
answerBox.sendTo(self)
def sendError(error):
if ASK not in box:
return error
if error.check(RemoteJuiceError):
code = error.value.errorCode
desc = error.value.description
if error.value.fatal:
errorBox = QuitBox()
else:
errorBox = JuiceBox()
else:
errorBox = QuitBox()
log.err(error) # here is where server-side logging happens
# if the error isn't handled
code = 'UNHANDLED'
desc = "Unhandled Remote System Exception "
errorBox[ERROR] = box[ASK]
errorBox[ERROR_DESCRIPTION] = desc
errorBox[ERROR_CODE] = code
if self.transport is not None:
errorBox.sendTo(self)
return None # intentionally stop the error here: don't log the
# traceback if it's handled, do log it (earlier) if
# it isn't
self.dispatchCommand(self, cmd, box).addCallbacks(sendAnswer, sendError
).addErrback(self._puke)
else:
raise RuntimeError(
"Empty packet received over connection-oriented juice: %r" % (box,))
def sendBoxCommand(self, command, box, requiresAnswer=True):
"""
Send a command across the wire with the given C{juice.Box}.
Returns a Deferred which fires with the response C{juice.Box} when it
is received, or fails with a C{juice.RemoteJuiceError} if an error is
received.
If the Deferred fails and the error is not handled by the caller of
this method, the failure will be logged and the connection dropped.
"""
if self._outstandingRequests is None:
return fail(CONNECTION_LOST)
box[COMMAND] = command
tag = self._nextTag()
if requiresAnswer:
box[ASK] = tag
result = self._outstandingRequests[tag] = Deferred()
else:
result = None
box.sendTo(self)
return result
class Argument:
optional = False
def __init__(self, optional=False):
self.optional = optional
def retrieve(self, d, name):
if self.optional:
value = d.get(name)
if value is not None:
del d[name]
else:
value = d.pop(name)
return value
def fromBox(self, name, strings, objects, proto):
st = self.retrieve(strings, name)
if self.optional and st is None:
objects[name] = None
else:
objects[name] = self.fromStringProto(st, proto)
def toBox(self, name, strings, objects, proto):
obj = self.retrieve(objects, name)
if self.optional and obj is None:
# strings[name] = None
return
else:
strings[name] = self.toStringProto(obj, proto)
def fromStringProto(self, inString, proto):
return self.fromString(inString)
def toStringProto(self, inObject, proto):
return self.toString(inObject)
def fromString(self, inString):
raise NotImplementedError()
def toString(self, inObject):
raise NotImplementedError()
class JuiceList(Argument):
def __init__(self, subargs):
self.subargs = subargs
def fromStringProto(self, inString, proto):
boxes = parseString(six.ensure_binary(inString))
values = [stringsToObjects(box, self.subargs, proto)
for box in boxes]
return values
def toStringProto(self, inObject, proto):
return b''.join([
objectsToStrings(objects, self.subargs, Box(), proto).serialize()
for objects in inObject
])
class ListOf(Argument):
def __init__(self, subarg, delimiter=', '):
self.subarg = subarg
self.delimiter = delimiter
def fromStringProto(self, inString, proto):
strings = inString.split(self.delimiter)
L = [self.subarg.fromStringProto(string, proto)
for string in strings]
return L
def toStringProto(self, inObject, proto):
L = []
for inSingle in inObject:
outString = self.subarg.toStringProto(inSingle, proto)
assert self.delimiter not in outString
L.append(outString)
return self.delimiter.join(L)
class Integer(Argument):
fromString = int
def toString(self, inObject):
return str(int(inObject))
class String(Argument):
def toString(self, inObject):
return inObject
def fromString(self, inString):
return inString
class EncodedString(Argument):
def __init__(self, encoding):
self.encoding = encoding
def toString(self, inObject):
return inObject.encode(self.encoding)
def fromString(self, inString):
return inString.decode(self.encoding)
# Temporary backwards compatibility for Exponent
Body = String
class Unicode(String):
def toString(self, inObject):
# assert isinstance(inObject, unicode)
return String.toString(self, inObject.encode('utf-8'))
def fromString(self, inString):
# assert isinstance(inString, str)
return String.fromString(self, inString).decode('utf-8')
class Path(Unicode):
def fromString(self, inString):
return filepath.FilePath(Unicode.fromString(self, inString))
def toString(self, inObject):
return Unicode.toString(self, inObject.path)
class Float(Argument):
fromString = float
toString = str
class Base64Binary(Argument):
def toString(self, inObject):
return inObject.encode('base64').replace('\n', '')
def fromString(self, inString):
return inString.decode('base64')
class Time(Argument):
def toString(self, inObject):
return inObject.asISO8601TimeAndDate()
def fromString(self, inString):
return extime.Time.fromISO8601TimeAndDate(inString)
class ExtraArg:
def fromTransport(self, inTransport):
raise NotImplementedError()
class Peer(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QPeer()
class PeerDomain(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QPeer().domain
class PeerUser(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QPeer().resource
class Host(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QHost()
class HostDomain(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QHost().domain
class HostUser(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QHost().resource
class Boolean(Argument):
def fromString(self, inString):
if inString == 'True':
return True
elif inString == 'False':
return False
else:
raise RuntimeError("Bad boolean value: %r" % (inString,))
def toString(self, inObject):
if inObject:
return 'True'
else:
return 'False'
class _CommandMeta(type):
def __new__(cls, name, bases, attrs):
re = attrs['reverseErrors'] = {}
er = attrs['allErrors'] = {}
for v, k in six.viewitems(attrs.get('errors',{})):
re[k] = v
er[v] = k
for v, k in six.viewitems(attrs.get('fatalErrors',{})):
re[k] = v
er[v] = k
return type.__new__(cls, name, bases, attrs)
@six.add_metaclass(_CommandMeta)
class Command:
arguments = []
response = []
extra = []
namespaces = [None] # This is set to [None] on purpose: None means
# "no namespace", not "empty list". "empty
# list" will make your command invalid in _all_
# namespaces, effectively uncallable.
errors = {}
fatalErrors = {}
commandType = Box
responseType = Box
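    # A hypothetical subclass, purely to illustrate how the class attributes
    # above fit together (names and codes are invented): 'arguments' and
    # 'response' pair field names with Argument parsers, and 'errors' maps
    # exception classes raised by the responder to wire-level error codes.
    #
    #     class Divide(Command):
    #         arguments = [('numerator', Integer()),
    #                      ('denominator', Integer())]
    #         response = [('quotient', Integer())]
    #         errors = {ZeroDivisionError: 'ZERO_DIVISION'}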
def commandName():
def get(self):
return self.__class__.__name__
raise NotImplementedError("Missing command name")
return get,
commandName = property(*commandName())
def __init__(self, **kw):
self.structured = kw
givenArgs = [normalizeKey(k) for k in kw.keys()]
forgotten = []
for name, arg in self.arguments:
if normalizeKey(name) not in givenArgs and not arg.optional:
forgotten.append(normalizeKey(name))
# for v in kw.itervalues():
# if v is None:
# from pprint import pformat
# raise RuntimeError("ARGH: %s" % pformat(kw))
if forgotten:
if len(forgotten) == 1:
plural = 'an argument'
else:
plural = 'some arguments'
raise RuntimeError("You forgot %s to %r: %s" % (
plural, self.commandName, ', '.join(forgotten)))
forgotten = []
def makeResponse(cls, objects, proto):
try:
return objectsToStrings(objects, cls.response, cls.responseType(), proto)
except:
log.msg("Exception in %r.makeResponse" % (cls,))
raise
makeResponse = classmethod(makeResponse)
def do(self, proto, namespace=None, requiresAnswer=True):
if namespace is not None:
cmd = namespace + ":" + self.commandName
else:
cmd = self.commandName
def _massageError(error):
error.trap(RemoteJuiceError)
rje = error.value
return Failure(self.reverseErrors.get(rje.errorCode, UnhandledRemoteJuiceError)(rje.description))
d = proto.sendBoxCommand(
cmd, objectsToStrings(self.structured, self.arguments, self.commandType(),
proto),
requiresAnswer)
if requiresAnswer:
d.addCallback(stringsToObjects, self.response, proto)
d.addCallback(self.addExtra, proto.transport)
d.addErrback(_massageError)
return d
def addExtra(self, d, transport):
for name, extraArg in self.extra:
d[name] = extraArg.fromTransport(transport)
return d
class ProtocolSwitchCommand(Command):
"""Use this command to switch from something Juice-derived to a different
protocol mid-connection. This can be useful when using Juice as the
connection-startup negotiation phase. Since TLS is a different layer
entirely, you can use Juice to negotiate the security parameters of your
connection, then switch to a different protocol, and the connection will
remain secured.
"""
def __init__(self, __protoToSwitchToFactory, **kw):
self.protoToSwitchToFactory = __protoToSwitchToFactory
super(ProtocolSwitchCommand, self).__init__(**kw)
def makeResponse(cls, innerProto, proto):
return _SwitchBox(innerProto)
makeResponse = classmethod(makeResponse)
def do(self, proto, namespace=None):
d = super(ProtocolSwitchCommand, self).do(proto)
proto._lock()
def switchNow(ign):
innerProto = self.protoToSwitchToFactory.buildProtocol(proto.transport.getPeer())
proto._switchTo(innerProto, self.protoToSwitchToFactory)
return ign
def die(ign):
proto.transport.loseConnection()
return ign
def handle(ign):
self.protoToSwitchToFactory.clientConnectionFailed(None, Failure(CONNECTION_LOST))
return ign
return d.addCallbacks(switchNow, handle).addErrback(die)
class Negotiate(Command):
commandName = 'Negotiate'
arguments = [('versions', ListOf(Integer()))]
response = [('version', Integer())]
responseType = NegotiateBox
class Juice(LineReceiver, JuiceParserBase, object):
"""
JUICE (JUice Is Concurrent Events) is a simple connection-oriented
request/response protocol. Packets, or "boxes", are collections of
RFC2822-inspired headers, plus a body. Note that this is NOT a literal
interpretation of any existing RFC, 822, 2822 or otherwise, but a simpler
version that does not do line continuations, does not specify any
particular format for header values, dispatches semantic meanings of most
headers on the -Command header rather than giving them global meaning, and
allows multiple sets of headers (messages, or JuiceBoxes) on a connection.
All headers whose names begin with a dash ('-') are reserved for use by the
protocol. All others are for application use - their meaning depends on
the value of the "-Command" header.
"""
protocolName = b'juice-base'
hostCertificate = None
MAX_LENGTH = 1024 * 1024
isServer = property(lambda self: self._issueGreeting,
doc="""
True if this is a juice server, i.e. it is going to
issue or has issued a server greeting upon
connection.
""")
isClient = property(lambda self: not self._issueGreeting,
doc="""
True if this is a juice client, i.e. it is not going to
issue or did not issue a server greeting upon
connection.
""")
def __init__(self, issueGreeting):
"""
@param issueGreeting: whether to issue a greeting when connected. This
should be set on server-side Juice protocols.
"""
JuiceParserBase.__init__(self)
self._issueGreeting = issueGreeting
def __repr__(self):
return '<%s %s/%s at 0x%x>' % (self.__class__.__name__, self.isClient and 'client' or 'server', self.innerProtocol, id(self))
__locked = False
def _lock(self):
""" Lock this Juice instance so that no further Juice traffic may be sent.
This is used when sending a request to switch underlying protocols.
You probably want to subclass ProtocolSwitchCommand rather than calling
this directly.
"""
self.__locked = True
innerProtocol = None
def _switchTo(self, newProto, clientFactory=None):
""" Switch this Juice instance to a new protocol. You need to do this
'simultaneously' on both ends of a connection; the easiest way to do
this is to use a subclass of ProtocolSwitchCommand.
"""
assert self.innerProtocol is None, "Protocol can only be safely switched once."
self.setRawMode()
self.innerProtocol = newProto
self.innerProtocolClientFactory = clientFactory
newProto.makeConnection(self.transport)
innerProtocolClientFactory = None
def juiceBoxReceived(self, box):
if self.__locked and COMMAND in box and ASK in box:
# This is a command which will trigger an answer, and we can no
# longer answer anything, so don't bother delivering it.
return
return super(Juice, self).juiceBoxReceived(box)
def sendPacket(self, completeBox):
"""
Send a juice.Box to my peer.
Note: transport.write is never called outside of this method.
"""
assert not self.__locked, "You cannot send juice packets when a connection is locked"
if self._startingTLSBuffer is not None:
self._startingTLSBuffer.append(completeBox)
else:
if debug:
log.msg("Juice send: %s" % pprint.pformat(dict(six.viewitems(completeBox))))
result = completeBox.serialize()
self.transport.write(result)
def sendCommand(self, command, __content='', __answer=True, **kw):
box = JuiceBox(__content, **kw)
return self.sendBoxCommand(command, box, requiresAnswer=__answer)
_outstandingRequests = None
_justStartedTLS = False
def makeConnection(self, transport):
self._transportPeer = transport.getPeer()
self._transportHost = transport.getHost()
log.msg("%s %s connection established (HOST:%s PEER:%s)" % (self.isClient and "client" or "server",
self.__class__.__name__,
self._transportHost,
self._transportPeer))
self._outstandingRequests = {}
self._requestBuffer = []
LineReceiver.makeConnection(self, transport)
_startingTLSBuffer = None
def prepareTLS(self):
self._startingTLSBuffer = []
def startTLS(self, certificate, *verifyAuthorities):
if self.hostCertificate is None:
self.hostCertificate = certificate
self._justStartedTLS = True
self.transport.startTLS(certificate.options(*verifyAuthorities))
stlsb = self._startingTLSBuffer
if stlsb is not None:
self._startingTLSBuffer = None
for box in stlsb:
self.sendPacket(box)
else:
raise RuntimeError(
"Previously authenticated connection between %s and %s "
"is trying to re-establish as %s" % (
self.hostCertificate,
Certificate.peerFromTransport(self.transport),
(certificate, verifyAuthorities)))
def dataReceived(self, data):
# If we successfully receive any data after TLS has been started, that
# means the connection was secured properly. Make a note of that fact.
if self._justStartedTLS:
self._justStartedTLS = False
return LineReceiver.dataReceived(self, data)
def connectionLost(self, reason):
log.msg("%s %s connection lost (HOST:%s PEER:%s)" % (
self.isClient and 'client' or 'server',
self.__class__.__name__,
self._transportHost,
self._transportPeer))
self.failAllOutgoing(reason)
if self.innerProtocol is not None:
self.innerProtocol.connectionLost(reason)
if self.innerProtocolClientFactory is not None:
self.innerProtocolClientFactory.clientConnectionLost(None, reason)
def lineReceived(self, line):
if line:
self._requestBuffer.append(line)
else:
buf = self._requestBuffer
self._requestBuffer = []
bodylen, b = parseJuiceHeaders(buf)
if bodylen:
self._bodyRemaining = bodylen
self._bodyBuffer = []
self._pendingBox = b
self.setRawMode()
else:
self.juiceBoxReceived(b)
def rawDataReceived(self, data):
if self.innerProtocol is not None:
self.innerProtocol.dataReceived(data)
return
self._bodyRemaining -= len(data)
if self._bodyRemaining <= 0:
if self._bodyRemaining < 0:
self._bodyBuffer.append(data[:self._bodyRemaining])
extraData = data[self._bodyRemaining:]
else:
self._bodyBuffer.append(data)
extraData = ''
self._pendingBox['body'] = six.ensure_str(b''.join(six.ensure_binary(each) for each in self._bodyBuffer))
self._bodyBuffer = None
b, self._pendingBox = self._pendingBox, None
self.juiceBoxReceived(b)
if self.innerProtocol is not None:
self.innerProtocol.makeConnection(self.transport)
if extraData:
self.innerProtocol.dataReceived(extraData)
else:
self.setLineMode(extraData)
else:
self._bodyBuffer.append(data)
protocolVersion = 0
def _setProtocolVersion(self, version):
# if we ever want to actually mangle encodings, this is the place to do
# it!
self.protocolVersion = version
return version
def renegotiateVersion(self, newVersion):
assert newVersion in VERSIONS, (
"This side of the connection doesn't support version %r"
% (newVersion,))
v = VERSIONS[:]
v.remove(newVersion)
return Negotiate(versions=[newVersion]).do(self).addCallback(
lambda ver: self._setProtocolVersion(ver['version']))
def command_NEGOTIATE(self, versions):
for version in versions:
if version in VERSIONS:
return dict(version=version)
raise IncompatibleVersions()
command_NEGOTIATE.command = Negotiate
VERSIONS = [1]
class _ParserHelper(Juice):
def __init__(self):
Juice.__init__(self, False)
self.boxes = []
self.results = Deferred()
def getPeer(self):
return 'string'
def getHost(self):
return 'string'
disconnecting = False
def juiceBoxReceived(self, box):
self.boxes.append(box)
# Synchronous helpers
def parse(cls, fileObj):
p = cls()
p.makeConnection(p)
p.dataReceived(fileObj.read())
return p.boxes
parse = classmethod(parse)
def parseString(cls, data):
with io.BytesIO(data) as f:
return cls.parse(f)
parseString = classmethod(parseString)
parse = _ParserHelper.parse
parseString = _ParserHelper.parseString
def stringsToObjects(strings, arglist, proto):
objects = {}
myStrings = strings.copy()
for argname, argparser in arglist:
argparser.fromBox(argname, myStrings, objects, proto)
return objects
def objectsToStrings(objects, arglist, strings, proto):
myObjects = {}
for (k, v) in objects.items():
myObjects[normalizeKey(k)] = v
for argname, argparser in arglist:
argparser.toBox(argname, strings, myObjects, proto)
return strings
class JuiceServerFactory(ServerFactory):
protocol = Juice
def buildProtocol(self, addr):
prot = self.protocol(True)
prot.factory = self
return prot
class JuiceClientFactory(ClientFactory):
protocol = Juice
def buildProtocol(self, addr):
prot = self.protocol(False)
prot.factory = self
return prot
| # -*- test-case-name: epsilon.test.test_juice -*-
# Copyright 2005 Divmod, Inc. See LICENSE file for details
import warnings, pprint
import keyword
import io
import six
from twisted.internet.main import CONNECTION_LOST
from twisted.internet.defer import Deferred, maybeDeferred, fail
from twisted.internet.protocol import ServerFactory, ClientFactory
from twisted.internet.ssl import Certificate
from twisted.python.failure import Failure
from twisted.python import log, filepath
from epsilon.liner import LineReceiver
from epsilon.compat import long
from epsilon import extime
ASK = '_ask'
ANSWER = '_answer'
COMMAND = '_command'
ERROR = '_error'
ERROR_CODE = '_error_code'
ERROR_DESCRIPTION = '_error_description'
LENGTH = '_length'
BODY = 'body'
debug = False
class JuiceBox(dict):
""" I am a packet in the JUICE protocol. """
def __init__(self, __body='', **kw):
self.update(kw)
if __body:
assert isinstance(__body, str), "body must be a string: %r" % ( repr(__body),)
self['body'] = __body
def body():
def get(self):
warnings.warn("body attribute of boxes is now just a regular field",
stacklevel=2)
return self['body']
def set(self, newbody):
warnings.warn("body attribute of boxes is now just a regular field",
stacklevel=2)
self['body'] = newbody
return get,set
body = property(*body())
def copy(self):
newBox = self.__class__()
newBox.update(self)
return newBox
def serialize(self,
delimiter=b'\r\n',
escaped=b'\r\n '):
assert LENGTH not in self
delimiter = six.ensure_binary(delimiter)
escaped = six.ensure_binary(escaped)
L = []
for (k, v) in six.viewitems(self):
if k == BODY:
k = LENGTH
v = str(len(self[BODY]))
L.append(six.ensure_binary(k).replace(b'_', b'-').title())
L.append(b': ')
L.append(six.ensure_binary(v).replace(delimiter, escaped))
L.append(delimiter)
L.append(delimiter)
if BODY in self:
L.append(six.ensure_binary(self[BODY]))
return b''.join(L)
def sendTo(self, proto):
"""
Serialize and send this box to a Juice instance. By the time it is
being sent, several keys are required. I must have exactly ONE of::
-ask
-answer
-error
If the '-ask' header is set, then the '-command' header must also be
set.
"""
proto.sendPacket(self)
# juice.Box => JuiceBox
Box = JuiceBox
class TLSBox(JuiceBox):
def __repr__(self):
return 'TLS(**%s)' % (super(TLSBox, self).__repr__(),)
def __init__(self, __certificate, __verify=None, __sslstarted=None, **kw):
super(TLSBox, self).__init__(**kw)
self.certificate = __certificate
self.verify = __verify
self.sslstarted = __sslstarted
def sendTo(self, proto):
super(TLSBox, self).sendTo(proto)
if self.verify is None:
proto.startTLS(self.certificate)
else:
proto.startTLS(self.certificate, self.verify)
if self.sslstarted is not None:
self.sslstarted()
class QuitBox(JuiceBox):
def __repr__(self):
return 'Quit(**%s)' % (super(QuitBox, self).__repr__(),)
def sendTo(self, proto):
super(QuitBox, self).sendTo(proto)
proto.transport.loseConnection()
class _SwitchBox(JuiceBox):
def __repr__(self):
return 'Switch(**%s)' % (super(_SwitchBox, self).__repr__(),)
def __init__(self, __proto, **kw):
super(_SwitchBox, self).__init__(**kw)
self.innerProto = __proto
def sendTo(self, proto):
super(_SwitchBox, self).sendTo(proto)
proto._switchTo(self.innerProto)
class NegotiateBox(JuiceBox):
def __repr__(self):
return 'Negotiate(**%s)' % (super(NegotiateBox, self).__repr__(),)
def sendTo(self, proto):
super(NegotiateBox, self).sendTo(proto)
proto._setProtocolVersion(int(self['version']))
class JuiceError(Exception):
pass
class RemoteJuiceError(JuiceError):
"""
This error indicates that something went wrong on the remote end of the
connection, and the error was serialized and transmitted to you.
"""
def __init__(self, errorCode, description, fatal=False):
"""Create a remote error with an error code and description.
"""
Exception.__init__(self, "Remote[%s]: %s" % (errorCode, description))
self.errorCode = errorCode
self.description = description
self.fatal = fatal
class UnhandledRemoteJuiceError(RemoteJuiceError):
def __init__(self, description):
errorCode = b"UNHANDLED"
RemoteJuiceError.__init__(self, errorCode, description)
class JuiceBoxError(JuiceError):
pass
class MalformedJuiceBox(JuiceBoxError):
pass
class UnhandledCommand(JuiceError):
pass
class IncompatibleVersions(JuiceError):
pass
class _Transactor:
def __init__(self, store, callable):
self.store = store
self.callable = callable
def __call__(self, box):
return self.store.transact(self.callable, box)
def __repr__(self):
return '<Transaction in: %s of: %s>' % (self.store, self.callable)
class DispatchMixin:
baseDispatchPrefix = 'juice_'
autoDispatchPrefix = 'command_'
wrapper = None
def _auto(self, aCallable, proto, namespace=None):
if aCallable is None:
return None
command = aCallable.command
if namespace not in command.namespaces:
# if you're in the wrong namespace, you are very likely not allowed
# to invoke the command you are trying to invoke. some objects
# have commands exposed in a separate namespace for security
# reasons, since the security model is a role : namespace mapping.
log.msg('WRONG NAMESPACE: %r, %r' % (namespace, command.namespaces))
return None
def doit(box):
kw = stringsToObjects(box, command.arguments, proto)
for name, extraArg in command.extra:
kw[name] = extraArg.fromTransport(proto.transport)
# def checkIsDict(result):
# if not isinstance(result, dict):
# raise RuntimeError("%r returned %r, not dictionary" % (
# aCallable, result))
# return result
def checkKnownErrors(error):
key = error.trap(*command.allErrors)
code = command.allErrors[key]
desc = str(error.value)
return Failure(RemoteJuiceError(
code, desc, error in command.fatalErrors))
return maybeDeferred(aCallable, **kw).addCallback(
command.makeResponse, proto).addErrback(
checkKnownErrors)
return doit
def _wrap(self, aCallable):
if aCallable is None:
return None
wrap = self.wrapper
if wrap is not None:
return wrap(aCallable)
else:
return aCallable
def normalizeCommand(self, cmd):
"""Return the canonical form of a command.
"""
return cmd.upper().strip().replace('-', '_')
def lookupFunction(self, proto, name, namespace):
"""Return a callable to invoke when executing the named command.
"""
# Try to find a method to be invoked in a transaction first
# Otherwise fallback to a "regular" method
fName = self.autoDispatchPrefix + name
fObj = getattr(self, fName, None)
if fObj is not None:
# pass the namespace along
return self._auto(fObj, proto, namespace)
assert namespace is None, 'Old-style parsing'
# Fall back to simplistic command dispatching - we probably want to get
# rid of this eventually, there's no reason to do extra work and write
# fewer docs all the time.
fName = self.baseDispatchPrefix + name
return getattr(self, fName, None)
def dispatchCommand(self, proto, cmd, box, namespace=None):
fObj = self.lookupFunction(proto, self.normalizeCommand(cmd), namespace)
if fObj is None:
return fail(UnhandledCommand(cmd))
return maybeDeferred(self._wrap(fObj), box)
def normalizeKey(key):
lkey = six.ensure_str(key).lower().replace('-', '_')
if keyword.iskeyword(lkey):
return lkey.title()
return lkey
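# Illustrative examples (not in the original source): normalizeKey(b'Content-Length')
# yields 'content_length'; a key that lowercases to a Python keyword, such as
# b'Class', is title-cased back to 'Class' so it remains usable as an identifier.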
def parseJuiceHeaders(lines):
"""
Create a JuiceBox from a list of header lines.
@param lines: a list of lines.
@type lines: a list of L{bytes}
"""
b = JuiceBox()
key = None
for L in lines:
if L[0:1] == b' ':
# continuation
assert key is not None
b[key] += six.ensure_str(b'\r\n' + L[1:])
continue
parts = L.split(b': ', 1)
if len(parts) != 2:
raise MalformedJuiceBox("Wrong number of parts: %r" % (L,))
key, value = parts
key = normalizeKey(key)
b[key] = six.ensure_str(value)
return int(b.pop(LENGTH, 0)), b
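# Illustrative example (not in the original source): the header lines
#   [b'Foo: bar', b' baz']
# produce a box whose 'foo' value is 'bar\r\nbaz', because a leading space marks
# a continuation of the previous header value; with no length header present,
# the returned body length is 0.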
class JuiceParserBase(DispatchMixin):
def __init__(self):
self._outstandingRequests = {}
def _puke(self, failure):
log.msg("Juice server or network failure "
"unhandled by client application:")
log.err(failure)
log.msg(
"Dropping connection! "
"To avoid, add errbacks to ALL remote commands!")
if self.transport is not None:
self.transport.loseConnection()
_counter = 0  # plain int; Python 3 has no separate long type
def _nextTag(self):
self._counter += 1
return '%x' % (self._counter,)
def failAllOutgoing(self, reason):
OR = self._outstandingRequests.items()
self._outstandingRequests = None # we can never send another request
for key, value in OR:
value.errback(reason)
def juiceBoxReceived(self, box):
if debug:
log.msg("Juice receive: %s" % pprint.pformat(dict(six.viewitems(box))))
if ANSWER in box:
question = self._outstandingRequests.pop(box[ANSWER])
question.addErrback(self._puke)
self._wrap(question.callback)(box)
elif ERROR in box:
question = self._outstandingRequests.pop(box[ERROR])
question.addErrback(self._puke)
self._wrap(question.errback)(
Failure(RemoteJuiceError(box[ERROR_CODE],
box[ERROR_DESCRIPTION])))
elif COMMAND in box:
cmd = box[COMMAND]
def sendAnswer(answerBox):
if ASK not in box:
return
if self.transport is None:
return
answerBox[ANSWER] = box[ASK]
answerBox.sendTo(self)
def sendError(error):
if ASK not in box:
return error
if error.check(RemoteJuiceError):
code = error.value.errorCode
desc = error.value.description
if error.value.fatal:
errorBox = QuitBox()
else:
errorBox = JuiceBox()
else:
errorBox = QuitBox()
log.err(error) # here is where server-side logging happens
# if the error isn't handled
code = 'UNHANDLED'
desc = "Unhandled Remote System Exception "
errorBox[ERROR] = box[ASK]
errorBox[ERROR_DESCRIPTION] = desc
errorBox[ERROR_CODE] = code
if self.transport is not None:
errorBox.sendTo(self)
return None # intentionally stop the error here: don't log the
# traceback if it's handled, do log it (earlier) if
# it isn't
self.dispatchCommand(self, cmd, box).addCallbacks(sendAnswer, sendError
).addErrback(self._puke)
else:
raise RuntimeError(
"Empty packet received over connection-oriented juice: %r" % (box,))
def sendBoxCommand(self, command, box, requiresAnswer=True):
"""
Send a command across the wire with the given C{juice.Box}.
Returns a Deferred which fires with the response C{juice.Box} when it
is received, or fails with a C{juice.RemoteJuiceError} if an error is
received.
If the Deferred fails and the error is not handled by the caller of
this method, the failure will be logged and the connection dropped.
"""
if self._outstandingRequests is None:
return fail(CONNECTION_LOST)
box[COMMAND] = command
tag = self._nextTag()
if requiresAnswer:
box[ASK] = tag
result = self._outstandingRequests[tag] = Deferred()
else:
result = None
box.sendTo(self)
return result
class Argument:
optional = False
def __init__(self, optional=False):
self.optional = optional
def retrieve(self, d, name):
if self.optional:
value = d.get(name)
if value is not None:
del d[name]
else:
value = d.pop(name)
return value
def fromBox(self, name, strings, objects, proto):
st = self.retrieve(strings, name)
if self.optional and st is None:
objects[name] = None
else:
objects[name] = self.fromStringProto(st, proto)
def toBox(self, name, strings, objects, proto):
obj = self.retrieve(objects, name)
if self.optional and obj is None:
# strings[name] = None
return
else:
strings[name] = self.toStringProto(obj, proto)
def fromStringProto(self, inString, proto):
return self.fromString(inString)
def toStringProto(self, inObject, proto):
return self.toString(inObject)
def fromString(self, inString):
raise NotImplementedError()
def toString(self, inObject):
raise NotImplementedError()
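# Concrete Argument subclasses below override fromString/toString (or the
# *Proto variants) to convert one named field between its wire string and its
# Python value; fromBox/toBox in this base class handle the dict plumbing and
# the `optional` flag.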
class JuiceList(Argument):
def __init__(self, subargs):
self.subargs = subargs
def fromStringProto(self, inString, proto):
boxes = parseString(six.ensure_binary(inString))
values = [stringsToObjects(box, self.subargs, proto)
for box in boxes]
return values
def toStringProto(self, inObject, proto):
return b''.join([
objectsToStrings(objects, self.subargs, Box(), proto).serialize()
for objects in inObject
])
class ListOf(Argument):
def __init__(self, subarg, delimiter=', '):
self.subarg = subarg
self.delimiter = delimiter
def fromStringProto(self, inString, proto):
strings = inString.split(self.delimiter)
L = [self.subarg.fromStringProto(string, proto)
for string in strings]
return L
def toStringProto(self, inObject, proto):
L = []
for inSingle in inObject:
outString = self.subarg.toStringProto(inSingle, proto)
assert self.delimiter not in outString
L.append(outString)
return self.delimiter.join(L)
class Integer(Argument):
fromString = int
def toString(self, inObject):
return str(int(inObject))
class String(Argument):
def toString(self, inObject):
return inObject
def fromString(self, inString):
return inString
class EncodedString(Argument):
def __init__(self, encoding):
self.encoding = encoding
def toString(self, inObject):
return inObject.encode(self.encoding)
def fromString(self, inString):
return inString.decode(self.encoding)
# Temporary backwards compatibility for Exponent
Body = String
class Unicode(String):
def toString(self, inObject):
# assert isinstance(inObject, unicode)
return String.toString(self, inObject.encode('utf-8'))
def fromString(self, inString):
# assert isinstance(inString, str)
return String.fromString(self, inString).decode('utf-8')
class Path(Unicode):
def fromString(self, inString):
return filepath.FilePath(Unicode.fromString(self, inString))
def toString(self, inObject):
return Unicode.toString(self, inObject.path)
class Float(Argument):
fromString = float
toString = str
class Base64Binary(Argument):
def toString(self, inObject):
return inObject.encode('base64').replace('\n', '')
def fromString(self, inString):
return inString.decode('base64')
class Time(Argument):
def toString(self, inObject):
return inObject.asISO8601TimeAndDate()
def fromString(self, inString):
return extime.Time.fromISO8601TimeAndDate(inString)
class ExtraArg:
def fromTransport(self, inTransport):
raise NotImplementedError()
class Peer(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QPeer()
class PeerDomain(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QPeer().domain
class PeerUser(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QPeer().resource
class Host(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QHost()
class HostDomain(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QHost().domain
class HostUser(ExtraArg):
def fromTransport(self, inTransport):
return inTransport.getQ2QHost().resource
class Boolean(Argument):
def fromString(self, inString):
if inString == 'True':
return True
elif inString == 'False':
return False
else:
raise RuntimeError("Bad boolean value: %r" % (inString,))
def toString(self, inObject):
if inObject:
return 'True'
else:
return 'False'
class _CommandMeta(type):
def __new__(cls, name, bases, attrs):
re = attrs['reverseErrors'] = {}
er = attrs['allErrors'] = {}
for v, k in six.viewitems(attrs.get('errors',{})):
re[k] = v
er[v] = k
for v, k in six.viewitems(attrs.get('fatalErrors',{})):
re[k] = v
er[v] = k
return type.__new__(cls, name, bases, attrs)
@six.add_metaclass(_CommandMeta)
class Command:
arguments = []
response = []
extra = []
namespaces = [None] # This is set to [None] on purpose: None means
# "no namespace", not "empty list". "empty
# list" will make your command invalid in _all_
# namespaces, effectively uncallable.
errors = {}
fatalErrors = {}
commandType = Box
responseType = Box
def commandName():
def get(self):
return self.__class__.__name__
raise NotImplementedError("Missing command name")
return get,
commandName = property(*commandName())
def __init__(self, **kw):
self.structured = kw
givenArgs = [normalizeKey(k) for k in kw.keys()]
forgotten = []
for name, arg in self.arguments:
if normalizeKey(name) not in givenArgs and not arg.optional:
forgotten.append(normalizeKey(name))
# for v in kw.itervalues():
# if v is None:
# from pprint import pformat
# raise RuntimeError("ARGH: %s" % pformat(kw))
if forgotten:
if len(forgotten) == 1:
plural = 'an argument'
else:
plural = 'some arguments'
raise RuntimeError("You forgot %s to %r: %s" % (
plural, self.commandName, ', '.join(forgotten)))
forgotten = []
def makeResponse(cls, objects, proto):
try:
return objectsToStrings(objects, cls.response, cls.responseType(), proto)
except:
log.msg("Exception in %r.makeResponse" % (cls,))
raise
makeResponse = classmethod(makeResponse)
def do(self, proto, namespace=None, requiresAnswer=True):
if namespace is not None:
cmd = namespace + ":" + self.commandName
else:
cmd = self.commandName
def _massageError(error):
error.trap(RemoteJuiceError)
rje = error.value
return Failure(self.reverseErrors.get(rje.errorCode, UnhandledRemoteJuiceError)(rje.description))
d = proto.sendBoxCommand(
cmd, objectsToStrings(self.structured, self.arguments, self.commandType(),
proto),
requiresAnswer)
if requiresAnswer:
d.addCallback(stringsToObjects, self.response, proto)
d.addCallback(self.addExtra, proto.transport)
d.addErrback(_massageError)
return d
def addExtra(self, d, transport):
for name, extraArg in self.extra:
d[name] = extraArg.fromTransport(transport)
return d
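# Illustrative sketch (assumed usage, not part of the original module): a
# Command subclass declares its schema declaratively, and _CommandMeta derives
# the allErrors/reverseErrors tables used to translate exceptions to wire codes:
#
#     class Sum(Command):
#         commandName = 'Sum'
#         arguments = [('a', Integer()), ('b', Integer())]
#         response = [('total', Integer())]
#         errors = {ValueError: 'BAD_OPERANDS'}
#
# A responder would be exposed as command_SUM on a DispatchMixin subclass,
# tagged with `command_SUM.command = Sum` (as done for command_NEGOTIATE below),
# and invoked remotely with `Sum(a=1, b=2).do(proto)`.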
class ProtocolSwitchCommand(Command):
"""Use this command to switch from something Juice-derived to a different
protocol mid-connection. This can be useful to use juice as the
connection-startup negotiation phase. Since TLS is a different layer
entirely, you can use Juice to negotiate the security parameters of your
connection, then switch to a different protocol, and the connection will
remain secured.
"""
def __init__(self, __protoToSwitchToFactory, **kw):
self.protoToSwitchToFactory = __protoToSwitchToFactory
super(ProtocolSwitchCommand, self).__init__(**kw)
def makeResponse(cls, innerProto, proto):
return _SwitchBox(innerProto)
makeResponse = classmethod(makeResponse)
def do(self, proto, namespace=None):
d = super(ProtocolSwitchCommand, self).do(proto)
proto._lock()
def switchNow(ign):
innerProto = self.protoToSwitchToFactory.buildProtocol(proto.transport.getPeer())
proto._switchTo(innerProto, self.protoToSwitchToFactory)
return ign
def die(ign):
proto.transport.loseConnection()
return ign
def handle(ign):
self.protoToSwitchToFactory.clientConnectionFailed(None, Failure(CONNECTION_LOST))
return ign
return d.addCallbacks(switchNow, handle).addErrback(die)
class Negotiate(Command):
commandName = 'Negotiate'
arguments = [('versions', ListOf(Integer()))]
response = [('version', Integer())]
responseType = NegotiateBox
class Juice(LineReceiver, JuiceParserBase, object):
"""
JUICE (JUice Is Concurrent Events) is a simple connection-oriented
request/response protocol. Packets, or "boxes", are collections of
RFC2822-inspired headers, plus a body. Note that this is NOT a literal
interpretation of any existing RFC, 822, 2822 or otherwise, but a simpler
version that does not do line continuations, does not specify any
particular format for header values, dispatches semantic meanings of most
headers on the -Command header rather than giving them global meaning, and
allows multiple sets of headers (messages, or JuiceBoxes) on a connection.
All headers whose names begin with a dash ('-') are reserved for use by the
protocol. All others are for application use - their meaning depends on
the value of the "-Command" header.
"""
protocolName = b'juice-base'
hostCertificate = None
MAX_LENGTH = 1024 * 1024
isServer = property(lambda self: self._issueGreeting,
doc="""
True if this is a juice server, e.g. it is going to
issue or has issued a server greeting upon
connection.
""")
isClient = property(lambda self: not self._issueGreeting,
doc="""
True if this is a juice client, e.g. it is not going to
issue or did not issue a server greeting upon
connection.
""")
def __init__(self, issueGreeting):
"""
@param issueGreeting: whether to issue a greeting when connected. This
should be set on server-side Juice protocols.
"""
JuiceParserBase.__init__(self)
self._issueGreeting = issueGreeting
def __repr__(self):
return '<%s %s/%s at 0x%x>' % (self.__class__.__name__, self.isClient and 'client' or 'server', self.innerProtocol, id(self))
__locked = False
def _lock(self):
""" Lock this Juice instance so that no further Juice traffic may be sent.
This is used when sending a request to switch underlying protocols.
You probably want to subclass ProtocolSwitchCommand rather than calling
this directly.
"""
self.__locked = True
innerProtocol = None
def _switchTo(self, newProto, clientFactory=None):
""" Switch this Juice instance to a new protocol. You need to do this
'simultaneously' on both ends of a connection; the easiest way to do
this is to use a subclass of ProtocolSwitchCommand.
"""
assert self.innerProtocol is None, "Protocol can only be safely switched once."
self.setRawMode()
self.innerProtocol = newProto
self.innerProtocolClientFactory = clientFactory
newProto.makeConnection(self.transport)
innerProtocolClientFactory = None
def juiceBoxReceived(self, box):
if self.__locked and COMMAND in box and ASK in box:
# This is a command which will trigger an answer, and we can no
# longer answer anything, so don't bother delivering it.
return
return super(Juice, self).juiceBoxReceived(box)
def sendPacket(self, completeBox):
"""
Send a juice.Box to my peer.
Note: transport.write is never called outside of this method.
"""
assert not self.__locked, "You cannot send juice packets when a connection is locked"
if self._startingTLSBuffer is not None:
self._startingTLSBuffer.append(completeBox)
else:
if debug:
log.msg("Juice send: %s" % pprint.pformat(dict(six.viewitems(completeBox))))
result = completeBox.serialize()
self.transport.write(result)
def sendCommand(self, command, __content='', __answer=True, **kw):
box = JuiceBox(__content, **kw)
return self.sendBoxCommand(command, box, requiresAnswer=__answer)
_outstandingRequests = None
_justStartedTLS = False
def makeConnection(self, transport):
self._transportPeer = transport.getPeer()
self._transportHost = transport.getHost()
log.msg("%s %s connection established (HOST:%s PEER:%s)" % (self.isClient and "client" or "server",
self.__class__.__name__,
self._transportHost,
self._transportPeer))
self._outstandingRequests = {}
self._requestBuffer = []
LineReceiver.makeConnection(self, transport)
_startingTLSBuffer = None
def prepareTLS(self):
self._startingTLSBuffer = []
def startTLS(self, certificate, *verifyAuthorities):
if self.hostCertificate is None:
self.hostCertificate = certificate
self._justStartedTLS = True
self.transport.startTLS(certificate.options(*verifyAuthorities))
stlsb = self._startingTLSBuffer
if stlsb is not None:
self._startingTLSBuffer = None
for box in stlsb:
self.sendPacket(box)
else:
raise RuntimeError(
"Previously authenticated connection between %s and %s "
"is trying to re-establish as %s" % (
self.hostCertificate,
Certificate.peerFromTransport(self.transport),
(certificate, verifyAuthorities)))
def dataReceived(self, data):
# If we successfully receive any data after TLS has been started, that
# means the connection was secured properly. Make a note of that fact.
if self._justStartedTLS:
self._justStartedTLS = False
return LineReceiver.dataReceived(self, data)
def connectionLost(self, reason):
log.msg("%s %s connection lost (HOST:%s PEER:%s)" % (
self.isClient and 'client' or 'server',
self.__class__.__name__,
self._transportHost,
self._transportPeer))
self.failAllOutgoing(reason)
if self.innerProtocol is not None:
self.innerProtocol.connectionLost(reason)
if self.innerProtocolClientFactory is not None:
self.innerProtocolClientFactory.clientConnectionLost(None, reason)
def lineReceived(self, line):
if line:
self._requestBuffer.append(line)
else:
buf = self._requestBuffer
self._requestBuffer = []
bodylen, b = parseJuiceHeaders(buf)
if bodylen:
self._bodyRemaining = bodylen
self._bodyBuffer = []
self._pendingBox = b
self.setRawMode()
else:
self.juiceBoxReceived(b)
def rawDataReceived(self, data):
if self.innerProtocol is not None:
self.innerProtocol.dataReceived(data)
return
self._bodyRemaining -= len(data)
if self._bodyRemaining <= 0:
if self._bodyRemaining < 0:
self._bodyBuffer.append(data[:self._bodyRemaining])
extraData = data[self._bodyRemaining:]
else:
self._bodyBuffer.append(data)
extraData = b''
self._pendingBox['body'] = six.ensure_str(b''.join(six.ensure_binary(each) for each in self._bodyBuffer))
self._bodyBuffer = None
b, self._pendingBox = self._pendingBox, None
self.juiceBoxReceived(b)
if self.innerProtocol is not None:
self.innerProtocol.makeConnection(self.transport)
if extraData:
self.innerProtocol.dataReceived(extraData)
else:
self.setLineMode(extraData)
else:
self._bodyBuffer.append(data)
protocolVersion = 0
def _setProtocolVersion(self, version):
# if we ever want to actually mangle encodings, this is the place to do
# it!
self.protocolVersion = version
return version
def renegotiateVersion(self, newVersion):
assert newVersion in VERSIONS, (
"This side of the connection doesn't support version %r"
% (newVersion,))
v = VERSIONS[:]
v.remove(newVersion)
return Negotiate(versions=[newVersion]).do(self).addCallback(
lambda ver: self._setProtocolVersion(ver['version']))
def command_NEGOTIATE(self, versions):
for version in versions:
if version in VERSIONS:
return dict(version=version)
raise IncompatibleVersions()
command_NEGOTIATE.command = Negotiate
VERSIONS = [1]
class _ParserHelper(Juice):
def __init__(self):
Juice.__init__(self, False)
self.boxes = []
self.results = Deferred()
def getPeer(self):
return 'string'
def getHost(self):
return 'string'
disconnecting = False
def juiceBoxReceived(self, box):
self.boxes.append(box)
# Synchronous helpers
def parse(cls, fileObj):
p = cls()
p.makeConnection(p)
p.dataReceived(fileObj.read())
return p.boxes
parse = classmethod(parse)
def parseString(cls, data):
with io.BytesIO(data) as f:
return cls.parse(f)
parseString = classmethod(parseString)
parse = _ParserHelper.parse
parseString = _ParserHelper.parseString
def stringsToObjects(strings, arglist, proto):
objects = {}
myStrings = strings.copy()
for argname, argparser in arglist:
argparser.fromBox(argname, myStrings, objects, proto)
return objects
def objectsToStrings(objects, arglist, strings, proto):
myObjects = {}
for (k, v) in objects.items():
myObjects[normalizeKey(k)] = v
for argname, argparser in arglist:
argparser.toBox(argname, strings, myObjects, proto)
return strings
class JuiceServerFactory(ServerFactory):
protocol = Juice
def buildProtocol(self, addr):
prot = self.protocol(True)
prot.factory = self
return prot
class JuiceClientFactory(ClientFactory):
protocol = Juice
def buildProtocol(self, addr):
prot = self.protocol(False)
prot.factory = self
return prot
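# Illustrative wiring sketch (assumes the standard Twisted reactor API; the port
# number is hypothetical and not part of this module):
#
#     from twisted.internet import reactor
#     reactor.listenTCP(7805, JuiceServerFactory())               # server side
#     reactor.connectTCP('localhost', 7805, JuiceClientFactory()) # client side
#     reactor.run()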
| en | 0.879196 | # -*- test-case-name: epsilon.test.test_juice -*- # Copyright 2005 Divmod, Inc. See LICENSE file for details I am a packet in the JUICE protocol. Serialize and send this box to a Juice instance. By the time it is being sent, several keys are required. I must have exactly ONE of:: -ask -answer -error If the '-ask' header is set, then the '-command' header must also be set. # juice.Box => JuiceBox This error indicates that something went wrong on the remote end of the connection, and the error was serialized and transmitted to you. Create a remote error with an error code and description. # if you're in the wrong namespace, you are very likely not allowed # to invoke the command you are trying to invoke. some objects # have commands exposed in a separate namespace for security # reasons, since the security model is a role : namespace mapping. # def checkIsDict(result): # if not isinstance(result, dict): # raise RuntimeError("%r returned %r, not dictionary" % ( # aCallable, result)) # return result Return the canonical form of a command. Return a callable to invoke when executing the named command. # Try to find a method to be invoked in a transaction first # Otherwise fallback to a "regular" method # pass the namespace along # Fall back to simplistic command dispatching - we probably want to get # rid of this eventually, there's no reason to do extra work and write # fewer docs all the time. Create a JuiceBox from a list of header lines. @param lines: a list of lines. @type lines: a list of L{bytes} # continuation # we can never send another request # here is where server-side logging happens # if the error isn't handled # intentionally stop the error here: don't log the # traceback if it's handled, do log it (earlier) if # it isn't Send a command across the wire with the given C{juice.Box}. Returns a Deferred which fires with the response C{juice.Box} when it is received, or fails with a C{juice.RemoteJuiceError} if an error is received. If the Deferred fails and the error is not handled by the caller of this method, the failure will be logged and the connection dropped. # strings[name] = None # Temporary backwards compatibility for Exponent # assert isinstance(inObject, unicode) # assert isinstance(inString, str) # This is set to [None] on purpose: None means # "no namespace", not "empty list". "empty # list" will make your command invalid in _all_ # namespaces, effectively uncallable. # for v in kw.itervalues(): # if v is None: # from pprint import pformat # raise RuntimeError("ARGH: %s" % pformat(kw)) Use this command to switch from something Juice-derived to a different protocol mid-connection. This can be useful to use juice as the connection-startup negotiation phase. Since TLS is a different layer entirely, you can use Juice to negotiate the security parameters of your connection, then switch to a different protocol, and the connection will remain secured. JUICE (JUice Is Concurrent Events) is a simple connection-oriented request/response protocol. Packets, or "boxes", are collections of RFC2822-inspired headers, plus a body. Note that this is NOT a literal interpretation of any existing RFC, 822, 2822 or otherwise, but a simpler version that does not do line continuations, does not specify any particular format for header values, dispatches semantic meanings of most headers on the -Command header rather than giving them global meaning, and allows multiple sets of headers (messages, or JuiceBoxes) on a connection. 
All headers whose names begin with a dash ('-') are reserved for use by the protocol. All others are for application use - their meaning depends on the value of the "-Command" header. True if this is a juice server, e.g. it is going to issue or has issued a server greeting upon connection. True if this is a juice server, e.g. it is not going to issue or did not issue a server greeting upon connection. @param issueGreeting: whether to issue a greeting when connected. This should be set on server-side Juice protocols. Lock this Juice instance so that no further Juice traffic may be sent. This is used when sending a request to switch underlying protocols. You probably want to subclass ProtocolSwitchCommand rather than calling this directly. Switch this Juice instance to a new protocol. You need to do this 'simultaneously' on both ends of a connection; the easiest way to do this is to use a subclass of ProtocolSwitchCommand. # This is a command which will trigger an answer, and we can no # longer answer anything, so don't bother delivering it. Send a juice.Box to my peer. Note: transport.write is never called outside of this method. # If we successfully receive any data after TLS has been started, that # means the connection was secured properly. Make a note of that fact. # if we ever want to actually mangle encodings, this is the place to do # it! # Synchronous helpers | 2.285772 | 2 |
bonsai3/simulator_client.py | kirillpol-ms/bonsai3-py | 0 | 726 | <gh_stars>0
"""
Client for simulator requests
"""
__copyright__ = "Copyright 2020, Microsoft Corp."
# pyright: strict
from random import uniform
import time
from typing import Union
import jsons
import requests
from .exceptions import RetryTimeoutError, ServiceError
from .logger import Logger
from .simulator_protocol import (
ServiceConfig,
SimulatorEvent,
SimulatorEventRequest,
SimulatorInterface,
)
log = Logger()
_RETRYABLE_ERROR_CODES = {502, 503, 504}
_MAXIMUM_BACKOFF_SECONDS = 60
_BACKOFF_BASE_MULTIPLIER_MILLISECONDS = 50
class SimulatorClient:
def __init__(self, config: ServiceConfig):
self._config = config
self._retry_attempts = 0
self._retry_timeout = None
self._session = requests.session()
self._session.headers.update(
{"Authorization": config.access_key, "Content-type": "application/json"}
)
def register_simulator(self, interface: SimulatorInterface) -> SimulatorEvent:
return self._http_request(interface, self._config)
def get_next_event(self, event_request: SimulatorEventRequest) -> SimulatorEvent:
return self._http_request(event_request, self._config)
def unregister_simulator(self, session_id: str):
url = "{}/v2/workspaces/{}/simulatorSessions/{}".format(
self._config.server, self._config.workspace, session_id
)
log.debug("Sending unregister request to {}".format(url))
return self._session.delete(url, timeout=self._config.network_timeout_seconds)
def _http_request(
self,
payload: Union[SimulatorInterface, SimulatorEventRequest],
config: ServiceConfig,
) -> SimulatorEvent:
res = None
if self._retry_attempts >= 1:
self._handle_retry()
try:
# NOTE: we assert these for the user here to allow the config object to be partially initialized before use.
assert len(
config.access_key
), "Environment variable SIM_ACCESS_KEY is unset or access_key is empty."
assert len(
config.workspace
), "Environment variable SIM_WORKSPACE is unset or workspace is empty."
assert len(
config.server
), "Environment variable SIM_API_HOST is unset or server is empty."
# Register request
if isinstance(payload, SimulatorInterface):
reg_url = "{}/v2/workspaces/{}/simulatorSessions".format(
config.server, config.workspace
)
log.debug("Sending registration to {}".format(reg_url))
log.debug("Registration payload: {}".format(jsons.dumps(payload)))
res = self._session.post(
reg_url,
json=jsons.loads(payload.json),
headers={
"Authorization": config.access_key,
"Content-type": "application/json",
},
timeout=self._config.network_timeout_seconds,
)
log.debug("Response to registration received.")
# Get next event request
if isinstance(payload, SimulatorEventRequest):
log.network("Sending get next event request.")
res = self._session.post(
"{}/v2/workspaces/{}/simulatorSessions/{}/advance".format(
config.server, config.workspace, payload.sessionId
),
json=jsons.loads(jsons.dumps(payload)),
headers={
"Authorization": config.access_key,
"Content-type": "application/json",
},
timeout=self._config.network_timeout_seconds,
)
log.network("Response to get next event request received.")
except requests.exceptions.Timeout as err:
log.error(err)
self._retry_attempts += 1
return self._http_request(payload, config)
except requests.exceptions.RequestException as err:
if res is not None:
log.error(res.text)
log.error(err)
raise
if res is not None:
if res.status_code in _RETRYABLE_ERROR_CODES:
log.debug(
"Service returned {}, a retryable response error code."
" Retrying request.".format(res.status_code)
)
self._retry_attempts += 1
return self._http_request(payload, config)
# bail on error
if res.status_code != 200 and res.status_code != 201:
log.error(
"Received response with {} http status code. "
"Raising exception.".format(res.status_code)
)
if res.text:
log.error(res.text)
raise ServiceError(
"Unable to get next event for simulator, "
"received {} http status code".format(res.status_code)
)
# TODO estee: this needs validation
# SimulatorEvent
self._retry_attempts = 0
self._retry_timeout = None
return self._event_from_json(res.text)
raise RuntimeError(
"Usage error: Somehow http response ended up as none. "
"Check arguments to _http_request and ensure the payload "
"is either of type SimulatorInterface or SimulatorEventRequest"
)
def _event_from_json(self, json_text: str) -> SimulatorEvent:
"""Converts a json string into a SimulatorEvent."""
event_dict = jsons.loads(json_text)
log.debug("Event Response: {}".format(event_dict))
return SimulatorEvent(event_dict)
def _handle_retry(self):
log.network("handling retry.")
if (
self._retry_timeout and time.time() > self._retry_timeout
) or self._config.retry_timeout_seconds == 0:
raise RetryTimeoutError("Simulator Retry time exceeded.")
if self._config.retry_timeout_seconds > 0 and self._retry_timeout is None:
self._retry_timeout = time.time() + self._config.retry_timeout_seconds
log.info(
"Simulator will timeout in {} seconds if it is not able "
"to connect to the platform.".format(self._retry_timeout - time.time())
)
self._backoff()
log.network("retry handled.")
def _backoff(self):
"""
Implements Exponential backoff algorithm with full jitter
Check the following url for more information
https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
power_of_two = 2 ** self._retry_attempts
max_sleep = min(
power_of_two * _BACKOFF_BASE_MULTIPLIER_MILLISECONDS / 1000.0,
_MAXIMUM_BACKOFF_SECONDS,
)
sleep = uniform(0, max_sleep)
log.debug(
"Retry attempt: {}, backing off for {} seconds".format(
self._retry_attempts, sleep
)
)
time.sleep(sleep)
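# Illustrative sketch (not part of the client API): how the full-jitter upper
# bound grows with each retry attempt before being capped.
if __name__ == "__main__":  # hypothetical demo guard
    for attempt in range(13):
        bound = min(
            (2 ** attempt) * _BACKOFF_BASE_MULTIPLIER_MILLISECONDS / 1000.0,
            _MAXIMUM_BACKOFF_SECONDS,
        )
        print("attempt %d -> sleep drawn uniformly from [0, %.2f] seconds" % (attempt, bound))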
| """
Client for simulator requests
"""
__copyright__ = "Copyright 2020, Microsoft Corp."
# pyright: strict
from random import uniform
import time
from typing import Union
import jsons
import requests
from .exceptions import RetryTimeoutError, ServiceError
from .logger import Logger
from .simulator_protocol import (
ServiceConfig,
SimulatorEvent,
SimulatorEventRequest,
SimulatorInterface,
)
log = Logger()
_RETRYABLE_ERROR_CODES = {502, 503, 504}
_MAXIMUM_BACKOFF_SECONDS = 60
_BACKOFF_BASE_MULTIPLIER_MILLISECONDS = 50
class SimulatorClient:
def __init__(self, config: ServiceConfig):
self._config = config
self._retry_attempts = 0
self._retry_timeout = None
self._session = requests.session()
self._session.headers.update(
{"Authorization": config.access_key, "Content-type": "application/json"}
)
def register_simulator(self, interface: SimulatorInterface) -> SimulatorEvent:
return self._http_request(interface, self._config)
def get_next_event(self, event_request: SimulatorEventRequest) -> SimulatorEvent:
return self._http_request(event_request, self._config)
def unregister_simulator(self, session_id: str):
url = "{}/v2/workspaces/{}/simulatorSessions/{}".format(
self._config.server, self._config.workspace, session_id
)
log.debug("Sending unregister request to {}".format(url))
return self._session.delete(url, timeout=self._config.network_timeout_seconds)
def _http_request(
self,
payload: Union[SimulatorInterface, SimulatorEventRequest],
config: ServiceConfig,
) -> SimulatorEvent:
res = None
if self._retry_attempts >= 1:
self._handle_retry()
try:
# NOTE: we assert these for the user here to allow the config object to be partially initialized before use.
assert len(
config.access_key
), "Environment variable SIM_ACCESS_KEY is unset or access_key is empty."
assert len(
config.workspace
), "Environment variable SIM_WORKSPACE is unset or workspace is empty."
assert len(
config.server
), "Environment variable SIM_API_HOST is unset or server is empty."
# Register request
if isinstance(payload, SimulatorInterface):
reg_url = "{}/v2/workspaces/{}/simulatorSessions".format(
config.server, config.workspace
)
log.debug("Sending registration to {}".format(reg_url))
log.debug("Registration payload: {}".format(jsons.dumps(payload)))
res = self._session.post(
reg_url,
json=jsons.loads(payload.json),
headers={
"Authorization": config.access_key,
"Content-type": "application/json",
},
timeout=self._config.network_timeout_seconds,
)
log.debug("Response to registration received.")
# Get next event request
if isinstance(payload, SimulatorEventRequest):
log.network("Sending get next event request.")
res = self._session.post(
"{}/v2/workspaces/{}/simulatorSessions/{}/advance".format(
config.server, config.workspace, payload.sessionId
),
json=jsons.loads(jsons.dumps(payload)),
headers={
"Authorization": config.access_key,
"Content-type": "application/json",
},
timeout=self._config.network_timeout_seconds,
)
log.network("Response to get next event request received.")
except requests.exceptions.Timeout as err:
log.error(err)
self._retry_attempts += 1
return self._http_request(payload, config)
except requests.exceptions.RequestException as err:
if res is not None:
log.error(res.text)
log.error(err)
raise
if res is not None:
if res.status_code in _RETRYABLE_ERROR_CODES:
log.debug(
"Service returned {}, a retryable response error code."
" Retrying request.".format(res.status_code)
)
self._retry_attempts += 1
return self._http_request(payload, config)
# bail on error
if res.status_code != 200 and res.status_code != 201:
log.error(
"Received response with {} http status code. "
"Raising exception.".format(res.status_code)
)
if res.text:
log.error(res.text)
raise ServiceError(
"Unable to get next event for simulator, "
"received {} http status code".format(res.status_code)
)
# TODO estee: this needs validation
# SimulatorEvent
self._retry_attempts = 0
self._retry_timeout = None
return self._event_from_json(res.text)
raise RuntimeError(
"Usage error: Somehow http response ended up as none. "
"Check arguments to _http_request and ensure the payload "
"is either of type SimulatorInterface or SimulatorEventRequest"
)
def _event_from_json(self, json_text: str) -> SimulatorEvent:
"""Converts a json string into a SimulatorEvent."""
event_dict = jsons.loads(json_text)
log.debug("Event Response: {}".format(event_dict))
return SimulatorEvent(event_dict)
def _handle_retry(self):
log.network("handling retry.")
if (
self._retry_timeout and time.time() > self._retry_timeout
) or self._config.retry_timeout_seconds == 0:
raise RetryTimeoutError("Simulator Retry time exceeded.")
if self._config.retry_timeout_seconds > 0 and self._retry_timeout is None:
self._retry_timeout = time.time() + self._config.retry_timeout_seconds
log.info(
"Simulator will timeout in {} seconds if it is not able "
"to connect to the platform.".format(self._retry_timeout - time.time())
)
self._backoff()
log.network("retry handled.")
def _backoff(self):
"""
Implements Exponential backoff algorithm with full jitter
Check the following url for more information
https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
power_of_two = 2 ** self._retry_attempts
max_sleep = min(
power_of_two * _BACKOFF_BASE_MULTIPLIER_MILLISECONDS / 1000.0,
_MAXIMUM_BACKOFF_SECONDS,
)
sleep = uniform(0, max_sleep)
log.debug(
"Retry attempt: {}, backing off for {} seconds".format(
self._retry_attempts, sleep
)
)
time.sleep(sleep) | en | 0.80256 | Client for simulator requests # pyright: strict # NOTE: we assert these for the user here to allow the config object to be partially initialized before use. # Register request # Get next event request # bail on error # TODO estee: this needs validation # SimulatorEvent Converts a json string into a SimulatorEvent. Implements Exponential backoff algorithm with full jitter Check the following url for more information https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ | 2.154489 | 2 |
perturbed_images_generation_multiProcess.py | gwxie/Synthesize-Distorted-Image-and-Its-Control-Points | 8 | 727 | '''
<NAME>
set up: 2020-1-9
integrate img and label into one file
-- fiducial1024_v1
'''
import argparse
import sys, os
import pickle
import random
import collections
import json
import numpy as np
import scipy.io as io
import scipy.misc as m
import matplotlib.pyplot as plt
import glob
import math
import time
import threading
import multiprocessing as mp
from multiprocessing import Pool
import re
import cv2
# sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN
import utils
def getDatasets(dir):
return os.listdir(dir)
class perturbed(utils.BasePerturbed):
def __init__(self, path, bg_path, save_path, save_suffix):
self.path = path
self.bg_path = bg_path
self.save_path = save_path
self.save_suffix = save_suffix
def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'):
origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR)
save_img_shape = [512*2, 480*2] # 320
# reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1])
reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])
# reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18])
# reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09])
base_img_shrink = save_img_shape[0] - reduce_value
# enlarge_img_shrink = [1024, 768]
# enlarge_img_shrink = [896, 672] # 420
enlarge_img_shrink = [512*4, 480*4] # 420
# enlarge_img_shrink = [896*2, 768*2] # 420
# enlarge_img_shrink = [896, 768] # 420
# enlarge_img_shrink = [768, 576] # 420
# enlarge_img_shrink = [640, 480] # 420
''''''
im_lr = origin_img.shape[0]
im_ud = origin_img.shape[1]
reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
# reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14])
if im_lr > im_ud:
im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2)
im_lr = save_img_shape[0] - reduce_value
else:
base_img_shrink = save_img_shape[1] - reduce_value
im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2)
im_ud = base_img_shrink
if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5:
repeat_time = min(repeat_time, 8)
edge_padding = 3
im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1
im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1
im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64)
im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64)
# im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1
# im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1
# im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64)
# im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64)
im_x, im_y = np.meshgrid(im_hight, im_wide)
segment_x = (im_lr) // (fiducial_points-1)
segment_y = (im_ud) // (fiducial_points-1)
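# (im_x, im_y) is a fiducial_points x fiducial_points grid of control-point
# coordinates sampled evenly over the resized document, inset by edge_padding
# pixels on every side; segment_x/segment_y are the approximate spacings
# between neighbouring control points.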
# plt.plot(im_x, im_y,
# color='limegreen',
# marker='.',
# linestyle='')
# plt.grid(True)
# plt.show()
self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), interpolation=cv2.INTER_CUBIC)
perturbed_bg_ = getDatasets(self.bg_path)
perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_)
perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR)
mesh_shape = self.origin_img.shape[:2]
self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img)
# self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img)
self.new_shape = self.synthesis_perturbed_img.shape[:2]
perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), interpolation=cv2.INTER_LINEAR)
origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2)
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2))
# self.perturbed_xy_ = pixel_position.copy().astype(np.float32)
# fiducial_points_grid = origin_pixel_position[im_x, im_y]
self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2))
x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape)
origin_pixel_position += [x_min, y_min]
x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1])
x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16)
y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16)
x_min += x_shift
x_max += x_shift
y_min += y_shift
y_max += y_shift
'''im_x,y'''
im_x += x_min
im_y += y_min
self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img
self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position
synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy()
synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy()
foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16)
foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16)
foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label
# synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max)
# synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max)
'''*****************************************************************'''
is_normalizationFun_mixture = self.is_perform(0.2, 0.8)
# if not is_normalizationFun_mixture:
normalizationFun_0_1 = False
# normalizationFun_0_1 = self.is_perform(0.5, 0.5)
if fold_curve == 'fold':
fold_curve_random = True
# is_normalizationFun_mixture = False
normalizationFun_0_1 = self.is_perform(0.2, 0.8)
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99)
alpha_perturbed = random.randint(80, 160) / 100
# is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99)
synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
# synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16)
synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
alpha_perturbed_change = self.is_perform(0.5, 0.5)
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9)
for repeat_i in range(repeat_time):
if alpha_perturbed_change:
if fold_curve == 'fold':
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
alpha_perturbed = random.randint(80, 160) / 100
''''''
linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1,
self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1]
linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1,
self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1]
linspace_x_seq = [1, 2, 3]
linspace_y_seq = [1, 2, 3]
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_p = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice:
linspace_x_seq.remove(r_x)
linspace_y_seq.remove(r_y)
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_pp = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
# perturbed_p, perturbed_pp = np.array(
# [random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10]) \
# , np.array([random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10])
# perturbed_p, perturbed_pp = np.array(
# [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \
# , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10])
''''''
perturbed_vp = perturbed_pp - perturbed_p
perturbed_vp_norm = np.linalg.norm(perturbed_vp)
perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm
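# perturbed_distance_vertex_and_line is, for every pixel, the signed distance
# to the "crease": the line through perturbed_p perpendicular to the vector
# perturbed_p -> perturbed_pp. This distance controls how strongly each pixel
# is displaced by the current fold/curve perturbation.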
''''''
# perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100])
# perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100])
if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7):
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100])
# perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100])
else:
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
# perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100])
perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100])
# perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100])
# perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10])
''''''
if fold_curve == 'fold':
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
''''''
if fold_curve_random:
# omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed)
# omega_perturbed = alpha_perturbed**perturbed_d
omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed)
else:
omega_perturbed = 1 - perturbed_d ** alpha_perturbed
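# omega_perturbed turns the (normalized) crease distance into a displacement
# weight: folds typically use the sharply peaked alpha / (d + alpha), while
# curves mostly use the smoother 1 - d**alpha falloff. Each pixel is later
# shifted by omega_perturbed * perturbed_v when accumulating self.perturbed_xy_.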
'''shadow'''
if self.is_perform(0.6, 0.4):
synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255)
''''''
if relativeShift_position in ['position', 'relativeShift_v2']:
self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0)
else:
print('relativeShift_position error')
exit()
'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
# synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8)
synthesis_perturbed_label[:, :, 0] *= foreORbackground_label
synthesis_perturbed_label[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 0] *= foreORbackground_label
synthesis_perturbed_img[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 2] *= foreORbackground_label
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
'''
'''perspective'''
perspective_shreshold = random.randint(26, 36)*10 # 280
x_min_per, y_min_per, x_max_per, y_max_per = self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold)
pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]])
e_1_ = x_max_per - x_min_per
e_2_ = y_max_per - y_min_per
e_3_ = e_2_
e_4_ = e_1_
perspective_shreshold_h = e_1_*0.02
perspective_shreshold_w = e_2_*0.02
a_min_, a_max_ = 70, 110
# if self.is_perform(1, 0):
if fold_curve == 'curve' and self.is_perform(0.5, 0.5):
if self.is_perform(0.5, 0.5):
while True:
pts2 = np.around(
np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(
np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
M = cv2.getPerspectiveTransform(pts1, pts2)
one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16)
matr = np.dstack((pixel_position, one))
new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3)
x = new[:, :, 0]/new[:, :, 2]
y = new[:, :, 1]/new[:, :, 2]
perturbed_xy_ = np.dstack((x, y))
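# The 3x3 homography M is applied to every pixel coordinate in homogeneous
# form; dividing by the third component yields the perspective-warped (x, y)
# positions that are added to the accumulated deformation field below.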
# perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75))
# perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17)))
# perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17))
# perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0)
perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1)
# perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16)
self.perturbed_xy_ += perturbed_xy_
'''perspective end'''
'''to img'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
# self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7))
self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0)
'''get fiducial points'''
fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y]
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
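# interp_weights/interpolate (inherited from utils.BasePerturbed, presumably a
# triangulation-based scattered-data interpolation) resample the image, label
# and foreground masks onto the deformed grid; positions whose absolute weight
# sum exceeds 1 fall outside the triangulation and keep the background value.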
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
self.foreORbackground_label = foreORbackground_label
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img)
'''
'''clip'''
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
if perturbed_x_min == 0 or perturbed_x_max == self.new_shape[0] or perturbed_y_min == 0 or perturbed_y_max == self.new_shape[1]:
raise Exception('clip error')
if perturbed_x_max - perturbed_x_min < im_lr//2 or perturbed_y_max - perturbed_y_min < im_ud//2:
raise Exception('clip error')
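# Clip step: the enlarged canvas was initialised to 256 per channel, so a row that is pure
# background sums to 768 * width (and a column to 768 * height). The four loops walk from
# the centre towards each border to find the tight bounding box of the warped page, and the
# two raise statements above reject degenerate results (a box touching the canvas edge, or a
# box smaller than half the resized document). A rough vectorised equivalent, assuming the
# background value is still exactly 256 everywhere:
# row_is_bg = np.all(self.synthesis_perturbed_img == 256, axis=(1, 2))
# col_is_bg = np.all(self.synthesis_perturbed_img == 256, axis=(0, 2))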
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
is_shrink = False
if perturbed_x_max - perturbed_x_min > save_img_shape[0] or perturbed_y_max - perturbed_y_min > save_img_shape[1]:
is_shrink = True
synthesis_perturbed_img = cv2.resize(self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
synthesis_perturbed_label = cv2.resize(self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label = cv2.resize(self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
'''shrink fiducial points'''
center_x_l, center_y_l = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
fiducial_points_coordinate_copy = fiducial_points_coordinate.copy()
shrink_x = im_lr/(perturbed_x_max - perturbed_x_min)
shrink_y = im_ud/(perturbed_y_max - perturbed_y_min)
fiducial_points_coordinate *= [shrink_x, shrink_y]
center_x_l *= shrink_x
center_y_l *= shrink_y
# fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y]
# fiducial_points_coordinate[1:, :1, 0] *= shrink_x
# fiducial_points_coordinate[:1, 1:, 1] *= shrink_y
# perturbed_x_min_copy, perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
self.synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
self.synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
self.foreORbackground_label = np.zeros_like(self.foreORbackground_label)
self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_img
self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_label
self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max] = foreORbackground_label
center_x, center_y = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
if is_shrink:
fiducial_points_coordinate += [center_x-center_x_l, center_y-center_y_l]
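# Shrink step: if the warped page is larger than save_img_shape, it is resized back to
# im_lr x im_ud, pasted into a fresh canvas centred by adjust_position_v2, and the fiducial
# points are rescaled by the same (shrink_x, shrink_y) factors and then translated so that
# the old box centre (center_x_l, center_y_l) lands on the new box centre (center_x, center_y).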
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img,
(l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_small.jpg',fiducial_points_synthesis_perturbed_img)
'''
self.new_shape = save_img_shape
self.synthesis_perturbed_img = self.synthesis_perturbed_img[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.foreORbackground_label = self.foreORbackground_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2].copy()
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
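# The margin arithmetic above recentres the bounding box inside the final save_img_shape
# canvas. Worked example (assumed numbers, for illustration only): with self.new_shape[0]
# == 1024 and a warped height of 900, perturbed_x_ = 124, so perturbed_x_min = 62 and
# perturbed_x_max = 1024 - 62 = 962, preserving the 900-pixel extent; in the odd case the
# extra pixel is trimmed from the max side.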
'''clip
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
center_x, center_y = perturbed_x_min+(perturbed_x_max - perturbed_x_min)//2, perturbed_y_min+(perturbed_y_max - perturbed_y_min)//2
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
self.new_shape = save_img_shape
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
self.synthesis_perturbed_img = self.synthesis_perturbed_img[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.foreORbackground_label = self.foreORbackground_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2].copy()
'''
'''save'''
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
if relativeShift_position == 'relativeShift_v2':
self.synthesis_perturbed_label -= pixel_position
fiducial_points_coordinate -= [center_x - self.new_shape[0] // 2, center_y - self.new_shape[1] // 2]
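# With relativeShift_v2 the stored label is a backward flow rather than an absolute map:
# label(x, y) = source position - (x, y). A reader can recover the absolute backward map as
# (hypothetical reader-side step):
# backward_map = self.synthesis_perturbed_label + pixel_position
# The fiducial points are shifted into the same cropped coordinate frame on the line above.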
self.synthesis_perturbed_label[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_label[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 2] *= self.foreORbackground_label
'''
synthesis_perturbed_img_filter = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
# if self.is_perform(0.9, 0.1) or repeat_time > 5:
# # if self.is_perform(0.1, 0.9) and repeat_time > 9:
# # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (7, 7), 0)
# # else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
# else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
self.synthesis_perturbed_img[self.foreORbackground_label == 1] = synthesis_perturbed_img_filter[self.foreORbackground_label == 1]
'''
'''
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img
HSV
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.2)*20, (random.random()-0.2)/8, (random.random()-0.2)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/8, (random.random()-0.2)*20
perturbed_bg_img_HSV[:, :, 0], perturbed_bg_img_HSV[:, :, 1], perturbed_bg_img_HSV[:, :, 2] = perturbed_bg_img_HSV[:, :, 0]-H_, perturbed_bg_img_HSV[:, :, 1]-S_, perturbed_bg_img_HSV[:, :, 2]-V_
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
# synthesis_perturbed_img_clip_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img[np.sum(self.synthesis_perturbed_img, 2) == 771]
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/10, (random.random()-0.4)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
'''
'''HSV_v2'''
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
# if self.is_perform(1, 0):
# if self.is_perform(1, 0):
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = self.HSV_v1(perturbed_bg_img_HSV)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
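# HSV_v2 compositing: the random branches decide whether the HSV jitter (HSV_v1, defined in
# utils and not shown here) is applied to the document before the background is added, to the
# background only, or to the full composite after blending through the foreground mask.
# Judging from the disabled HSV block above, a plausible shape for HSV_v1 (an assumption,
# not the verified helper) is:
# def HSV_v1(img):
#     hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
#     h_, s_, v_ = (random.random()-0.5)*20, (random.random()-0.5)/8, (random.random()-0.5)*20
#     hsv[:, :, 0], hsv[:, :, 1], hsv[:, :, 2] = hsv[:, :, 0]-h_, hsv[:, :, 1]-s_, hsv[:, :, 2]-v_
#     return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)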
''''''
# cv2.imwrite(self.save_path+'clip/'+perfix_+'_'+fold_curve+str(perturbed_time)+'-'+str(repeat_time)+'.png', synthesis_perturbed_img_clip)
self.synthesis_perturbed_img[self.synthesis_perturbed_img < 0] = 0
self.synthesis_perturbed_img[self.synthesis_perturbed_img > 255] = 255
self.synthesis_perturbed_img = np.around(self.synthesis_perturbed_img).astype(np.uint8)
label = np.zeros_like(self.synthesis_perturbed_img, dtype=np.float32)
label[:, :, :2] = self.synthesis_perturbed_label
label[:, :, 2] = self.foreORbackground_label
# grey = np.around(self.synthesis_perturbed_img[:, :, 0] * 0.2989 + self.synthesis_perturbed_img[:, :, 1] * 0.5870 + self.synthesis_perturbed_img[:, :, 0] * 0.1140).astype(np.int16)
# synthesis_perturbed_grey = np.concatenate((grey.reshape(self.new_shape[0], self.new_shape[1], 1), label), axis=2)
synthesis_perturbed_color = np.concatenate((self.synthesis_perturbed_img, label), axis=2)
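# Channel layout of synthesis_perturbed_color (float32, H x W x 6):
#   [:, :, 0:3]  RGB image
#   [:, :, 3:5]  backward flow / relative shift label
#   [:, :, 5]    foreground (document) mask
# e.g. a consumer would split it as:
# rgb, flow, mask = synthesis_perturbed_color[:, :, :3], synthesis_perturbed_color[:, :, 3:5], synthesis_perturbed_color[:, :, 5]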
self.synthesis_perturbed_color = np.zeros_like(synthesis_perturbed_color, dtype=np.float32)
# self.synthesis_perturbed_grey = np.zeros_like(synthesis_perturbed_grey, dtype=np.float32)
reduce_value_x = int(round(min((random.random() / 2) * (self.new_shape[0] - (perturbed_x_max - perturbed_x_min)), min(reduce_value, reduce_value_v2))))
reduce_value_y = int(round(min((random.random() / 2) * (self.new_shape[1] - (perturbed_y_max - perturbed_y_min)), min(reduce_value, reduce_value_v2))))
perturbed_x_min = max(perturbed_x_min - reduce_value_x, 0)
perturbed_x_max = min(perturbed_x_max + reduce_value_x, self.new_shape[0])
perturbed_y_min = max(perturbed_y_min - reduce_value_y, 0)
perturbed_y_max = min(perturbed_y_max + reduce_value_y, self.new_shape[1])
if im_lr >= im_ud:
self.synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :]
# self.synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :]
else:
self.synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :]
# self.synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :]
'''blur'''
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = self.synthesis_perturbed_color[:, :, :3].copy()
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
else:
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
if self.is_perform(0.5, 0.5):
self.synthesis_perturbed_color[:, :, :3][self.synthesis_perturbed_color[:, :, 5] == 1] = synthesis_perturbed_img_filter[self.synthesis_perturbed_color[:, :, 5] == 1]
else:
self.synthesis_perturbed_color[:, :, :3] = synthesis_perturbed_img_filter
fiducial_points_coordinate = fiducial_points_coordinate[:, :, ::-1]
'''draw fiducial points'''
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_color[:, :, :3].copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[0] + math.ceil(stepSize / 2), l[1] + math.ceil(stepSize / 2)), 2, (0, 0, 255), -1)
cv2.imwrite(self.save_path + 'fiducial_points/' + perfix_ + '_' + fold_curve + '.png', fiducial_points_synthesis_perturbed_img)
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
'''forward-begin'''
self.forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_position = (self.synthesis_perturbed_color[:, :, 3:5] + pixel_position)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
flat_position = np.argwhere(np.zeros(save_img_shape, dtype=np.uint32) == 0)
vtx, wts = self.interp_weights(forward_position, flat_position)
wts_sum = np.abs(wts).sum(-1)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
flat_position_forward = flat_position.reshape(save_img_shape[0], save_img_shape[1], 2)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
forward_mapping.reshape(save_img_shape[0] * save_img_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(flat_position_forward, vtx, wts)
forward_mapping = forward_mapping.reshape(save_img_shape[0], save_img_shape[1], 2)
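# Forward mapping: forward_position holds, for every foreground pixel, the flat-document
# coordinate it came from (backward flow + pixel grid). Interpolating the warped pixel grid
# at those scattered positions inverts the warp, so forward_mapping[u, v] gives the
# warped-image coordinate whose content belongs at flat position (u, v). The commented lines
# further below show the intended use with cv2.remap; note that remap wants float32 maps with
# x (columns) first, e.g.:
# flat_img = cv2.remap(self.synthesis_perturbed_color[:, :, :3],
#                      self.forward_mapping[:, :, 1].astype(np.float32),
#                      self.forward_mapping[:, :, 0].astype(np.float32), cv2.INTER_LINEAR)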
mapping_x_min_, mapping_y_min_, mapping_x_max_, mapping_y_max_ = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
shreshold_zoom_out = 2
mapping_x_min = mapping_x_min_ + shreshold_zoom_out
mapping_y_min = mapping_y_min_ + shreshold_zoom_out
mapping_x_max = mapping_x_max_ - shreshold_zoom_out
mapping_y_max = mapping_y_max_ - shreshold_zoom_out
self.forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max] = forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max]
self.scan_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
self.scan_img[mapping_x_min_:mapping_x_max_, mapping_y_min_:mapping_y_max_] = self.origin_img
self.origin_img = self.scan_img
# flat_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
# cv2.remap(self.synthesis_perturbed_color[:, :, :3], self.forward_mapping[:, :, 1], self.forward_mapping[:, :, 0], cv2.INTER_LINEAR, flat_img)
# cv2.imwrite(self.save_path + 'outputs/1.jpg', flat_img)
'''forward-end'''
synthesis_perturbed_data = {
'fiducial_points': fiducial_points_coordinate,
'segment': np.array((segment_x, segment_y))
}
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
with open(self.save_path+'color/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
pickle_perturbed_data = pickle.dumps(synthesis_perturbed_data)
f.write(pickle_perturbed_data)
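# The .gw file is an ordinary pickle of synthesis_perturbed_data: a dict with
# 'fiducial_points' (a fiducial_points x fiducial_points grid of 2-D point coordinates)
# and 'segment' (the x/y grid spacing). A reader-side sketch (the path is illustrative):
# with open(save_path + 'color/some_sample_fold.gw', 'rb') as f:
#     data = pickle.load(f)
# fid, seg = data['fiducial_points'], data['segment']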
# with open(self.save_path+'grey/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
# pickle_perturbed_data = pickle.dumps(self.synthesis_perturbed_grey)
# f.write(pickle_perturbed_data)
# cv2.imwrite(self.save_path+'grey_im/'+perfix_+'_'+fold_curve+'.png', self.synthesis_perturbed_color[:, :, :1])
# cv2.imwrite(self.save_path + 'scan/' + self.save_suffix + '_' + str(m) + '.png', self.origin_img)
train_t = time.time() - begin_train
mm, ss = divmod(train_t, 60)
hh, mm = divmod(mm, 60)
print(str(m)+'_'+str(n)+'_'+fold_curve+' '+str(repeat_time)+" Time : %02d:%02d:%02d\n" % (hh, mm, ss))
def multiThread(m, n, img_path_, bg_path_, save_path, save_suffix, fiducial_points=16):
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(10, 3)), 5), 16)
fold = threading.Thread(target=saveFold.save_img, args=(m, n, 'fold', repeat_time, fiducial_points, 'relativeShift_v2'), name='fold')
curve = threading.Thread(target=saveCurve.save_img, args=(m, n, 'curve', repeat_time, fiducial_points, 'relativeShift_v2'), name='curve')
fold.start()
curve.start()
curve.join()
fold.join()
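# xgw() below is the driver: it creates the output sub-directories, walks every image under
# --path, pairs it with a random background from --bg_path, and dispatches one 'fold' and one
# 'curve' job per (image, repetition) to a two-worker multiprocessing Pool. multiThread()
# above is a threading variant of the same dispatch that is not called from the __main__ path.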
def xgw(args):
path = args.path
bg_path = args.bg_path
if args.output_path is None:
save_path = '/lustre/home/gwxie/data/unwarp_new/train/general1024/general1024_v1/'
else:
save_path = args.output_path
# if not os.path.exists(save_path + 'grey/'):
# os.makedirs(save_path + 'grey/')
if not os.path.exists(save_path + 'color/'):
os.makedirs(save_path + 'color/')
if not os.path.exists(save_path + 'fiducial_points/'):
os.makedirs(save_path + 'fiducial_points/')
if not os.path.exists(save_path + 'png/'):
os.makedirs(save_path + 'png/')
if not os.path.exists(save_path + 'scan/'):
os.makedirs(save_path + 'scan/')
if not os.path.exists(save_path + 'outputs/'):
os.makedirs(save_path + 'outputs/')
save_suffix = str.split(args.path, '/')[-2]
all_img_path = getDatasets(path)
all_bgImg_path = getDatasets(bg_path)
global begin_train
begin_train = time.time()
fiducial_points = 61 # 31
process_pool = Pool(2)
for m, img_path in enumerate(all_img_path):
for n in range(args.sys_num):
img_path_ = path+img_path
bg_path_ = bg_path+random.choice(all_bgImg_path)+'/'
for m_n in range(10):
try:
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(12, 4)), 1), 18)
# repeat_time = min(max(round(np.random.normal(8, 4)), 1), 12) # random.randint(1, 2) # min(max(round(np.random.normal(8, 4)), 1), 12)
process_pool.apply_async(func=saveFold.save_img, args=(m, n, 'fold', repeat_time, fiducial_points, 'relativeShift_v2'))
repeat_time = min(max(round(np.random.normal(8, 4)), 1), 13)
# repeat_time = min(max(round(np.random.normal(6, 4)), 1), 10)
process_pool.apply_async(func=saveCurve.save_img, args=(m, n, 'curve', repeat_time, fiducial_points, 'relativeShift_v2'))
except BaseException as err:
print(err)
continue
break
# print('end')
process_pool.close()
process_pool.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--path',
default='./scan/new/', type=str,
help='the path of origin img.')
parser.add_argument('--bg_path',
default='./background/', type=str,
help='the path of bg img.')
parser.add_argument('--output_path',
default='./output/', type=str,
help='the path of the output directory.')
# parser.set_defaults(output_path='test')
parser.add_argument('--count_from', '-p', default=0, type=int,
metavar='N', help='index to start counting from (default: 0)')
parser.add_argument('--repeat_T', default=0, type=int)
parser.add_argument('--sys_num', default=6, type=int)
args = parser.parse_args()
xgw(args)
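# Example invocation (argument values are the defaults declared above; the script name is
# illustrative). Note that --path and --bg_path are concatenated directly with os.listdir
# entries, so both should end with a trailing '/':
# python perturbed_images_generation.py --path ./scan/new/ --bg_path ./background/ \
#     --output_path ./output/ --sys_num 6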
| '''
<NAME>
set up :2020-1-9
intergrate img and label into one file
-- fiducial1024_v1
'''
import argparse
import sys, os
import pickle
import random
import collections
import json
import numpy as np
import scipy.io as io
import scipy.misc as m
import matplotlib.pyplot as plt
import glob
import math
import time
import threading
import multiprocessing as mp
from multiprocessing import Pool
import re
import cv2
# sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN
import utils
def getDatasets(dir):
return os.listdir(dir)
class perturbed(utils.BasePerturbed):
def __init__(self, path, bg_path, save_path, save_suffix):
self.path = path
self.bg_path = bg_path
self.save_path = save_path
self.save_suffix = save_suffix
def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'):
origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR)
save_img_shape = [512*2, 480*2] # 320
# reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1])
reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])
# reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18])
# reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09])
base_img_shrink = save_img_shape[0] - reduce_value
# enlarge_img_shrink = [1024, 768]
# enlarge_img_shrink = [896, 672] # 420
enlarge_img_shrink = [512*4, 480*4] # 420
# enlarge_img_shrink = [896*2, 768*2] # 420
# enlarge_img_shrink = [896, 768] # 420
# enlarge_img_shrink = [768, 576] # 420
# enlarge_img_shrink = [640, 480] # 420
''''''
im_lr = origin_img.shape[0]
im_ud = origin_img.shape[1]
reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
# reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14])
if im_lr > im_ud:
im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2)
im_lr = save_img_shape[0] - reduce_value
else:
base_img_shrink = save_img_shape[1] - reduce_value
im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2)
im_ud = base_img_shrink
if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5:
repeat_time = min(repeat_time, 8)
edge_padding = 3
im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1
im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1
im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64)
im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64)
# im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1
# im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1
# im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64)
# im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64)
im_x, im_y = np.meshgrid(im_hight, im_wide)
segment_x = (im_lr) // (fiducial_points-1)
segment_y = (im_ud) // (fiducial_points-1)
# plt.plot(im_x, im_y,
# color='limegreen',
# marker='.',
# linestyle='')
# plt.grid(True)
# plt.show()
self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), interpolation=cv2.INTER_CUBIC)
perturbed_bg_ = getDatasets(self.bg_path)
perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_)
perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR)
mesh_shape = self.origin_img.shape[:2]
self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img)
# self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img)
self.new_shape = self.synthesis_perturbed_img.shape[:2]
perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA)
origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2)
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2))
# self.perturbed_xy_ = pixel_position.copy().astype(np.float32)
# fiducial_points_grid = origin_pixel_position[im_x, im_y]
self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2))
x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape)
origin_pixel_position += [x_min, y_min]
x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1])
x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16)
y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16)
x_min += x_shift
x_max += x_shift
y_min += y_shift
y_max += y_shift
'''im_x,y'''
im_x += x_min
im_y += y_min
self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img
self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position
synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy()
synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy()
foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16)
foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16)
foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label
# synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max)
# synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max)
'''*****************************************************************'''
is_normalizationFun_mixture = self.is_perform(0.2, 0.8)
# if not is_normalizationFun_mixture:
normalizationFun_0_1 = False
# normalizationFun_0_1 = self.is_perform(0.5, 0.5)
if fold_curve == 'fold':
fold_curve_random = True
# is_normalizationFun_mixture = False
normalizationFun_0_1 = self.is_perform(0.2, 0.8)
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99)
alpha_perturbed = random.randint(80, 160) / 100
# is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99)
synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
# synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16)
synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
alpha_perturbed_change = self.is_perform(0.5, 0.5)
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9)
for repeat_i in range(repeat_time):
if alpha_perturbed_change:
if fold_curve == 'fold':
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
alpha_perturbed = random.randint(80, 160) / 100
''''''
linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1,
self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1]
linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1,
self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1]
linspace_x_seq = [1, 2, 3]
linspace_y_seq = [1, 2, 3]
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_p = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice:
linspace_x_seq.remove(r_x)
linspace_y_seq.remove(r_y)
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_pp = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
# perturbed_p, perturbed_pp = np.array(
# [random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10]) \
# , np.array([random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10])
# perturbed_p, perturbed_pp = np.array(
# [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \
# , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10])
''''''
perturbed_vp = perturbed_pp - perturbed_p
perturbed_vp_norm = np.linalg.norm(perturbed_vp)
perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm
''''''
# perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100])
# perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100])
if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7):
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100])
# perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100])
else:
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
# perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100])
perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100])
# perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100])
# perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10])
''''''
if fold_curve == 'fold':
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
''''''
if fold_curve_random:
# omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed)
# omega_perturbed = alpha_perturbed**perturbed_d
omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed)
else:
omega_perturbed = 1 - perturbed_d ** alpha_perturbed
'''shadow'''
if self.is_perform(0.6, 0.4):
synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255)
''''''
if relativeShift_position in ['position', 'relativeShift_v2']:
self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0)
else:
print('relativeShift_position error')
exit()
'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
# synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8)
synthesis_perturbed_label[:, :, 0] *= foreORbackground_label
synthesis_perturbed_label[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 0] *= foreORbackground_label
synthesis_perturbed_img[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 2] *= foreORbackground_label
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
'''
'''perspective'''
perspective_shreshold = random.randint(26, 36)*10 # 280
x_min_per, y_min_per, x_max_per, y_max_per = self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold)
pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]])
e_1_ = x_max_per - x_min_per
e_2_ = y_max_per - y_min_per
e_3_ = e_2_
e_4_ = e_1_
perspective_shreshold_h = e_1_*0.02
perspective_shreshold_w = e_2_*0.02
a_min_, a_max_ = 70, 110
# if self.is_perform(1, 0):
if fold_curve == 'curve' and self.is_perform(0.5, 0.5):
if self.is_perform(0.5, 0.5):
while True:
pts2 = np.around(
np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(
np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
M = cv2.getPerspectiveTransform(pts1, pts2)
one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16)
matr = np.dstack((pixel_position, one))
new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3)
x = new[:, :, 0]/new[:, :, 2]
y = new[:, :, 1]/new[:, :, 2]
perturbed_xy_ = np.dstack((x, y))
# perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75))
# perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17)))
# perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17))
# perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0)
perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1)
# perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16)
self.perturbed_xy_ += perturbed_xy_
'''perspective end'''
'''to img'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
# self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7))
self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0)
'''get fiducial points'''
fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y]
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
self.foreORbackground_label = foreORbackground_label
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img)
'''
'''clip'''
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
if perturbed_x_min == 0 or perturbed_x_max == self.new_shape[0] or perturbed_y_min == self.new_shape[1] or perturbed_y_max == self.new_shape[1]:
raise Exception('clip error')
if perturbed_x_max - perturbed_x_min < im_lr//2 or perturbed_y_max - perturbed_y_min < im_ud//2:
raise Exception('clip error')
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
is_shrink = False
if perturbed_x_max - perturbed_x_min > save_img_shape[0] or perturbed_y_max - perturbed_y_min > save_img_shape[1]:
is_shrink = True
synthesis_perturbed_img = cv2.resize(self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
synthesis_perturbed_label = cv2.resize(self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label = cv2.resize(self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
'''shrink fiducial points'''
center_x_l, center_y_l = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
fiducial_points_coordinate_copy = fiducial_points_coordinate.copy()
shrink_x = im_lr/(perturbed_x_max - perturbed_x_min)
shrink_y = im_ud/(perturbed_y_max - perturbed_y_min)
fiducial_points_coordinate *= [shrink_x, shrink_y]
center_x_l *= shrink_x
center_y_l *= shrink_y
# fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y]
# fiducial_points_coordinate[1:, :1, 0] *= shrink_x
# fiducial_points_coordinate[:1, 1:, 1] *= shrink_y
# perturbed_x_min_copy, perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
self.synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
self.synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
self.foreORbackground_label = np.zeros_like(self.foreORbackground_label)
self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_img
self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_label
self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max] = foreORbackground_label
center_x, center_y = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
if is_shrink:
fiducial_points_coordinate += [center_x-center_x_l, center_y-center_y_l]
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img,
(l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_small.jpg',fiducial_points_synthesis_perturbed_img)
'''
self.new_shape = save_img_shape
self.synthesis_perturbed_img = self.synthesis_perturbed_img[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.foreORbackground_label = self.foreORbackground_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2].copy()
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
'''clip
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
center_x, center_y = perturbed_x_min+(perturbed_x_max - perturbed_x_min)//2, perturbed_y_min+(perturbed_y_max - perturbed_y_min)//2
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
self.new_shape = save_img_shape
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
self.synthesis_perturbed_img = self.synthesis_perturbed_img[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.foreORbackground_label = self.foreORbackground_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2].copy()
'''
'''save'''
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
if relativeShift_position == 'relativeShift_v2':
self.synthesis_perturbed_label -= pixel_position
fiducial_points_coordinate -= [center_x - self.new_shape[0] // 2, center_y - self.new_shape[1] // 2]
self.synthesis_perturbed_label[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_label[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 2] *= self.foreORbackground_label
'''
synthesis_perturbed_img_filter = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
# if self.is_perform(0.9, 0.1) or repeat_time > 5:
# # if self.is_perform(0.1, 0.9) and repeat_time > 9:
# # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (7, 7), 0)
# # else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
# else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
self.synthesis_perturbed_img[self.foreORbackground_label == 1] = synthesis_perturbed_img_filter[self.foreORbackground_label == 1]
'''
'''
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img
HSV
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.2)*20, (random.random()-0.2)/8, (random.random()-0.2)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/8, (random.random()-0.2)*20
perturbed_bg_img_HSV[:, :, 0], perturbed_bg_img_HSV[:, :, 1], perturbed_bg_img_HSV[:, :, 2] = perturbed_bg_img_HSV[:, :, 0]-H_, perturbed_bg_img_HSV[:, :, 1]-S_, perturbed_bg_img_HSV[:, :, 2]-V_
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
# synthesis_perturbed_img_clip_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img[np.sum(self.synthesis_perturbed_img, 2) == 771]
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/10, (random.random()-0.4)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
'''
'''HSV_v2'''
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
# if self.is_perform(1, 0):
# if self.is_perform(1, 0):
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = self.HSV_v1(perturbed_bg_img_HSV)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
''''''
# cv2.imwrite(self.save_path+'clip/'+perfix_+'_'+fold_curve+str(perturbed_time)+'-'+str(repeat_time)+'.png', synthesis_perturbed_img_clip)
self.synthesis_perturbed_img[self.synthesis_perturbed_img < 0] = 0
self.synthesis_perturbed_img[self.synthesis_perturbed_img > 255] = 255
self.synthesis_perturbed_img = np.around(self.synthesis_perturbed_img).astype(np.uint8)
label = np.zeros_like(self.synthesis_perturbed_img, dtype=np.float32)
label[:, :, :2] = self.synthesis_perturbed_label
label[:, :, 2] = self.foreORbackground_label
		# grey = np.around(self.synthesis_perturbed_img[:, :, 0] * 0.2989 + self.synthesis_perturbed_img[:, :, 1] * 0.5870 + self.synthesis_perturbed_img[:, :, 2] * 0.1140).astype(np.int16)
# synthesis_perturbed_grey = np.concatenate((grey.reshape(self.new_shape[0], self.new_shape[1], 1), label), axis=2)
synthesis_perturbed_color = np.concatenate((self.synthesis_perturbed_img, label), axis=2)
self.synthesis_perturbed_color = np.zeros_like(synthesis_perturbed_color, dtype=np.float32)
# self.synthesis_perturbed_grey = np.zeros_like(synthesis_perturbed_grey, dtype=np.float32)
reduce_value_x = int(round(min((random.random() / 2) * (self.new_shape[0] - (perturbed_x_max - perturbed_x_min)), min(reduce_value, reduce_value_v2))))
reduce_value_y = int(round(min((random.random() / 2) * (self.new_shape[1] - (perturbed_y_max - perturbed_y_min)), min(reduce_value, reduce_value_v2))))
perturbed_x_min = max(perturbed_x_min - reduce_value_x, 0)
perturbed_x_max = min(perturbed_x_max + reduce_value_x, self.new_shape[0])
perturbed_y_min = max(perturbed_y_min - reduce_value_y, 0)
perturbed_y_max = min(perturbed_y_max + reduce_value_y, self.new_shape[1])
if im_lr >= im_ud:
self.synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :]
# self.synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :]
else:
self.synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :]
# self.synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :]
'''blur'''
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = self.synthesis_perturbed_color[:, :, :3].copy()
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
else:
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
if self.is_perform(0.5, 0.5):
self.synthesis_perturbed_color[:, :, :3][self.synthesis_perturbed_color[:, :, 5] == 1] = synthesis_perturbed_img_filter[self.synthesis_perturbed_color[:, :, 5] == 1]
else:
self.synthesis_perturbed_color[:, :, :3] = synthesis_perturbed_img_filter
fiducial_points_coordinate = fiducial_points_coordinate[:, :, ::-1]
'''draw fiducial points'''
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_color[:, :, :3].copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[0] + math.ceil(stepSize / 2), l[1] + math.ceil(stepSize / 2)), 2, (0, 0, 255), -1)
cv2.imwrite(self.save_path + 'fiducial_points/' + perfix_ + '_' + fold_curve + '.png', fiducial_points_synthesis_perturbed_img)
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
'''forward-begin'''
self.forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_position = (self.synthesis_perturbed_color[:, :, 3:5] + pixel_position)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
flat_position = np.argwhere(np.zeros(save_img_shape, dtype=np.uint32) == 0)
vtx, wts = self.interp_weights(forward_position, flat_position)
wts_sum = np.abs(wts).sum(-1)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
flat_position_forward = flat_position.reshape(save_img_shape[0], save_img_shape[1], 2)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
forward_mapping.reshape(save_img_shape[0] * save_img_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(flat_position_forward, vtx, wts)
forward_mapping = forward_mapping.reshape(save_img_shape[0], save_img_shape[1], 2)
mapping_x_min_, mapping_y_min_, mapping_x_max_, mapping_y_max_ = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
		threshold_zoom_out = 2
		mapping_x_min = mapping_x_min_ + threshold_zoom_out
		mapping_y_min = mapping_y_min_ + threshold_zoom_out
		mapping_x_max = mapping_x_max_ - threshold_zoom_out
		mapping_y_max = mapping_y_max_ - threshold_zoom_out
self.forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max] = forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max]
self.scan_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
self.scan_img[mapping_x_min_:mapping_x_max_, mapping_y_min_:mapping_y_max_] = self.origin_img
self.origin_img = self.scan_img
# flat_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
# cv2.remap(self.synthesis_perturbed_color[:, :, :3], self.forward_mapping[:, :, 1], self.forward_mapping[:, :, 0], cv2.INTER_LINEAR, flat_img)
# cv2.imwrite(self.save_path + 'outputs/1.jpg', flat_img)
'''forward-end'''
synthesis_perturbed_data = {
'fiducial_points': fiducial_points_coordinate,
'segment': np.array((segment_x, segment_y))
}
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
with open(self.save_path+'color/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
pickle_perturbed_data = pickle.dumps(synthesis_perturbed_data)
f.write(pickle_perturbed_data)
# with open(self.save_path+'grey/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
# pickle_perturbed_data = pickle.dumps(self.synthesis_perturbed_grey)
# f.write(pickle_perturbed_data)
# cv2.imwrite(self.save_path+'grey_im/'+perfix_+'_'+fold_curve+'.png', self.synthesis_perturbed_color[:, :, :1])
# cv2.imwrite(self.save_path + 'scan/' + self.save_suffix + '_' + str(m) + '.png', self.origin_img)
		train_t = time.time() - begin_train
		mm, ss = divmod(train_t, 60)
hh, mm = divmod(mm, 60)
print(str(m)+'_'+str(n)+'_'+fold_curve+' '+str(repeat_time)+" Time : %02d:%02d:%02d\n" % (hh, mm, ss))
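# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The forward-mapping block in save_img() above calls self.interp_weights() and
# self.interpolate(), which are defined outside this excerpt. A common way to do
# such scattered-data interpolation is Delaunay triangulation with barycentric
# weights, roughly as below; the class's actual helpers may differ in detail.
# np is assumed to be numpy as imported at the top of this file.
import scipy.spatial

def interp_weights_sketch(points, query_points, d=2):
	# Triangulate the known points and locate the simplex containing each query point.
	tri = scipy.spatial.Delaunay(points)
	simplex = tri.find_simplex(query_points)
	vertices = np.take(tri.simplices, simplex, axis=0)
	temp = np.take(tri.transform, simplex, axis=0)
	delta = query_points - temp[:, d]
	bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta)
	# Barycentric weights of the d+1 simplex vertices for every query point.
	# Queries outside the triangulation get simplex -1 and weights outside [0, 1],
	# which is why the caller above filters on np.abs(wts).sum(-1) <= 1.
	return vertices, np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True)))

def interpolate_sketch(values, vtx, wts):
	# values: (num_points, C); vtx/wts: (num_queries, d+1) -> (num_queries, C)
	return np.einsum('njk,nj->nk', np.take(values, vtx, axis=0), wts)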
def multiThread(m, n, img_path_, bg_path_, save_path, save_suffix):
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(10, 3)), 5), 16)
fold = threading.Thread(target=saveFold.save_img, args=(m, n, 'fold', repeat_time, 'relativeShift_v2'), name='fold')
curve = threading.Thread(target=saveCurve.save_img, args=(m, n, 'curve', repeat_time, 'relativeShift_v2'), name='curve')
fold.start()
curve.start()
curve.join()
fold.join()
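# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The HSV_v2 block above delegates to self.HSV_v1(), whose body is not shown in
# this excerpt. Judging from the inline HSV branch kept in the commented-out block,
# a jitter of roughly this shape is likely; the offset ranges here are assumptions.
# cv2, np and random are assumed to be imported at the top of this file.
def hsv_jitter_sketch(img_rgb):
	hsv = cv2.cvtColor(img_rgb.astype(np.float32), cv2.COLOR_RGB2HSV)
	# Random shifts for hue, saturation and value (ranges are illustrative only).
	hsv[:, :, 0] -= (random.random() - 0.5) * 20
	hsv[:, :, 1] -= (random.random() - 0.5) / 8
	hsv[:, :, 2] -= (random.random() - 0.5) * 20
	return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)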
def xgw(args):
path = args.path
bg_path = args.bg_path
if args.output_path is None:
save_path = '/lustre/home/gwxie/data/unwarp_new/train/general1024/general1024_v1/'
else:
save_path = args.output_path
# if not os.path.exists(save_path + 'grey/'):
# os.makedirs(save_path + 'grey/')
if not os.path.exists(save_path + 'color/'):
os.makedirs(save_path + 'color/')
if not os.path.exists(save_path + 'fiducial_points/'):
os.makedirs(save_path + 'fiducial_points/')
if not os.path.exists(save_path + 'png/'):
os.makedirs(save_path + 'png/')
if not os.path.exists(save_path + 'scan/'):
os.makedirs(save_path + 'scan/')
if not os.path.exists(save_path + 'outputs/'):
os.makedirs(save_path + 'outputs/')
save_suffix = str.split(args.path, '/')[-2]
all_img_path = getDatasets(path)
all_bgImg_path = getDatasets(bg_path)
global begin_train
begin_train = time.time()
fiducial_points = 61 # 31
process_pool = Pool(2)
for m, img_path in enumerate(all_img_path):
for n in range(args.sys_num):
img_path_ = path+img_path
bg_path_ = bg_path+random.choice(all_bgImg_path)+'/'
for m_n in range(10):
try:
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(12, 4)), 1), 18)
# repeat_time = min(max(round(np.random.normal(8, 4)), 1), 12) # random.randint(1, 2) # min(max(round(np.random.normal(8, 4)), 1), 12)
process_pool.apply_async(func=saveFold.save_img, args=(m, n, 'fold', repeat_time, fiducial_points, 'relativeShift_v2'))
repeat_time = min(max(round(np.random.normal(8, 4)), 1), 13)
# repeat_time = min(max(round(np.random.normal(6, 4)), 1), 10)
process_pool.apply_async(func=saveCurve.save_img, args=(m, n, 'curve', repeat_time, fiducial_points, 'relativeShift_v2'))
except BaseException as err:
print(err)
continue
break
# print('end')
process_pool.close()
process_pool.join()
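# --- Illustrative sketch (added for clarity; not part of the original file) ---
# getDatasets() is called in xgw() above but defined elsewhere; listing the entries
# of the given directory is the likely behaviour, roughly:
def getDatasets_sketch(dir_path):
	# Return the image/background entry names under dir_path (non-recursive).
	return os.listdir(dir_path)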
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--path',
default='./scan/new/', type=str,
help='the path of origin img.')
parser.add_argument('--bg_path',
default='./background/', type=str,
help='the path of bg img.')
parser.add_argument('--output_path',
default='./output/', type=str,
						help='the path of output.')
# parser.set_defaults(output_path='test')
parser.add_argument('--count_from', '-p', default=0, type=int,
						metavar='N', help='starting index (default: %(default)s)')
parser.add_argument('--repeat_T', default=0, type=int)
parser.add_argument('--sys_num', default=6, type=int)
args = parser.parse_args()
xgw(args)
| en | 0.299837 | <NAME> set up :2020-1-9 intergrate img and label into one file -- fiducial1024_v1 # sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN # 320 # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1]) # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18]) # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09]) # enlarge_img_shrink = [1024, 768] # enlarge_img_shrink = [896, 672] # 420 # 420 # enlarge_img_shrink = [896*2, 768*2] # 420 # enlarge_img_shrink = [896, 768] # 420 # enlarge_img_shrink = [768, 576] # 420 # enlarge_img_shrink = [640, 480] # 420 # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14]) # im_lr % (fiducial_points-1) - 1 # im_ud % (fiducial_points-1) - 1 # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1 # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1 # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64) # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64) # plt.plot(im_x, im_y, # color='limegreen', # marker='.', # linestyle='') # plt.grid(True) # plt.show() #np.zeros_like(perturbed_bg_img) # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img) # self.perturbed_xy_ = pixel_position.copy().astype(np.float32) # fiducial_points_grid = origin_pixel_position[im_x, im_y] im_x,y # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max) # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max) ***************************************************************** # if not is_normalizationFun_mixture: # normalizationFun_0_1 = self.is_perform(0.5, 0.5) # is_normalizationFun_mixture = False # False # self.is_perform(0.01, 0.99) # is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99) # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16) # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) # perturbed_v = 
np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d shadow flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label perspective # 280 # if self.is_perform(1, 0): # right # perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75)) # perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17))) # perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17)) # perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0) # perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16) perspective end to img # self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7)) get fiducial points # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1) cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img) clip shrink fiducial points # fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y] # fiducial_points_coordinate[1:, :1, 0] *= shrink_x # fiducial_points_coordinate[:1, 1:, 1] *= shrink_y # perturbed_x_min_copy, 
perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1) cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_small.jpg',fiducial_points_synthesis_perturbed_img) clip perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1] for x in range(self.new_shape[0] // 2, perturbed_x_max): if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x: perturbed_x_max = x break for x in range(self.new_shape[0] // 2, perturbed_x_min, -1): if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0: perturbed_x_min = x break for y in range(self.new_shape[1] // 2, perturbed_y_max): if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y: perturbed_y_max = y break for y in range(self.new_shape[1] // 2, perturbed_y_min, -1): if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0: perturbed_y_min = y break center_x, center_y = perturbed_x_min+(perturbed_x_max - perturbed_x_min)//2, perturbed_y_min+(perturbed_y_max - perturbed_y_min)//2 perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n) self.new_shape = save_img_shape perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0) perturbed_x_min = perturbed_x_ // 2 perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1) perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0) perturbed_y_min = perturbed_y_ // 2 perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1) self.synthesis_perturbed_img = self.synthesis_perturbed_img[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy() self.synthesis_perturbed_label = self.synthesis_perturbed_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy() self.foreORbackground_label = self.foreORbackground_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2].copy() save synthesis_perturbed_img_filter = self.synthesis_perturbed_img.copy() synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0) # if self.is_perform(0.9, 0.1) or repeat_time > 5: # # if self.is_perform(0.1, 0.9) and repeat_time > 9: # # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (7, 7), 0) # # else: # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0) # else: # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0) self.synthesis_perturbed_img[self.foreORbackground_label == 1] = synthesis_perturbed_img_filter[self.foreORbackground_label == 1] perturbed_bg_img = perturbed_bg_img.astype(np.float32) perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1 - 
self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label self.synthesis_perturbed_img += perturbed_bg_img HSV perturbed_bg_img = perturbed_bg_img.astype(np.float32) if self.is_perform(0.1, 0.9): if self.is_perform(0.2, 0.8): synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV) H_, S_, V_ = (random.random()-0.2)*20, (random.random()-0.2)/8, (random.random()-0.2)*20 synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_ synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB) perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV else: perturbed_bg_img_HSV = perturbed_bg_img perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_RGB2HSV) H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/8, (random.random()-0.2)*20 perturbed_bg_img_HSV[:, :, 0], perturbed_bg_img_HSV[:, :, 1], perturbed_bg_img_HSV[:, :, 2] = perturbed_bg_img_HSV[:, :, 0]-H_, perturbed_bg_img_HSV[:, :, 1]-S_, perturbed_bg_img_HSV[:, :, 2]-V_ perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_HSV2RGB) perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label self.synthesis_perturbed_img += perturbed_bg_img_HSV # self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] else: synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img # synthesis_perturbed_img_clip_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img[np.sum(self.synthesis_perturbed_img, 2) == 771] synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV) H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/10, (random.random()-0.4)*20 synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_ synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB) self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV HSV_v2 # if self.is_perform(1, 0): # if self.is_perform(1, 0): # self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] # cv2.imwrite(self.save_path+'clip/'+perfix_+'_'+fold_curve+str(perturbed_time)+'-'+str(repeat_time)+'.png', synthesis_perturbed_img_clip) # grey = np.around(self.synthesis_perturbed_img[:, :, 0] * 
0.2989 + self.synthesis_perturbed_img[:, :, 1] * 0.5870 + self.synthesis_perturbed_img[:, :, 0] * 0.1140).astype(np.int16) # synthesis_perturbed_grey = np.concatenate((grey.reshape(self.new_shape[0], self.new_shape[1], 1), label), axis=2) # self.synthesis_perturbed_grey = np.zeros_like(synthesis_perturbed_grey, dtype=np.float32) # self.synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] # self.synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] blur draw fiducial points forward-begin # flat_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32) # cv2.remap(self.synthesis_perturbed_color[:, :, :3], self.forward_mapping[:, :, 1], self.forward_mapping[:, :, 0], cv2.INTER_LINEAR, flat_img) # cv2.imwrite(self.save_path + 'outputs/1.jpg', flat_img) forward-end # with open(self.save_path+'grey/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f: # pickle_perturbed_data = pickle.dumps(self.synthesis_perturbed_grey) # f.write(pickle_perturbed_data) # cv2.imwrite(self.save_path+'grey_im/'+perfix_+'_'+fold_curve+'.png', self.synthesis_perturbed_color[:, :, :1]) # cv2.imwrite(self.save_path + 'scan/' + self.save_suffix + '_' + str(m) + '.png', self.origin_img) # if not os.path.exists(save_path + 'grey/'): # os.makedirs(save_path + 'grey/') # 31 # repeat_time = min(max(round(np.random.normal(8, 4)), 1), 12) # random.randint(1, 2) # min(max(round(np.random.normal(8, 4)), 1), 12) # repeat_time = min(max(round(np.random.normal(6, 4)), 1), 10) # print('end') # parser.set_defaults(output_path='test') # print frequency | 1.858941 | 2 |
tweet_evaluator.py | tw-ddis/Gnip-Tweet-Evaluation | 3 | 728 | #!/usr/bin/env python
import argparse
import logging
try:
import ujson as json
except ImportError:
import json
import sys
import datetime
import os
import importlib
from gnip_tweet_evaluation import analysis,output
"""
Perform audience and/or conversation analysis on a set of Tweets.
"""
logger = logging.getLogger('analysis')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
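# --- Illustrative sketch (added for clarity; not part of this script) ---
# analysis.deserialize_tweets(), used below, lives in the gnip_tweet_evaluation
# package and is not shown here; a typical implementation yields one JSON-decoded
# Tweet per input line and skips records that fail to parse, roughly as follows
# (json here is the ujson/json module imported at the top of this script):
def deserialize_tweets_sketch(line_iterable):
    for line in line_iterable:
        try:
            yield json.loads(line)
        except ValueError:
            # skip malformed lines rather than aborting the whole run
            continue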
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-n","--identifier",dest="unique_identifier", default='0',type=str,
help="a unique name to identify the conversation/audience; default is '%(default)s'")
parser.add_argument("-c","--do-conversation-analysis",dest="do_conversation_analysis",action="store_true",default=False,
help="do conversation analysis on Tweets")
parser.add_argument("-a","--do-audience-analysis",dest="do_audience_analysis",action="store_true",default=False,
help="do audience analysis on users")
parser.add_argument("-i","--input-file-name",dest="input_file_name",default=None,
help="file containing Tweet data; take input from stdin if not present")
parser.add_argument('-o','--output-dir',dest='output_directory',default=os.environ['HOME'] + '/tweet_evaluation/',
help='directory for output files; default is %(default)s')
parser.add_argument('-b','--baseline-input-file',dest='baseline_input_name',default=None,
help='Tweets against which to run a relative analysis')
args = parser.parse_args()
# get the time right now, to use in output naming
time_now = datetime.datetime.now()
output_directory = '{0}/{1:04d}/{2:02d}/{3:02d}/'.format(args.output_directory.rstrip('/')
,time_now.year
,time_now.month
,time_now.day
)
# get the empty results object, which defines the measurements to be run
results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)
baseline_results = None
if args.baseline_input_name is not None:
baseline_results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)
# manage input sources, file opening, and deserialization
if args.input_file_name is not None:
tweet_generator = analysis.deserialize_tweets(open(args.input_file_name))
else:
tweet_generator = analysis.deserialize_tweets(sys.stdin)
# run analysis
analysis.analyze_tweets(tweet_generator, results)
    # run baseline analysis, if requested
if baseline_results is not None:
baseline_tweet_generator = analysis.deserialize_tweets(open(args.baseline_input_name))
analysis.analyze_tweets(baseline_tweet_generator, baseline_results)
results = analysis.compare_results(results,baseline_results)
# dump the output
output.dump_results(results, output_directory, args.unique_identifier)
| #!/usr/bin/env python
import argparse
import logging
try:
import ujson as json
except ImportError:
import json
import sys
import datetime
import os
import importlib
from gnip_tweet_evaluation import analysis,output
"""
Perform audience and/or conversation analysis on a set of Tweets.
"""
logger = logging.getLogger('analysis')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-n","--identifier",dest="unique_identifier", default='0',type=str,
help="a unique name to identify the conversation/audience; default is '%(default)s'")
parser.add_argument("-c","--do-conversation-analysis",dest="do_conversation_analysis",action="store_true",default=False,
help="do conversation analysis on Tweets")
parser.add_argument("-a","--do-audience-analysis",dest="do_audience_analysis",action="store_true",default=False,
help="do audience analysis on users")
parser.add_argument("-i","--input-file-name",dest="input_file_name",default=None,
help="file containing Tweet data; take input from stdin if not present")
parser.add_argument('-o','--output-dir',dest='output_directory',default=os.environ['HOME'] + '/tweet_evaluation/',
help='directory for output files; default is %(default)s')
parser.add_argument('-b','--baseline-input-file',dest='baseline_input_name',default=None,
help='Tweets against which to run a relative analysis')
args = parser.parse_args()
# get the time right now, to use in output naming
time_now = datetime.datetime.now()
output_directory = '{0}/{1:04d}/{2:02d}/{3:02d}/'.format(args.output_directory.rstrip('/')
,time_now.year
,time_now.month
,time_now.day
)
# get the empty results object, which defines the measurements to be run
results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)
baseline_results = None
if args.baseline_input_name is not None:
baseline_results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)
# manage input sources, file opening, and deserialization
if args.input_file_name is not None:
tweet_generator = analysis.deserialize_tweets(open(args.input_file_name))
else:
tweet_generator = analysis.deserialize_tweets(sys.stdin)
# run analysis
analysis.analyze_tweets(tweet_generator, results)
    # run baseline analysis, if requested
if baseline_results is not None:
baseline_tweet_generator = analysis.deserialize_tweets(open(args.baseline_input_name))
analysis.analyze_tweets(baseline_tweet_generator, baseline_results)
results = analysis.compare_results(results,baseline_results)
# dump the output
output.dump_results(results, output_directory, args.unique_identifier)
| en | 0.779311 | #!/usr/bin/env python Perform audience and/or conversation analysis on a set of Tweets. # get the time right now, to use in output naming # get the empty results object, which defines the measurements to be run # manage input sources, file opening, and deserialization # run analysis # run baseline analysis, if requests # dump the output | 2.799676 | 3 |
app.py | admiral-aokiji/whatsapp-bot | 0 | 729 | <gh_stars>0
from flask import Flask, request
import os
from twilio.twiml.messaging_response import MessagingResponse
from selenium import webdriver
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options)
app = Flask(__name__)
import utils
@app.route("/")
def hello():
return "Hello World!"
@app.route('/bot', methods=['POST'])
def bot():
incoming_msg = request.values.get('Body', '')
print(incoming_msg)
resp = MessagingResponse()
msg = resp.message()
responded = False
if incoming_msg in ['Hi', 'Hey', 'Menu']:
		text = f'Hello\n For any suggestions or requests 👇 \n 📞 : 9537701631 \n ✉ : <EMAIL> \n\n Please enter one of the following options 👇 \n *TPC*. TPC portal willingness \n *B*. __________. '
msg.body(text)
responded = True
elif 'TPC' in incoming_msg:
if incoming_msg == 'TPC':
text = 'Menu of options for TPC command'
msg.body(text)
responded = True
utils.portalLogin(os.environ.get('TPC_EMAIL'),os.environ.get('TPC_PWD'))
if incoming_msg == 'TPC -willingness -short' or incoming_msg == 'TPC -w -s':
utils.getWillingness()
utils.shortenWillingness()
elif incoming_msg == 'TPC -willingness -details' or incoming_msg == 'TPC -w -d':
utils.portalLogin(os.environ.get('TPC_EMAIL'),os.environ.get('TPC_PWD'))
utils.getWillingness()
		elif incoming_msg.startswith('TPC -experience') or (incoming_msg.startswith('TPC -e ') and len(incoming_msg) > 7):
utils.portalLogin(os.environ.get('TPC_EMAIL'),os.environ.get('TPC_PWD'))
companyName = incoming_msg.split(' ')[2]
print(companyName)
utils.getInterviewExperience(companyName)
else:
# send custom error msg for TPC commands
pass
else:
# Checking for formality
if responded == False:
msg.body('Please enter valid commands')
return str(resp)
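# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The TPC branch in bot() above matches raw message strings; a small parser such as
# this one (names are illustrative only) would make the flag handling easier to extend.
def parse_tpc_command_sketch(message):
	parts = message.strip().split()
	if not parts or parts[0] != 'TPC':
		return None
	flags = [p for p in parts[1:] if p.startswith('-')]
	args = [p for p in parts[1:] if not p.startswith('-')]
	# e.g. 'TPC -experience Google' -> {'flags': ['-experience'], 'args': ['Google']}
	return {'flags': flags, 'args': args}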
if __name__ == "__main__":
app.run(host="localhost", port=5000, debug=True)
| from flask import Flask, request
import os
from twilio.twiml.messaging_response import MessagingResponse
from selenium import webdriver
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options)
app = Flask(__name__)
import utils
@app.route("/")
def hello():
return "Hello World!"
@app.route('/bot', methods=['POST'])
def bot():
incoming_msg = request.values.get('Body', '')
print(incoming_msg)
resp = MessagingResponse()
msg = resp.message()
responded = False
if incoming_msg in ['Hi', 'Hey', 'Menu']:
		text = f'Hello\n For any suggestions or requests 👇 \n 📞 : 9537701631 \n ✉ : <EMAIL> \n\n Please enter one of the following options 👇 \n *TPC*. TPC portal willingness \n *B*. __________. '
msg.body(text)
responded = True
elif 'TPC' in incoming_msg:
if incoming_msg == 'TPC':
text = 'Menu of options for TPC command'
msg.body(text)
responded = True
utils.portalLogin(os.environ.get('TPC_EMAIL'),os.environ.get('TPC_PWD'))
if incoming_msg == 'TPC -willingness -short' or incoming_msg == 'TPC -w -s':
utils.getWillingness()
utils.shortenWillingness()
elif incoming_msg == 'TPC -willingness -details' or incoming_msg == 'TPC -w -d':
utils.portalLogin(os.environ.get('TPC_EMAIL'),os.environ.get('TPC_PWD'))
utils.getWillingness()
		elif incoming_msg.startswith('TPC -experience') or (incoming_msg.startswith('TPC -e ') and len(incoming_msg) > 7):
utils.portalLogin(os.environ.get('TPC_EMAIL'),os.environ.get('TPC_PWD'))
companyName = incoming_msg.split(' ')[2]
print(companyName)
utils.getInterviewExperience(companyName)
else:
# send custom error msg for TPC commands
pass
else:
# Checking for formality
if responded == False:
msg.body('Please enter valid commands')
return str(resp)
if __name__ == "__main__":
app.run(host="localhost", port=5000, debug=True) | en | 0.589405 | # send custom error msg for TPC commands # Checking for formality | 2.463363 | 2 |
nflfastpy/errors.py | hchaozhe/nflfastpy | 47 | 730 | """
Custom exceptions for nflfastpy module
"""
class SeasonNotFoundError(Exception):
pass | """
Custom exceptions for nflfastpy module
"""
class SeasonNotFoundError(Exception):
pass | en | 0.508778 | Custom exceptions for nflfastpy module | 1.586333 | 2 |
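# --- Illustrative sketch (added for clarity; not part of nflfastpy/errors.py) ---
# One way the SeasonNotFoundError defined above might be used by a data loader;
# load_season_sketch() is a placeholder name, not necessarily the real nflfastpy API.
def load_season_sketch(season, available_seasons):
    if season not in available_seasons:
        raise SeasonNotFoundError('no data found for season %s' % season)
    return 'data for season %s' % season

# Usage (illustrative only):
#   try:
#       load_season_sketch(1990, available_seasons=range(1999, 2022))
#   except SeasonNotFoundError as err:
#       print(err)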
assignment1/cs231n/classifiers/neural_net.py | zeevikal/CS231n-spring2018 | 0 | 731 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
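# --- Illustrative helper (added for clarity; not part of the original assignment file) ---
# The analytic gradients returned by TwoLayerNet.loss() below are typically verified
# against a centered-difference numerical gradient such as this:
def eval_numerical_gradient_sketch(f, x, h=1e-5):
    """Estimate the gradient of f at x numerically (x is perturbed in place and restored)."""
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        old_value = x[ix]
        x[ix] = old_value + h
        fxph = f(x)        # f(x + h)
        x[ix] = old_value - h
        fxmh = f(x)        # f(x - h)
        x[ix] = old_value  # restore the original value
        grad[ix] = (fxph - fxmh) / (2 * h)
        it.iternext()
    return grad
# e.g. (assuming a trained-or-untrained net plus data X, y exist):
#   grad_W1 = eval_numerical_gradient_sketch(lambda W: net.loss(X, y, reg=0.05)[0], net.params['W1'])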
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension
of N, a hidden layer dimension of H, and performs classification over C
classes.
We train the network with a softmax loss function and L2 regularization on
the weight matrices. The network uses a ReLU nonlinearity after the first
fully connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each
class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values
and biases are initialized to zero. Weights and biases are stored in
the variable self.params, which is a dictionary with the following keys
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each
y[i] is an integer in the range 0 <= y[i] < C. This parameter is
optional; if it is not passed then we only return scores, and if it
is passed then we instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c]
is the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of
training samples.
- grads: Dictionary mapping parameter names to gradients of those
parameters with respect to the loss function; has the same keys as
self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#######################################################################
# TODO: Perform the forward pass, computing the class scores for the #
# input. Store the result in the scores variable, which should be an #
# array of shape (N, C). #
#######################################################################
scores1 = X.dot(W1) + b1 # FC1
X2 = np.maximum(0, scores1) # ReLU FC1
scores = X2.dot(W2) + b2 # FC2
#######################################################################
# END OF YOUR CODE #
#######################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
        scores -= np.max(scores, axis=1, keepdims=True)  # fix numerical instability: shift each row's max score to 0
scores_exp = np.exp(scores)
probs = scores_exp / np.sum(scores_exp, axis=1, keepdims=True)
# Compute the loss
loss = None
#######################################################################
# TODO: Finish the forward pass, and compute the loss. This should #
# include both the data loss and L2 regularization for W1 and W2. #
# Store the result in the variable loss, which should be a scalar. Use#
# the Softmax classifier loss. #
#######################################################################
correct_probs = -np.log(probs[np.arange(N), y])
# L_i = -log(e^correct_score/sum(e^scores))) = -log(correct_probs)
loss = np.sum(correct_probs)
loss /= N
# L2 regularization WRT W1 and W2
loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
#######################################################################
# END OF YOUR CODE #
#######################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
# gradient of loss_i WRT scores_k
# dL_i/ds_k = probs_k-1(y_i == k)
# this means the gradient is the score for "other" classes and score-1
# for the target class
d_scores = probs.copy()
d_scores[np.arange(N), y] -= 1
d_scores /= N
# W2 were multiplied with X2, by chain rule and multiplication
# derivative, WRT W2 we need to multiply downstream derivative by X2
d_W2 = X2.T.dot(d_scores)
        # b2 was only added, so its local derivative is 1; by the chain rule we just
        # sum the downstream gradient d_scores over the batch
d_b2 = np.sum(d_scores, axis=0)
# W1 is upstream of X2, so we continue this way
d_X2 = d_scores.dot(W2.T)
# ReLU derivative is 1 for > 0, else 0
d_scores1 = d_X2 * (scores1 > 0)
d_W1 = X.T.dot(d_scores1)
# b1 gradient
d_b1 = d_scores1.sum(axis=0)
# regularization gradient (reg*W2^2)
d_W2 += reg * 2 * W2
d_W1 += reg * 2 * W1
grads['W1'] = d_W1
grads['b1'] = d_b1
grads['W2'] = d_W2
grads['b2'] = d_b2
#######################################################################
# END OF YOUR CODE #
#######################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
        - y: A numpy array of shape (N,) giving training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning
rate after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
###################################################################
# TODO: Create a random minibatch of training data and labels, #
# storing them in X_batch and y_batch respectively. #
###################################################################
# random indexes to sample training data/labels
sample_idx = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[sample_idx]
y_batch = y[sample_idx]
###################################################################
# END OF YOUR CODE #
###################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
###################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params)#
# using stochastic gradient descent. You'll need to use the #
# gradients stored in the grads dictionary defined above. #
###################################################################
# For each weight in network parameters, update it with the
# corresponding calculated gradient
for key in self.params:
self.params[key] -= learning_rate * grads[key]
###################################################################
# END OF YOUR CODE #
###################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points
to classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each
of the elements of X. For all i, y_pred[i] = c means that X[i] is
predicted to have class c, where 0 <= c < C.
"""
y_pred = None
#######################################################################
# TODO: Implement this function; it should be VERY simple! #
#######################################################################
y_pred = np.argmax(self.loss(X), axis=1)
#######################################################################
# END OF YOUR CODE #
#######################################################################
return y_pred | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension
of N, a hidden layer dimension of H, and performs classification over C
classes.
We train the network with a softmax loss function and L2 regularization on
the weight matrices. The network uses a ReLU nonlinearity after the first
fully connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each
class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values
and biases are initialized to zero. Weights and biases are stored in
the variable self.params, which is a dictionary with the following keys
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each
y[i] is an integer in the range 0 <= y[i] < C. This parameter is
optional; if it is not passed then we only return scores, and if it
is passed then we instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c]
is the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of
training samples.
- grads: Dictionary mapping parameter names to gradients of those
parameters with respect to the loss function; has the same keys as
self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#######################################################################
# TODO: Perform the forward pass, computing the class scores for the #
# input. Store the result in the scores variable, which should be an #
# array of shape (N, C). #
#######################################################################
scores1 = X.dot(W1) + b1 # FC1
X2 = np.maximum(0, scores1) # ReLU FC1
scores = X2.dot(W2) + b2 # FC2
#######################################################################
# END OF YOUR CODE #
#######################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
        scores -= np.max(scores, axis=1, keepdims=True)  # fix numerical instability: shift each row's max score to 0
scores_exp = np.exp(scores)
probs = scores_exp / np.sum(scores_exp, axis=1, keepdims=True)
# Compute the loss
loss = None
#######################################################################
# TODO: Finish the forward pass, and compute the loss. This should #
# include both the data loss and L2 regularization for W1 and W2. #
# Store the result in the variable loss, which should be a scalar. Use#
# the Softmax classifier loss. #
#######################################################################
correct_probs = -np.log(probs[np.arange(N), y])
# L_i = -log(e^correct_score/sum(e^scores))) = -log(correct_probs)
loss = np.sum(correct_probs)
loss /= N
# L2 regularization WRT W1 and W2
loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
#######################################################################
# END OF YOUR CODE #
#######################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
# gradient of loss_i WRT scores_k
# dL_i/ds_k = probs_k-1(y_i == k)
# this means the gradient is the score for "other" classes and score-1
# for the target class
d_scores = probs.copy()
d_scores[np.arange(N), y] -= 1
d_scores /= N
# W2 were multiplied with X2, by chain rule and multiplication
# derivative, WRT W2 we need to multiply downstream derivative by X2
d_W2 = X2.T.dot(d_scores)
        # b2 was only added, so its local derivative is 1; by the chain rule we just
        # sum the downstream gradient d_scores over the batch
d_b2 = np.sum(d_scores, axis=0)
# W1 is upstream of X2, so we continue this way
d_X2 = d_scores.dot(W2.T)
# ReLU derivative is 1 for > 0, else 0
d_scores1 = d_X2 * (scores1 > 0)
d_W1 = X.T.dot(d_scores1)
# b1 gradient
d_b1 = d_scores1.sum(axis=0)
# regularization gradient (reg*W2^2)
d_W2 += reg * 2 * W2
d_W1 += reg * 2 * W1
grads['W1'] = d_W1
grads['b1'] = d_b1
grads['W2'] = d_W2
grads['b2'] = d_b2
#######################################################################
# END OF YOUR CODE #
#######################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
        - y: A numpy array of shape (N,) giving training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning
rate after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
###################################################################
# TODO: Create a random minibatch of training data and labels, #
# storing them in X_batch and y_batch respectively. #
###################################################################
# random indexes to sample training data/labels
sample_idx = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[sample_idx]
y_batch = y[sample_idx]
###################################################################
# END OF YOUR CODE #
###################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
###################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params)#
# using stochastic gradient descent. You'll need to use the #
# gradients stored in the grads dictionary defined above. #
###################################################################
# For each weight in network parameters, update it with the
# corresponding calculated gradient
for key in self.params:
self.params[key] -= learning_rate * grads[key]
###################################################################
# END OF YOUR CODE #
###################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
        classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points
to classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each
of the elements of X. For all i, y_pred[i] = c means that X[i] is
predicted to have class c, where 0 <= c < C.
"""
y_pred = None
#######################################################################
# TODO: Implement this function; it should be VERY simple! #
#######################################################################
y_pred = np.argmax(self.loss(X), axis=1)
#######################################################################
# END OF YOUR CODE #
#######################################################################
return y_pred | en | 0.52014 | A two-layer fully-connected neural network. The net has an input dimension of N, a hidden layer dimension of H, and performs classification over C classes. We train the network with a softmax loss function and L2 regularization on the weight matrices. The network uses a ReLU nonlinearity after the first fully connected layer. In other words, the network has the following architecture: input - fully connected layer - ReLU - fully connected layer - softmax The outputs of the second fully-connected layer are the scores for each class. Initialize the model. Weights are initialized to small random values and biases are initialized to zero. Weights and biases are stored in the variable self.params, which is a dictionary with the following keys W1: First layer weights; has shape (D, H) b1: First layer biases; has shape (H,) W2: Second layer weights; has shape (H, C) b2: Second layer biases; has shape (C,) Inputs: - input_size: The dimension D of the input data. - hidden_size: The number of neurons H in the hidden layer. - output_size: The number of classes C. Compute the loss and gradients for a two layer fully connected neural network. Inputs: - X: Input data of shape (N, D). Each X[i] is a training sample. - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is an integer in the range 0 <= y[i] < C. This parameter is optional; if it is not passed then we only return scores, and if it is passed then we instead return the loss and gradients. - reg: Regularization strength. Returns: If y is None, return a matrix scores of shape (N, C) where scores[i, c] is the score for class c on input X[i]. If y is not None, instead return a tuple of: - loss: Loss (data loss and regularization loss) for this batch of training samples. - grads: Dictionary mapping parameter names to gradients of those parameters with respect to the loss function; has the same keys as self.params. # Unpack variables from the params dictionary # Compute the forward pass ####################################################################### # TODO: Perform the forward pass, computing the class scores for the # # input. Store the result in the scores variable, which should be an # # array of shape (N, C). # ####################################################################### # FC1 # ReLU FC1 # FC2 ####################################################################### # END OF YOUR CODE # ####################################################################### # If the targets are not given then jump out, we're done # Fix Number instability # Compute the loss ####################################################################### # TODO: Finish the forward pass, and compute the loss. This should # # include both the data loss and L2 regularization for W1 and W2. # # Store the result in the variable loss, which should be a scalar. Use# # the Softmax classifier loss. # ####################################################################### # L_i = -log(e^correct_score/sum(e^scores))) = -log(correct_probs) # L2 regularization WRT W1 and W2 ####################################################################### # END OF YOUR CODE # ####################################################################### # Backward pass: compute gradients ############################################################################# # TODO: Compute the backward pass, computing the derivatives of the weights # # and biases. Store the results in the grads dictionary. 
For example, # # grads['W1'] should store the gradient on W1, and be a matrix of same size # ############################################################################# # gradient of loss_i WRT scores_k # dL_i/ds_k = probs_k-1(y_i == k) # this means the gradient is the score for "other" classes and score-1 # for the target class # W2 were multiplied with X2, by chain rule and multiplication # derivative, WRT W2 we need to multiply downstream derivative by X2 # b2 was added, so it's d is 1 but we must multiply it with chain rule # (downstream), in this case d_scores # W1 is upstream of X2, so we continue this way # ReLU derivative is 1 for > 0, else 0 # b1 gradient # regularization gradient (reg*W2^2) ####################################################################### # END OF YOUR CODE # ####################################################################### Train this neural network using stochastic gradient descent. Inputs: - X: A numpy array of shape (N, D) giving training data. - y: A numpy array f shape (N,) giving training labels; y[i] = c means that X[i] has label c, where 0 <= c < C. - X_val: A numpy array of shape (N_val, D) giving validation data. - y_val: A numpy array of shape (N_val,) giving validation labels. - learning_rate: Scalar giving learning rate for optimization. - learning_rate_decay: Scalar giving factor used to decay the learning rate after each epoch. - reg: Scalar giving regularization strength. - num_iters: Number of steps to take when optimizing. - batch_size: Number of training examples to use per step. - verbose: boolean; if true print progress during optimization. # Use SGD to optimize the parameters in self.model ################################################################### # TODO: Create a random minibatch of training data and labels, # # storing them in X_batch and y_batch respectively. # ################################################################### # random indexes to sample training data/labels ################################################################### # END OF YOUR CODE # ################################################################### # Compute loss and gradients using the current minibatch ################################################################### # TODO: Use the gradients in the grads dictionary to update the # # parameters of the network (stored in the dictionary self.params)# # using stochastic gradient descent. You'll need to use the # # gradients stored in the grads dictionary defined above. # ################################################################### # For each weight in network parameters, update it with the # corresponding calculated gradient ################################################################### # END OF YOUR CODE # ################################################################### # Every epoch, check train and val accuracy and decay learning rate # Check accuracy # Decay learning rate Use the trained weights of this two-layer network to predict labels for data points. For each data point we predict scores for each of the C classes, and assign each data point to the class with the highest score Inputs: - X: A numpy array of shape (N, D) giving N D-dimensional data points to classify. Returns: - y_pred: A numpy array of shape (N,) giving predicted labels for each of the elements of X. For all i, y_pred[i] = c means that X[i] is predicted to have class c, where 0 <= c < C. 
####################################################################### # TODO: Implement this function; it should be VERY simple! # ####################################################################### ####################################################################### # END OF YOUR CODE # ####################################################################### | 4.227996 | 4 |
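A minimal, self-contained NumPy sketch of the softmax loss and gradient logic the comments above describe (stable shift by the row max, class probabilities, and d_scores equal to the probabilities with 1 subtracted at the true labels). The batch size, class count, and random data below are illustrative assumptions, not values taken from the code above.

import numpy as np

# Toy batch: N samples, C classes (illustrative sizes only).
N, C = 4, 3
rng = np.random.default_rng(0)
scores = rng.normal(size=(N, C))          # stand-in for the network's class scores
y = rng.integers(0, C, size=N)            # stand-in for the true labels

# Numerically stable softmax: subtract the per-row max before exponentiating.
shifted = scores - scores.max(axis=1, keepdims=True)
exp_scores = np.exp(shifted)
probs = exp_scores / exp_scores.sum(axis=1, keepdims=True)

# Data loss: mean negative log-probability of the correct class.
data_loss = -np.log(probs[np.arange(N), y]).mean()

# Gradient of the loss w.r.t. the scores: probs with 1 subtracted at the true
# class, averaged over the batch; this is the d_scores used by the backward pass.
d_scores = probs.copy()
d_scores[np.arange(N), y] -= 1
d_scores /= N

print(data_loss, d_scores.shape)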
dynamic_setting/tests/test_models.py | koralarts/django-dynamic-settings | 2 | 732 | from django.test import TestCase
from dynamic_setting.models import Setting
class SettingTestCase(TestCase):
def _create_setting(self, name, **kwargs):
return Setting.objects.create(name=name, **kwargs)
def test_create_setting(self):
""" Test Creating a new Setting. """
name = 'TEST_SETTING'
data = 'Setting Data'
setting = self._create_setting(name, data=data)
self.assertEqual(setting.name, name)
self.assertEqual(setting.__str__(), name)
self.assertEqual(setting.data, data)
def test_create_setting_no_data(self):
""" Test Creating a new setting without Data. """
name = 'TEST_SETTING'
data = '-'
setting = self._create_setting(name)
self.assertEqual(setting.name, name)
self.assertEqual(setting.__str__(), name)
self.assertEqual(setting.data, data)
def test_delete_setting(self):
""" Test Deleting a setting object. """
name = 'TEST_SETTING'
setting = self._create_setting(name)
setting_pk = setting.pk
setting.delete()
try:
Setting.objects.get(pk=setting_pk)
except Setting.DoesNotExist:
pass
else:
self.fail('Setting with ID {} should not exist.'.format(setting_pk))
def test_get_setting(self):
""" Test Getting a setting object. """
name = 'TEST_SETTING'
data = 'Setting data'
setting = self._create_setting(name, data=data)
try:
setting2 = Setting.objects.get(pk=setting.pk)
except Setting.DoesNotExist:
self.fail('Setting with ID {} should exist'.format(setting.pk))
self.assertEqual(setting.name, setting2.name)
self.assertEqual(setting.__str__(), setting2.__str__())
self.assertEqual(setting.data, setting2.data)
self.assertEqual(setting.pk, setting2.pk)
def test_update_setting(self):
""" Test Updating a setting object. """
name = 'TEST_SETTING'
data = 'Setting data'
data2 = 'New Setting Data'
setting = self._create_setting(name, data=data)
setting.data = data2
setting.save()
setting2 = Setting.objects.get(pk=setting.pk)
self.assertEqual(setting2.data, data2) | from django.test import TestCase
from dynamic_setting.models import Setting
class SettingTestCase(TestCase):
def _create_setting(self, name, **kwargs):
return Setting.objects.create(name=name, **kwargs)
def test_create_setting(self):
""" Test Creating a new Setting. """
name = 'TEST_SETTING'
data = 'Setting Data'
setting = self._create_setting(name, data=data)
self.assertEqual(setting.name, name)
self.assertEqual(setting.__str__(), name)
self.assertEqual(setting.data, data)
def test_create_setting_no_data(self):
""" Test Creating a new setting without Data. """
name = 'TEST_SETTING'
data = '-'
setting = self._create_setting(name)
self.assertEqual(setting.name, name)
self.assertEqual(setting.__str__(), name)
self.assertEqual(setting.data, data)
def test_delete_setting(self):
""" Test Deleting a setting object. """
name = 'TEST_SETTING'
setting = self._create_setting(name)
setting_pk = setting.pk
setting.delete()
try:
Setting.objects.get(pk=setting_pk)
except Setting.DoesNotExist:
pass
else:
self.fail('Setting with ID {} should not exist.'.format(setting_pk))
def test_get_setting(self):
""" Test Getting a setting object. """
name = 'TEST_SETTING'
data = 'Setting data'
setting = self._create_setting(name, data=data)
try:
setting2 = Setting.objects.get(pk=setting.pk)
except Setting.DoesNotExist:
self.fail('Setting with ID {} should exist'.format(setting.pk))
self.assertEqual(setting.name, setting2.name)
self.assertEqual(setting.__str__(), setting2.__str__())
self.assertEqual(setting.data, setting2.data)
self.assertEqual(setting.pk, setting2.pk)
def test_update_setting(self):
""" Test Updating a setting object. """
name = 'TEST_SETTING'
data = 'Setting data'
data2 = 'New Setting Data'
setting = self._create_setting(name, data=data)
setting.data = data2
setting.save()
setting2 = Setting.objects.get(pk=setting.pk)
self.assertEqual(setting2.data, data2) | en | 0.779235 | Test Creating a new Setting. Test Creating a new setting without Data. Test Deleting a setting object. Test Getting a setting object. Test Updating a setting object. | 2.826155 | 3 |
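The tests above exercise a `Setting` model that is not shown in this file. A minimal sketch of a model consistent with their behaviour (a `name` field, a `data` field that defaults to '-', and `__str__` returning the name) might look like the following; the field types and max lengths are assumptions, not taken from the actual package.

# Hypothetical dynamic_setting/models.py implied by the tests above.
from django.db import models


class Setting(models.Model):
    # Field types and lengths are illustrative assumptions.
    name = models.CharField(max_length=255, unique=True)
    data = models.CharField(max_length=255, default='-')

    def __str__(self):
        return self.name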
homeassistant/components/zamg/weather.py | MrDelik/core | 30,023 | 733 | """Sensor for data from Austrian Zentralanstalt für Meteorologie."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
# Reuse data and API logic from the sensor implementation
from .sensor import (
ATTRIBUTION,
CONF_STATION_ID,
ZamgData,
closest_station,
zamg_stations,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION_ID): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the ZAMG weather platform."""
name = config.get(CONF_NAME)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
station_id = config.get(CONF_STATION_ID) or closest_station(
latitude, longitude, hass.config.config_dir
)
if station_id not in zamg_stations(hass.config.config_dir):
_LOGGER.error(
"Configured ZAMG %s (%s) is not a known station",
CONF_STATION_ID,
station_id,
)
return
probe = ZamgData(station_id=station_id)
try:
probe.update()
except (ValueError, TypeError) as err:
_LOGGER.error("Received error from ZAMG: %s", err)
return
add_entities([ZamgWeather(probe, name)], True)
class ZamgWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, zamg_data, stationname=None):
"""Initialise the platform with a data instance and station name."""
self.zamg_data = zamg_data
self.stationname = stationname
@property
def name(self):
"""Return the name of the sensor."""
return (
self.stationname
or f"ZAMG {self.zamg_data.data.get('Name') or '(unknown station)'}"
)
@property
def condition(self):
"""Return the current condition."""
return None
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def temperature(self):
"""Return the platform temperature."""
return self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
return self.zamg_data.get_data(ATTR_WEATHER_PRESSURE)
@property
def humidity(self):
"""Return the humidity."""
return self.zamg_data.get_data(ATTR_WEATHER_HUMIDITY)
@property
def wind_speed(self):
"""Return the wind speed."""
return self.zamg_data.get_data(ATTR_WEATHER_WIND_SPEED)
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self.zamg_data.get_data(ATTR_WEATHER_WIND_BEARING)
def update(self):
"""Update current conditions."""
self.zamg_data.update()
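# Illustrative, self-contained sketch (not part of this integration): the
# `closest_station` helper imported from the sensor module conceptually returns
# the station nearest to the configured coordinates. The helper names, the
# distance formula choice, and the station data below are assumptions added
# for illustration only.
import math


def _haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance between two latitude/longitude points in km."""
    earth_radius_km = 6371.0
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    d_phi = math.radians(lat2 - lat1)
    d_lambda = math.radians(lon2 - lon1)
    a = math.sin(d_phi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lambda / 2) ** 2
    return 2 * earth_radius_km * math.asin(math.sqrt(a))


def _closest_station_sketch(latitude, longitude, stations):
    """Return the id of the station nearest to the given coordinates."""
    return min(
        stations, key=lambda sid: _haversine_km(latitude, longitude, *stations[sid])
    )


if __name__ == "__main__":
    # Hypothetical station coordinates, for demonstration only.
    demo_stations = {"11035": (48.25, 16.36), "11036": (48.11, 16.57)}
    print(_closest_station_sketch(48.2, 16.37, demo_stations))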
| """Sensor for data from Austrian Zentralanstalt für Meteorologie."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
# Reuse data and API logic from the sensor implementation
from .sensor import (
ATTRIBUTION,
CONF_STATION_ID,
ZamgData,
closest_station,
zamg_stations,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION_ID): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the ZAMG weather platform."""
name = config.get(CONF_NAME)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
station_id = config.get(CONF_STATION_ID) or closest_station(
latitude, longitude, hass.config.config_dir
)
if station_id not in zamg_stations(hass.config.config_dir):
_LOGGER.error(
"Configured ZAMG %s (%s) is not a known station",
CONF_STATION_ID,
station_id,
)
return
probe = ZamgData(station_id=station_id)
try:
probe.update()
except (ValueError, TypeError) as err:
_LOGGER.error("Received error from ZAMG: %s", err)
return
add_entities([ZamgWeather(probe, name)], True)
class ZamgWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, zamg_data, stationname=None):
"""Initialise the platform with a data instance and station name."""
self.zamg_data = zamg_data
self.stationname = stationname
@property
def name(self):
"""Return the name of the sensor."""
return (
self.stationname
or f"ZAMG {self.zamg_data.data.get('Name') or '(unknown station)'}"
)
@property
def condition(self):
"""Return the current condition."""
return None
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def temperature(self):
"""Return the platform temperature."""
return self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
return self.zamg_data.get_data(ATTR_WEATHER_PRESSURE)
@property
def humidity(self):
"""Return the humidity."""
return self.zamg_data.get_data(ATTR_WEATHER_HUMIDITY)
@property
def wind_speed(self):
"""Return the wind speed."""
return self.zamg_data.get_data(ATTR_WEATHER_WIND_SPEED)
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self.zamg_data.get_data(ATTR_WEATHER_WIND_BEARING)
def update(self):
"""Update current conditions."""
self.zamg_data.update()
| en | 0.746571 | Sensor for data from Austrian Zentralanstalt für Meteorologie. # Reuse data and API logic from the sensor implementation Set up the ZAMG weather platform. Representation of a weather condition. Initialise the platform with a data instance and station name. Return the name of the sensor. Return the current condition. Return the attribution. Return the platform temperature. Return the unit of measurement. Return the pressure. Return the humidity. Return the wind speed. Return the wind bearing. Update current conditions. | 2.203988 | 2 |
rasa/model.py | martasls/rasa | 0 | 734 | import copy
import glob
import hashlib
import logging
import os
import shutil
from subprocess import CalledProcessError, DEVNULL, check_output # skipcq:BAN-B404
import tempfile
import typing
from pathlib import Path
from typing import Any, Text, Tuple, Union, Optional, List, Dict, NamedTuple
from packaging import version
from rasa.constants import MINIMUM_COMPATIBLE_VERSION
import rasa.shared.utils.io
import rasa.utils.io
from rasa.cli.utils import create_output_path
from rasa.shared.utils.cli import print_success
from rasa.shared.constants import (
CONFIG_KEYS_CORE,
CONFIG_KEYS_NLU,
CONFIG_KEYS,
DEFAULT_DOMAIN_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_CORE_SUBDIRECTORY_NAME,
DEFAULT_NLU_SUBDIRECTORY_NAME,
)
from rasa.exceptions import ModelNotFound
from rasa.utils.common import TempDirectoryPath
if typing.TYPE_CHECKING:
from rasa.shared.importers.importer import TrainingDataImporter
logger = logging.getLogger(__name__)
# Type alias for the fingerprint
Fingerprint = Dict[Text, Union[Text, List[Text], int, float]]
FINGERPRINT_FILE_PATH = "fingerprint.json"
FINGERPRINT_CONFIG_KEY = "config"
FINGERPRINT_CONFIG_CORE_KEY = "core-config"
FINGERPRINT_CONFIG_NLU_KEY = "nlu-config"
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY = "config-without-epochs"
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY = "domain"
FINGERPRINT_NLG_KEY = "nlg"
FINGERPRINT_RASA_VERSION_KEY = "version"
FINGERPRINT_STORIES_KEY = "stories"
FINGERPRINT_NLU_DATA_KEY = "messages"
FINGERPRINT_NLU_LABELS_KEY = "nlu_labels"
FINGERPRINT_PROJECT = "project"
FINGERPRINT_TRAINED_AT_KEY = "trained_at"
class Section(NamedTuple):
"""Specifies which fingerprint keys decide whether this sub-model is retrained."""
name: Text
relevant_keys: List[Text]
SECTION_CORE = Section(
name="Core model",
relevant_keys=[
FINGERPRINT_CONFIG_KEY,
FINGERPRINT_CONFIG_CORE_KEY,
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY,
FINGERPRINT_STORIES_KEY,
FINGERPRINT_RASA_VERSION_KEY,
],
)
SECTION_NLU = Section(
name="NLU model",
relevant_keys=[
FINGERPRINT_CONFIG_KEY,
FINGERPRINT_CONFIG_NLU_KEY,
FINGERPRINT_NLU_DATA_KEY,
FINGERPRINT_RASA_VERSION_KEY,
],
)
SECTION_NLG = Section(name="NLG responses", relevant_keys=[FINGERPRINT_NLG_KEY])
class FingerprintComparisonResult:
"""Container for the results of a fingerprint comparison."""
def __init__(
self,
nlu: bool = True,
core: bool = True,
nlg: bool = True,
force_training: bool = False,
):
"""Creates a `FingerprintComparisonResult` instance.
Args:
nlu: `True` if the NLU model should be retrained.
core: `True` if the Core model should be retrained.
nlg: `True` if the responses in the domain should be updated.
force_training: `True` if a training of all parts is forced.
"""
self.nlu = nlu
self.core = core
self.nlg = nlg
self.force_training = force_training
def is_training_required(self) -> bool:
"""Check if anything has to be retrained."""
return any([self.nlg, self.nlu, self.core, self.force_training])
def should_retrain_core(self) -> bool:
"""Check if the Core model has to be updated."""
return self.force_training or self.core
def should_retrain_nlg(self) -> bool:
"""Check if the responses have to be updated."""
return self.should_retrain_core() or self.nlg
def should_retrain_nlu(self) -> bool:
"""Check if the NLU model has to be updated."""
return self.force_training or self.nlu
def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath:
"""Get a model and unpack it. Raises a `ModelNotFound` exception if
no model could be found at the provided path.
Args:
model_path: Path to the zipped model. If it's a directory, the latest
trained model is returned.
Returns:
Path to the unpacked model.
"""
if not model_path:
raise ModelNotFound("No path specified.")
elif not os.path.exists(model_path):
raise ModelNotFound(f"No file or directory at '{model_path}'.")
if os.path.isdir(model_path):
model_path = get_latest_model(model_path)
if not model_path:
raise ModelNotFound(
f"Could not find any Rasa model files in '{model_path}'."
)
elif not model_path.endswith(".tar.gz"):
raise ModelNotFound(f"Path '{model_path}' does not point to a Rasa model file.")
try:
model_relative_path = os.path.relpath(model_path)
except ValueError:
model_relative_path = model_path
logger.info(f"Loading model {model_relative_path}...")
return unpack_model(model_path)
def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]:
"""Get the latest model from a path.
Args:
model_path: Path to a directory containing zipped models.
Returns:
Path to latest model in the given directory.
"""
if not os.path.exists(model_path) or os.path.isfile(model_path):
model_path = os.path.dirname(model_path)
list_of_files = glob.glob(os.path.join(model_path, "*.tar.gz"))
if len(list_of_files) == 0:
return None
return max(list_of_files, key=os.path.getctime)
def unpack_model(
model_file: Text, working_directory: Optional[Union[Path, Text]] = None
) -> TempDirectoryPath:
"""Unpack a zipped Rasa model.
Args:
model_file: Path to zipped model.
working_directory: Location where the model should be unpacked to.
If `None` a temporary directory will be created.
Returns:
Path to unpacked Rasa model.
"""
import tarfile
if working_directory is None:
working_directory = tempfile.mkdtemp()
# All files are in a subdirectory.
try:
with tarfile.open(model_file, mode="r:gz") as tar:
tar.extractall(working_directory)
logger.debug(f"Extracted model to '{working_directory}'.")
except Exception as e:
logger.error(f"Failed to extract model at {model_file}. Error: {e}")
raise
return TempDirectoryPath(working_directory)
def get_model_subdirectories(
unpacked_model_path: Text,
) -> Tuple[Optional[Text], Optional[Text]]:
"""Return paths for Core and NLU model directories, if they exist.
If neither directories exist, a `ModelNotFound` exception is raised.
Args:
unpacked_model_path: Path to unpacked Rasa model.
Returns:
Tuple (path to Core subdirectory if it exists or `None` otherwise,
path to NLU subdirectory if it exists or `None` otherwise).
"""
core_path = os.path.join(unpacked_model_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
nlu_path = os.path.join(unpacked_model_path, DEFAULT_NLU_SUBDIRECTORY_NAME)
if not os.path.isdir(core_path):
core_path = None
if not os.path.isdir(nlu_path):
nlu_path = None
if not core_path and not nlu_path:
raise ModelNotFound(
"No NLU or Core data for unpacked model at: '{}'.".format(
unpacked_model_path
)
)
return core_path, nlu_path
def create_package_rasa(
training_directory: Text,
output_filename: Text,
fingerprint: Optional[Fingerprint] = None,
) -> Text:
"""Create a zipped Rasa model from trained model files.
Args:
training_directory: Path to the directory which contains the trained
model files.
output_filename: Name of the zipped model file to be created.
fingerprint: A unique fingerprint to identify the model version.
Returns:
Path to zipped model.
"""
import tarfile
if fingerprint:
persist_fingerprint(training_directory, fingerprint)
output_directory = os.path.dirname(output_filename)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with tarfile.open(output_filename, "w:gz") as tar:
for elem in os.scandir(training_directory):
tar.add(elem.path, arcname=elem.name)
shutil.rmtree(training_directory)
return output_filename
def project_fingerprint() -> Optional[Text]:
"""Create a hash for the project in the current working directory.
Returns:
project hash
"""
try:
remote = check_output( # skipcq:BAN-B607,BAN-B603
["git", "remote", "get-url", "origin"], stderr=DEVNULL
)
return hashlib.sha256(remote).hexdigest()
except (CalledProcessError, OSError):
return None
async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprint:
"""Create a model fingerprint from its used configuration and training data.
Args:
file_importer: File importer which provides the training data and model config.
Returns:
The fingerprint.
"""
import time
config = await file_importer.get_config()
domain = await file_importer.get_domain()
stories = await file_importer.get_stories()
nlu_data = await file_importer.get_nlu_data()
responses = domain.responses
# Do a copy of the domain to not change the actual domain (shallow is enough)
domain = copy.copy(domain)
# don't include the response texts in the fingerprint.
# Their fingerprint is separate.
domain.responses = {}
return {
FINGERPRINT_CONFIG_KEY: _get_fingerprint_of_config(
config, exclude_keys=CONFIG_KEYS
),
FINGERPRINT_CONFIG_CORE_KEY: _get_fingerprint_of_config(
config, include_keys=CONFIG_KEYS_CORE
),
FINGERPRINT_CONFIG_NLU_KEY: _get_fingerprint_of_config(
config, include_keys=CONFIG_KEYS_NLU
),
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY: _get_fingerprint_of_config_without_epochs(
config
),
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY: domain.fingerprint(),
FINGERPRINT_NLG_KEY: rasa.shared.utils.io.deep_container_fingerprint(responses),
FINGERPRINT_PROJECT: project_fingerprint(),
FINGERPRINT_NLU_DATA_KEY: nlu_data.fingerprint(),
FINGERPRINT_NLU_LABELS_KEY: nlu_data.label_fingerprint(),
FINGERPRINT_STORIES_KEY: stories.fingerprint(),
FINGERPRINT_TRAINED_AT_KEY: time.time(),
FINGERPRINT_RASA_VERSION_KEY: rasa.__version__,
}
def _get_fingerprint_of_config(
config: Optional[Dict[Text, Any]],
include_keys: Optional[List[Text]] = None,
exclude_keys: Optional[List[Text]] = None,
) -> Text:
if not config:
return ""
keys = include_keys or list(filter(lambda k: k not in exclude_keys, config.keys()))
sub_config = {k: config[k] for k in keys if k in config}
return rasa.shared.utils.io.deep_container_fingerprint(sub_config)
def _get_fingerprint_of_config_without_epochs(
config: Optional[Dict[Text, Any]],
) -> Text:
if not config:
return ""
copied_config = copy.deepcopy(config)
for key in ["pipeline", "policies"]:
if copied_config.get(key):
for p in copied_config[key]:
if "epochs" in p:
del p["epochs"]
return rasa.shared.utils.io.deep_container_fingerprint(copied_config)
def fingerprint_from_path(model_path: Text) -> Fingerprint:
"""Load a persisted fingerprint.
Args:
model_path: Path to directory containing the fingerprint.
Returns:
The fingerprint or an empty dict if no fingerprint was found.
"""
if not model_path or not os.path.exists(model_path):
return {}
fingerprint_path = os.path.join(model_path, FINGERPRINT_FILE_PATH)
if os.path.isfile(fingerprint_path):
return rasa.shared.utils.io.read_json_file(fingerprint_path)
else:
return {}
def persist_fingerprint(output_path: Text, fingerprint: Fingerprint):
"""Persist a model fingerprint.
Args:
output_path: Directory in which the fingerprint should be saved.
fingerprint: The fingerprint to be persisted.
"""
path = os.path.join(output_path, FINGERPRINT_FILE_PATH)
rasa.shared.utils.io.dump_obj_as_json_to_file(path, fingerprint)
def did_section_fingerprint_change(
fingerprint1: Fingerprint, fingerprint2: Fingerprint, section: Section
) -> bool:
"""Check whether the fingerprint of a section has changed."""
for k in section.relevant_keys:
if fingerprint1.get(k) != fingerprint2.get(k):
logger.info(f"Data ({k}) for {section.name} section changed.")
return True
return False
def move_model(source: Text, target: Text) -> bool:
"""Move two model directories.
Args:
source: The original folder which should be merged in another.
target: The destination folder where it should be moved to.
Returns:
`True` if the merge was successful, else `False`.
"""
try:
shutil.move(source, target)
return True
except Exception as e:
        logger.debug(f"Could not move model: {e}")
return False
def should_retrain(
new_fingerprint: Fingerprint,
old_model: Text,
train_path: Text,
has_e2e_examples: bool = False,
force_training: bool = False,
) -> FingerprintComparisonResult:
"""Check which components of a model should be retrained.
Args:
new_fingerprint: The fingerprint of the new model to be trained.
old_model: Path to the old zipped model file.
train_path: Path to the directory in which the new model will be trained.
has_e2e_examples: Whether the new training data contains e2e examples.
force_training: Indicates if the model needs to be retrained even if the data
has not changed.
Returns:
A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa
NLU needs to be retrained or not.
"""
fingerprint_comparison = FingerprintComparisonResult()
if old_model is None or not os.path.exists(old_model):
return fingerprint_comparison
with unpack_model(old_model) as unpacked:
last_fingerprint = fingerprint_from_path(unpacked)
old_core, old_nlu = get_model_subdirectories(unpacked)
fingerprint_comparison = FingerprintComparisonResult(
core=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_CORE
),
nlu=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_NLU
),
nlg=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_NLG
),
force_training=force_training,
)
# We should retrain core if nlu data changes and there are e2e stories.
if has_e2e_examples and fingerprint_comparison.should_retrain_nlu():
fingerprint_comparison.core = True
core_merge_failed = False
if not fingerprint_comparison.should_retrain_core():
target_path = os.path.join(train_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
core_merge_failed = not move_model(old_core, target_path)
fingerprint_comparison.core = core_merge_failed
if not fingerprint_comparison.should_retrain_nlg() and core_merge_failed:
# If moving the Core model failed, we should also retrain NLG
fingerprint_comparison.nlg = True
if not fingerprint_comparison.should_retrain_nlu():
target_path = os.path.join(train_path, "nlu")
fingerprint_comparison.nlu = not move_model(old_nlu, target_path)
return fingerprint_comparison
def can_finetune(
last_fingerprint: Fingerprint,
new_fingerprint: Fingerprint,
core: bool = False,
nlu: bool = False,
) -> bool:
"""Checks if components of a model can be finetuned with incremental training.
Args:
last_fingerprint: The fingerprint of the old model to potentially be fine-tuned.
new_fingerprint: The fingerprint of the new model.
core: Check sections for finetuning a core model.
nlu: Check sections for finetuning an nlu model.
Returns:
`True` if the old model can be finetuned, `False` otherwise.
"""
section_keys = [
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY,
]
if core:
section_keys.append(FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY)
if nlu:
section_keys.append(FINGERPRINT_NLU_LABELS_KEY)
fingerprint_changed = did_section_fingerprint_change(
last_fingerprint,
new_fingerprint,
Section(name="finetune", relevant_keys=section_keys),
)
old_model_above_min_version = version.parse(
last_fingerprint.get(FINGERPRINT_RASA_VERSION_KEY)
) >= version.parse(MINIMUM_COMPATIBLE_VERSION)
return old_model_above_min_version and not fingerprint_changed
def package_model(
fingerprint: Fingerprint,
output_directory: Text,
train_path: Text,
fixed_model_name: Optional[Text] = None,
model_prefix: Text = "",
) -> Text:
"""
Compress a trained model.
Args:
fingerprint: fingerprint of the model
output_directory: path to the directory in which the model should be stored
train_path: path to uncompressed model
fixed_model_name: name of the compressed model file
model_prefix: prefix of the compressed model file
Returns: path to 'tar.gz' model file
"""
output_directory = create_output_path(
output_directory, prefix=model_prefix, fixed_name=fixed_model_name
)
create_package_rasa(train_path, output_directory, fingerprint)
print_success(
"Your Rasa model is trained and saved at '{}'.".format(
os.path.abspath(output_directory)
)
)
return output_directory
async def update_model_with_new_domain(
importer: "TrainingDataImporter", unpacked_model_path: Union[Path, Text]
) -> None:
"""Overwrites the domain of an unpacked model with a new domain.
Args:
importer: Importer which provides the new domain.
unpacked_model_path: Path to the unpacked model.
"""
model_path = Path(unpacked_model_path) / DEFAULT_CORE_SUBDIRECTORY_NAME
domain = await importer.get_domain()
domain.persist(model_path / DEFAULT_DOMAIN_PATH)
def get_model_for_finetuning(
previous_model_file: Optional[Union[Path, Text]]
) -> Optional[Text]:
"""Gets validated path for model to finetune.
Args:
previous_model_file: Path to model file which should be used for finetuning or
a directory in case the latest trained model should be used.
Returns:
Path to model archive. `None` if there is no model.
"""
    if not previous_model_file:
        return None
    if Path(previous_model_file).is_dir():
logger.debug(
f"Trying to load latest model from '{previous_model_file}' for "
f"finetuning."
)
return get_latest_model(previous_model_file)
if Path(previous_model_file).is_file():
return previous_model_file
logger.debug(
"No valid model for finetuning found as directory either "
"contains no model or model file cannot be found."
)
return None
| import copy
import glob
import hashlib
import logging
import os
import shutil
from subprocess import CalledProcessError, DEVNULL, check_output # skipcq:BAN-B404
import tempfile
import typing
from pathlib import Path
from typing import Any, Text, Tuple, Union, Optional, List, Dict, NamedTuple
from packaging import version
from rasa.constants import MINIMUM_COMPATIBLE_VERSION
import rasa.shared.utils.io
import rasa.utils.io
from rasa.cli.utils import create_output_path
from rasa.shared.utils.cli import print_success
from rasa.shared.constants import (
CONFIG_KEYS_CORE,
CONFIG_KEYS_NLU,
CONFIG_KEYS,
DEFAULT_DOMAIN_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_CORE_SUBDIRECTORY_NAME,
DEFAULT_NLU_SUBDIRECTORY_NAME,
)
from rasa.exceptions import ModelNotFound
from rasa.utils.common import TempDirectoryPath
if typing.TYPE_CHECKING:
from rasa.shared.importers.importer import TrainingDataImporter
logger = logging.getLogger(__name__)
# Type alias for the fingerprint
Fingerprint = Dict[Text, Union[Text, List[Text], int, float]]
FINGERPRINT_FILE_PATH = "fingerprint.json"
FINGERPRINT_CONFIG_KEY = "config"
FINGERPRINT_CONFIG_CORE_KEY = "core-config"
FINGERPRINT_CONFIG_NLU_KEY = "nlu-config"
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY = "config-without-epochs"
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY = "domain"
FINGERPRINT_NLG_KEY = "nlg"
FINGERPRINT_RASA_VERSION_KEY = "version"
FINGERPRINT_STORIES_KEY = "stories"
FINGERPRINT_NLU_DATA_KEY = "messages"
FINGERPRINT_NLU_LABELS_KEY = "nlu_labels"
FINGERPRINT_PROJECT = "project"
FINGERPRINT_TRAINED_AT_KEY = "trained_at"
class Section(NamedTuple):
"""Specifies which fingerprint keys decide whether this sub-model is retrained."""
name: Text
relevant_keys: List[Text]
SECTION_CORE = Section(
name="Core model",
relevant_keys=[
FINGERPRINT_CONFIG_KEY,
FINGERPRINT_CONFIG_CORE_KEY,
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY,
FINGERPRINT_STORIES_KEY,
FINGERPRINT_RASA_VERSION_KEY,
],
)
SECTION_NLU = Section(
name="NLU model",
relevant_keys=[
FINGERPRINT_CONFIG_KEY,
FINGERPRINT_CONFIG_NLU_KEY,
FINGERPRINT_NLU_DATA_KEY,
FINGERPRINT_RASA_VERSION_KEY,
],
)
SECTION_NLG = Section(name="NLG responses", relevant_keys=[FINGERPRINT_NLG_KEY])
class FingerprintComparisonResult:
"""Container for the results of a fingerprint comparison."""
def __init__(
self,
nlu: bool = True,
core: bool = True,
nlg: bool = True,
force_training: bool = False,
):
"""Creates a `FingerprintComparisonResult` instance.
Args:
nlu: `True` if the NLU model should be retrained.
core: `True` if the Core model should be retrained.
nlg: `True` if the responses in the domain should be updated.
force_training: `True` if a training of all parts is forced.
"""
self.nlu = nlu
self.core = core
self.nlg = nlg
self.force_training = force_training
def is_training_required(self) -> bool:
"""Check if anything has to be retrained."""
return any([self.nlg, self.nlu, self.core, self.force_training])
def should_retrain_core(self) -> bool:
"""Check if the Core model has to be updated."""
return self.force_training or self.core
def should_retrain_nlg(self) -> bool:
"""Check if the responses have to be updated."""
return self.should_retrain_core() or self.nlg
def should_retrain_nlu(self) -> bool:
"""Check if the NLU model has to be updated."""
return self.force_training or self.nlu
def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath:
"""Get a model and unpack it. Raises a `ModelNotFound` exception if
no model could be found at the provided path.
Args:
model_path: Path to the zipped model. If it's a directory, the latest
trained model is returned.
Returns:
Path to the unpacked model.
"""
if not model_path:
raise ModelNotFound("No path specified.")
elif not os.path.exists(model_path):
raise ModelNotFound(f"No file or directory at '{model_path}'.")
if os.path.isdir(model_path):
model_path = get_latest_model(model_path)
if not model_path:
raise ModelNotFound(
f"Could not find any Rasa model files in '{model_path}'."
)
elif not model_path.endswith(".tar.gz"):
raise ModelNotFound(f"Path '{model_path}' does not point to a Rasa model file.")
try:
model_relative_path = os.path.relpath(model_path)
except ValueError:
model_relative_path = model_path
logger.info(f"Loading model {model_relative_path}...")
return unpack_model(model_path)
def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]:
"""Get the latest model from a path.
Args:
model_path: Path to a directory containing zipped models.
Returns:
Path to latest model in the given directory.
"""
if not os.path.exists(model_path) or os.path.isfile(model_path):
model_path = os.path.dirname(model_path)
list_of_files = glob.glob(os.path.join(model_path, "*.tar.gz"))
if len(list_of_files) == 0:
return None
return max(list_of_files, key=os.path.getctime)
def unpack_model(
model_file: Text, working_directory: Optional[Union[Path, Text]] = None
) -> TempDirectoryPath:
"""Unpack a zipped Rasa model.
Args:
model_file: Path to zipped model.
working_directory: Location where the model should be unpacked to.
If `None` a temporary directory will be created.
Returns:
Path to unpacked Rasa model.
"""
import tarfile
if working_directory is None:
working_directory = tempfile.mkdtemp()
# All files are in a subdirectory.
try:
with tarfile.open(model_file, mode="r:gz") as tar:
tar.extractall(working_directory)
logger.debug(f"Extracted model to '{working_directory}'.")
except Exception as e:
logger.error(f"Failed to extract model at {model_file}. Error: {e}")
raise
return TempDirectoryPath(working_directory)
def get_model_subdirectories(
unpacked_model_path: Text,
) -> Tuple[Optional[Text], Optional[Text]]:
"""Return paths for Core and NLU model directories, if they exist.
If neither directories exist, a `ModelNotFound` exception is raised.
Args:
unpacked_model_path: Path to unpacked Rasa model.
Returns:
Tuple (path to Core subdirectory if it exists or `None` otherwise,
path to NLU subdirectory if it exists or `None` otherwise).
"""
core_path = os.path.join(unpacked_model_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
nlu_path = os.path.join(unpacked_model_path, DEFAULT_NLU_SUBDIRECTORY_NAME)
if not os.path.isdir(core_path):
core_path = None
if not os.path.isdir(nlu_path):
nlu_path = None
if not core_path and not nlu_path:
raise ModelNotFound(
"No NLU or Core data for unpacked model at: '{}'.".format(
unpacked_model_path
)
)
return core_path, nlu_path
def create_package_rasa(
training_directory: Text,
output_filename: Text,
fingerprint: Optional[Fingerprint] = None,
) -> Text:
"""Create a zipped Rasa model from trained model files.
Args:
training_directory: Path to the directory which contains the trained
model files.
output_filename: Name of the zipped model file to be created.
fingerprint: A unique fingerprint to identify the model version.
Returns:
Path to zipped model.
"""
import tarfile
if fingerprint:
persist_fingerprint(training_directory, fingerprint)
output_directory = os.path.dirname(output_filename)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with tarfile.open(output_filename, "w:gz") as tar:
for elem in os.scandir(training_directory):
tar.add(elem.path, arcname=elem.name)
shutil.rmtree(training_directory)
return output_filename
def project_fingerprint() -> Optional[Text]:
"""Create a hash for the project in the current working directory.
Returns:
project hash
"""
try:
remote = check_output( # skipcq:BAN-B607,BAN-B603
["git", "remote", "get-url", "origin"], stderr=DEVNULL
)
return hashlib.sha256(remote).hexdigest()
except (CalledProcessError, OSError):
return None
async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprint:
"""Create a model fingerprint from its used configuration and training data.
Args:
file_importer: File importer which provides the training data and model config.
Returns:
The fingerprint.
"""
import time
config = await file_importer.get_config()
domain = await file_importer.get_domain()
stories = await file_importer.get_stories()
nlu_data = await file_importer.get_nlu_data()
responses = domain.responses
# Do a copy of the domain to not change the actual domain (shallow is enough)
domain = copy.copy(domain)
# don't include the response texts in the fingerprint.
# Their fingerprint is separate.
domain.responses = {}
return {
FINGERPRINT_CONFIG_KEY: _get_fingerprint_of_config(
config, exclude_keys=CONFIG_KEYS
),
FINGERPRINT_CONFIG_CORE_KEY: _get_fingerprint_of_config(
config, include_keys=CONFIG_KEYS_CORE
),
FINGERPRINT_CONFIG_NLU_KEY: _get_fingerprint_of_config(
config, include_keys=CONFIG_KEYS_NLU
),
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY: _get_fingerprint_of_config_without_epochs(
config
),
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY: domain.fingerprint(),
FINGERPRINT_NLG_KEY: rasa.shared.utils.io.deep_container_fingerprint(responses),
FINGERPRINT_PROJECT: project_fingerprint(),
FINGERPRINT_NLU_DATA_KEY: nlu_data.fingerprint(),
FINGERPRINT_NLU_LABELS_KEY: nlu_data.label_fingerprint(),
FINGERPRINT_STORIES_KEY: stories.fingerprint(),
FINGERPRINT_TRAINED_AT_KEY: time.time(),
FINGERPRINT_RASA_VERSION_KEY: rasa.__version__,
}
def _get_fingerprint_of_config(
config: Optional[Dict[Text, Any]],
include_keys: Optional[List[Text]] = None,
exclude_keys: Optional[List[Text]] = None,
) -> Text:
if not config:
return ""
keys = include_keys or list(filter(lambda k: k not in exclude_keys, config.keys()))
sub_config = {k: config[k] for k in keys if k in config}
return rasa.shared.utils.io.deep_container_fingerprint(sub_config)
def _get_fingerprint_of_config_without_epochs(
config: Optional[Dict[Text, Any]],
) -> Text:
if not config:
return ""
copied_config = copy.deepcopy(config)
for key in ["pipeline", "policies"]:
if copied_config.get(key):
for p in copied_config[key]:
if "epochs" in p:
del p["epochs"]
return rasa.shared.utils.io.deep_container_fingerprint(copied_config)
def fingerprint_from_path(model_path: Text) -> Fingerprint:
"""Load a persisted fingerprint.
Args:
model_path: Path to directory containing the fingerprint.
Returns:
The fingerprint or an empty dict if no fingerprint was found.
"""
if not model_path or not os.path.exists(model_path):
return {}
fingerprint_path = os.path.join(model_path, FINGERPRINT_FILE_PATH)
if os.path.isfile(fingerprint_path):
return rasa.shared.utils.io.read_json_file(fingerprint_path)
else:
return {}
def persist_fingerprint(output_path: Text, fingerprint: Fingerprint):
"""Persist a model fingerprint.
Args:
output_path: Directory in which the fingerprint should be saved.
fingerprint: The fingerprint to be persisted.
"""
path = os.path.join(output_path, FINGERPRINT_FILE_PATH)
rasa.shared.utils.io.dump_obj_as_json_to_file(path, fingerprint)
def did_section_fingerprint_change(
fingerprint1: Fingerprint, fingerprint2: Fingerprint, section: Section
) -> bool:
"""Check whether the fingerprint of a section has changed."""
for k in section.relevant_keys:
if fingerprint1.get(k) != fingerprint2.get(k):
logger.info(f"Data ({k}) for {section.name} section changed.")
return True
return False
def move_model(source: Text, target: Text) -> bool:
"""Move two model directories.
Args:
source: The original folder which should be merged in another.
target: The destination folder where it should be moved to.
Returns:
`True` if the merge was successful, else `False`.
"""
try:
shutil.move(source, target)
return True
except Exception as e:
        logger.debug(f"Could not move model: {e}")
return False
def should_retrain(
new_fingerprint: Fingerprint,
old_model: Text,
train_path: Text,
has_e2e_examples: bool = False,
force_training: bool = False,
) -> FingerprintComparisonResult:
"""Check which components of a model should be retrained.
Args:
new_fingerprint: The fingerprint of the new model to be trained.
old_model: Path to the old zipped model file.
train_path: Path to the directory in which the new model will be trained.
has_e2e_examples: Whether the new training data contains e2e examples.
force_training: Indicates if the model needs to be retrained even if the data
has not changed.
Returns:
A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa
NLU needs to be retrained or not.
"""
fingerprint_comparison = FingerprintComparisonResult()
if old_model is None or not os.path.exists(old_model):
return fingerprint_comparison
with unpack_model(old_model) as unpacked:
last_fingerprint = fingerprint_from_path(unpacked)
old_core, old_nlu = get_model_subdirectories(unpacked)
fingerprint_comparison = FingerprintComparisonResult(
core=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_CORE
),
nlu=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_NLU
),
nlg=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_NLG
),
force_training=force_training,
)
# We should retrain core if nlu data changes and there are e2e stories.
if has_e2e_examples and fingerprint_comparison.should_retrain_nlu():
fingerprint_comparison.core = True
core_merge_failed = False
if not fingerprint_comparison.should_retrain_core():
target_path = os.path.join(train_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
core_merge_failed = not move_model(old_core, target_path)
fingerprint_comparison.core = core_merge_failed
if not fingerprint_comparison.should_retrain_nlg() and core_merge_failed:
# If moving the Core model failed, we should also retrain NLG
fingerprint_comparison.nlg = True
if not fingerprint_comparison.should_retrain_nlu():
target_path = os.path.join(train_path, "nlu")
fingerprint_comparison.nlu = not move_model(old_nlu, target_path)
return fingerprint_comparison
def can_finetune(
last_fingerprint: Fingerprint,
new_fingerprint: Fingerprint,
core: bool = False,
nlu: bool = False,
) -> bool:
"""Checks if components of a model can be finetuned with incremental training.
Args:
last_fingerprint: The fingerprint of the old model to potentially be fine-tuned.
new_fingerprint: The fingerprint of the new model.
core: Check sections for finetuning a core model.
nlu: Check sections for finetuning an nlu model.
Returns:
`True` if the old model can be finetuned, `False` otherwise.
"""
section_keys = [
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY,
]
if core:
section_keys.append(FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY)
if nlu:
section_keys.append(FINGERPRINT_NLU_LABELS_KEY)
fingerprint_changed = did_section_fingerprint_change(
last_fingerprint,
new_fingerprint,
Section(name="finetune", relevant_keys=section_keys),
)
old_model_above_min_version = version.parse(
last_fingerprint.get(FINGERPRINT_RASA_VERSION_KEY)
) >= version.parse(MINIMUM_COMPATIBLE_VERSION)
return old_model_above_min_version and not fingerprint_changed
def package_model(
fingerprint: Fingerprint,
output_directory: Text,
train_path: Text,
fixed_model_name: Optional[Text] = None,
model_prefix: Text = "",
) -> Text:
"""
Compress a trained model.
Args:
fingerprint: fingerprint of the model
output_directory: path to the directory in which the model should be stored
train_path: path to uncompressed model
fixed_model_name: name of the compressed model file
model_prefix: prefix of the compressed model file
Returns: path to 'tar.gz' model file
"""
output_directory = create_output_path(
output_directory, prefix=model_prefix, fixed_name=fixed_model_name
)
create_package_rasa(train_path, output_directory, fingerprint)
print_success(
"Your Rasa model is trained and saved at '{}'.".format(
os.path.abspath(output_directory)
)
)
return output_directory
async def update_model_with_new_domain(
importer: "TrainingDataImporter", unpacked_model_path: Union[Path, Text]
) -> None:
"""Overwrites the domain of an unpacked model with a new domain.
Args:
importer: Importer which provides the new domain.
unpacked_model_path: Path to the unpacked model.
"""
model_path = Path(unpacked_model_path) / DEFAULT_CORE_SUBDIRECTORY_NAME
domain = await importer.get_domain()
domain.persist(model_path / DEFAULT_DOMAIN_PATH)
def get_model_for_finetuning(
previous_model_file: Optional[Union[Path, Text]]
) -> Optional[Text]:
"""Gets validated path for model to finetune.
Args:
previous_model_file: Path to model file which should be used for finetuning or
a directory in case the latest trained model should be used.
Returns:
Path to model archive. `None` if there is no model.
"""
    if not previous_model_file:
        return None
    if Path(previous_model_file).is_dir():
logger.debug(
f"Trying to load latest model from '{previous_model_file}' for "
f"finetuning."
)
return get_latest_model(previous_model_file)
if Path(previous_model_file).is_file():
return previous_model_file
logger.debug(
"No valid model for finetuning found as directory either "
"contains no model or model file cannot be found."
)
return None
| en | 0.828516 | # skipcq:BAN-B404 # Type alias for the fingerprint Specifies which fingerprint keys decide whether this sub-model is retrained. Container for the results of a fingerprint comparison. Creates a `FingerprintComparisonResult` instance. Args: nlu: `True` if the NLU model should be retrained. core: `True` if the Core model should be retrained. nlg: `True` if the responses in the domain should be updated. force_training: `True` if a training of all parts is forced. Check if anything has to be retrained. Check if the Core model has to be updated. Check if the responses have to be updated. Check if the NLU model has to be updated. Get a model and unpack it. Raises a `ModelNotFound` exception if no model could be found at the provided path. Args: model_path: Path to the zipped model. If it's a directory, the latest trained model is returned. Returns: Path to the unpacked model. Get the latest model from a path. Args: model_path: Path to a directory containing zipped models. Returns: Path to latest model in the given directory. Unpack a zipped Rasa model. Args: model_file: Path to zipped model. working_directory: Location where the model should be unpacked to. If `None` a temporary directory will be created. Returns: Path to unpacked Rasa model. # All files are in a subdirectory. Return paths for Core and NLU model directories, if they exist. If neither directories exist, a `ModelNotFound` exception is raised. Args: unpacked_model_path: Path to unpacked Rasa model. Returns: Tuple (path to Core subdirectory if it exists or `None` otherwise, path to NLU subdirectory if it exists or `None` otherwise). Create a zipped Rasa model from trained model files. Args: training_directory: Path to the directory which contains the trained model files. output_filename: Name of the zipped model file to be created. fingerprint: A unique fingerprint to identify the model version. Returns: Path to zipped model. Create a hash for the project in the current working directory. Returns: project hash # skipcq:BAN-B607,BAN-B603 Create a model fingerprint from its used configuration and training data. Args: file_importer: File importer which provides the training data and model config. Returns: The fingerprint. # Do a copy of the domain to not change the actual domain (shallow is enough) # don't include the response texts in the fingerprint. # Their fingerprint is separate. Load a persisted fingerprint. Args: model_path: Path to directory containing the fingerprint. Returns: The fingerprint or an empty dict if no fingerprint was found. Persist a model fingerprint. Args: output_path: Directory in which the fingerprint should be saved. fingerprint: The fingerprint to be persisted. Check whether the fingerprint of a section has changed. Move two model directories. Args: source: The original folder which should be merged in another. target: The destination folder where it should be moved to. Returns: `True` if the merge was successful, else `False`. Check which components of a model should be retrained. Args: new_fingerprint: The fingerprint of the new model to be trained. old_model: Path to the old zipped model file. train_path: Path to the directory in which the new model will be trained. has_e2e_examples: Whether the new training data contains e2e examples. force_training: Indicates if the model needs to be retrained even if the data has not changed. Returns: A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa NLU needs to be retrained or not. 
# We should retrain core if nlu data changes and there are e2e stories. # If moving the Core model failed, we should also retrain NLG Checks if components of a model can be finetuned with incremental training. Args: last_fingerprint: The fingerprint of the old model to potentially be fine-tuned. new_fingerprint: The fingerprint of the new model. core: Check sections for finetuning a core model. nlu: Check sections for finetuning an nlu model. Returns: `True` if the old model can be finetuned, `False` otherwise. Compress a trained model. Args: fingerprint: fingerprint of the model output_directory: path to the directory in which the model should be stored train_path: path to uncompressed model fixed_model_name: name of the compressed model file model_prefix: prefix of the compressed model file Returns: path to 'tar.gz' model file Overwrites the domain of an unpacked model with a new domain. Args: importer: Importer which provides the new domain. unpacked_model_path: Path to the unpacked model. Gets validated path for model to finetune. Args: previous_model_file: Path to model file which should be used for finetuning or a directory in case the latest trained model should be used. Returns: Path to model archive. `None` if there is no model. | 1.847516 | 2 |
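To make the fingerprinting scheme above concrete, here is a small self-contained sketch of the core idea: hash the relevant training inputs into a dictionary, then retrain a sub-model only if one of the keys relevant to its section changed. The key names and helper functions below are simplified stand-ins, not Rasa's actual API.

import hashlib
import json


def fingerprint(value) -> str:
    # Deterministic digest of a JSON-serialisable container.
    return hashlib.sha256(json.dumps(value, sort_keys=True).encode()).hexdigest()


def build_fingerprint(config: dict, stories: list, nlu_examples: list) -> dict:
    return {
        "config": fingerprint(config),
        "stories": fingerprint(stories),
        "messages": fingerprint(nlu_examples),
    }


def section_changed(old: dict, new: dict, relevant_keys: list) -> bool:
    # Mirrors the role of did_section_fingerprint_change: any differing
    # relevant key means the corresponding sub-model should be retrained.
    return any(old.get(k) != new.get(k) for k in relevant_keys)


old = build_fingerprint({"language": "en"}, ["story_a"], ["hi", "bye"])
new = build_fingerprint({"language": "en"}, ["story_a", "story_b"], ["hi", "bye"])
print(section_changed(old, new, ["config", "stories"]))   # True  -> retrain Core
print(section_changed(old, new, ["config", "messages"]))  # False -> NLU unchanged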
algorithmic_trading/backester_framework_test.py | CatalaniCD/quantitative_finance | 1 | 735 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 16 11:20:01 2021
@author: q
GOAL : develop a backtester from a .py framework / library
# installation :
pip install backtesting
# Documentation
Index :
- Manuals
- Tutorials
- Example Strategies
- FAQ
- License
- API Reference Documentation
source : https://kernc.github.io/backtesting.py/doc/backtesting/
# Features
* Simple, well-documented API
* Blazing fast execution
* Built-in optimizer
* Library of composable base strategies and utilities
* Indicator-library-agnostic
* Supports any financial instrument with candlestick data
* Detailed results
* Interactive visualizations
"""
# =============================================================================
# imports and settings
# =============================================================================
# data handling
import pandas as pd
import numpy as np
# import backtesting and set options
import backtesting
# Set notebook False
backtesting.set_bokeh_output(notebook=False)
from backtesting import Backtest, Strategy
from backtesting.lib import crossover, cross
from backtesting.test import SMA, GOOG
# =============================================================================
# strategy definition
# =============================================================================
class PriceAboveSMA(Strategy):
_ma_period = 21 # Moving Average
def init(self):
# compute momentum
""" Simple Moving Average Calc"""
self.sma = self.I(SMA, self.data.Close, self._ma_period)
def next(self):
price = self.data.Close[-1]
if not self.position and price > self.sma[-1]:
# market entry
self.buy()
elif self.position and price < self.sma[-1]:
# market exit
self.position.close()
# =============================================================================
# Program Execution
# =============================================================================
if __name__ == '__main__':
""" Instantiate the Backtester """
backtester = Backtest(GOOG, PriceAboveSMA, commission=.002,
exclusive_orders=True, cash = 10000)
PLOT = True
""" Run a Single Backtest """
stats = backtester.run()
print(stats)
if PLOT: backtester.plot()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 16 11:20:01 2021
@author: q
GOAL : develop a backtester from a .py framework / library
# installation :
pip install backtesting
# Documentation
Index :
- Manuals
- Tutorials
- Example Strategies
- FAQ
- License
- API Reference Documentation
source : https://kernc.github.io/backtesting.py/doc/backtesting/
# Features
* Simple, well-documented API
* Blazing fast execution
* Built-in optimizer
* Library of composable base strategies and utilities
* Indicator-library-agnostic
* Supports any financial instrument with candlestick data
* Detailed results
* Interactive visualizations
"""
# =============================================================================
# imports and settings
# =============================================================================
# data handling
import pandas as pd
import numpy as np
# import backtesting and set options
import backtesting
# Set notebook False
backtesting.set_bokeh_output(notebook=False)
from backtesting import Backtest, Strategy
from backtesting.lib import crossover, cross
from backtesting.test import SMA, GOOG
# =============================================================================
# strategy definition
# =============================================================================
class PriceAboveSMA(Strategy):
_ma_period = 21 # Moving Average
def init(self):
# compute momentum
""" Simple Moving Average Calc"""
self.sma = self.I(SMA, self.data.Close, self._ma_period)
def next(self):
price = self.data.Close[-1]
if not self.position and price > self.sma[-1]:
# market entry
self.buy()
elif self.position and price < self.sma[-1]:
# market exit
self.position.close()
# =============================================================================
# Program Execution
# =============================================================================
if __name__ == '__main__':
""" Instantiate the Backtester """
backtester = Backtest(GOOG, PriceAboveSMA, commission=.002,
exclusive_orders=True, cash = 10000)
PLOT = True
""" Run a Single Backtest """
stats = backtester.run()
print(stats)
if PLOT: backtester.plot()
| en | 0.510676 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Fri Jul 16 11:20:01 2021 @author: q GOAL : develop a backtester from a .py framework / library # installation : pip install backtesting # Documentation Index : - Manuals - Tutorials - Example Strategies - FAQ - License - API Reference Documentation source : https://kernc.github.io/backtesting.py/doc/backtesting/ # Features * Simple, well-documented API * Blazing fast execution * Built-in optimizer * Library of composable base strategies and utilities * Indicator-library-agnostic * Supports any financial instrument with candlestick data * Detailed results * Interactive visualizations # ============================================================================= # imports and settings # ============================================================================= # data handling # import backtesting and set options # Set notebook False # ============================================================================= # strategy definition # ============================================================================= # Moving Average # compute momentum Simple Moving Average Calc # market entry # market exit # ============================================================================= # Program Execution # ============================================================================= Instantiate the Backtester Run a Single Backtest | 2.156526 | 2 |
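The run above exercises a single parameter set; the built-in optimizer mentioned in the feature list can grid-search strategy attributes as well. A short sketch based on the library's documented SMA-crossover pattern; the parameter ranges are arbitrary examples.

from backtesting import Backtest, Strategy
from backtesting.lib import crossover
from backtesting.test import SMA, GOOG

class SmaCross(Strategy):
    n1 = 10  # fast moving-average window
    n2 = 30  # slow moving-average window

    def init(self):
        self.sma1 = self.I(SMA, self.data.Close, self.n1)
        self.sma2 = self.I(SMA, self.data.Close, self.n2)

    def next(self):
        if crossover(self.sma1, self.sma2):
            self.buy()
        elif crossover(self.sma2, self.sma1):
            self.position.close()

bt = Backtest(GOOG, SmaCross, cash=10000, commission=.002, exclusive_orders=True)
print(bt.run())

# Grid-search both windows, keeping only combinations where the fast window is shorter.
stats = bt.optimize(n1=range(5, 30, 5), n2=range(10, 60, 5),
                    maximize='Equity Final [$]',
                    constraint=lambda p: p.n1 < p.n2)
print(stats._strategy)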
Sec_10_expr_lambdas_fun_integradas/f_generators.py | PauloAlexSilva/Python | 0 | 736 |
""""
Generator Expression
Em aulas anteriores foi abordado:
- List Comprehension;
- Dictionary Comprehension;
- Set Comprehension.
Não foi abordado:
- Tuple Comprehension ... porque elas se chamam Generators
nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa']
print(any8[nomes[0] == 'C' for nome in nomes])
# Poderia ter sido feito usando os Generators
nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa']
print(any(nome[0] == 'C' for nome in nomes))
# List Comprehension
res = [nome[0] == 'C' for nome in nomes]
print(type(res))
print(res) # [True, True, True, True, True, False]
# Generator - mais efeciente
res2 = (nome[0] == 'C' for nome in nomes)
print(type(res2))
print(res2)
# O que faz a função de getsizeof()? -> retorna a quantidade de bytes em memória do elemento
# passado como parâmetro
from sys import getsizeof
# Mostra quantos bytes a string 'Paulo' está ocupando em memória.
# Quanto maior a string mais espaço ocupa.
print(getsizeof('Paulo'))
print(getsizeof('Quanto maior a string mais espaço ocupa.'))
print(getsizeof(9))
print(getsizeof(91))
print(getsizeof(12345667890))
print(getsizeof(True))
from sys import getsizeof
# Gerando uma lista de números com List Comprehension
list_comp = getsizeof([x * 10 for x in range(1000)])
# Gerando uma lista de números com Set Comprehension
set_comp = getsizeof({x * 10 for x in range(1000)})
# Gerando uma lista de números com Dictionary Comprehension
dic_comp = getsizeof({x: x * 10 for x in range(1000)})
# Gerando uma lista de números com Generator
gen = getsizeof(x * 10 for x in range(1000))
print('Para fazer a mesma gastamos em memória: ')
print(f'List Comprehension: {list_comp} bytes!')
print(f'Set Comprehension: {set_comp} bytes!')
print(f'Dictionary Comprehension: {dic_comp} bytes!')
print(f'Generator Expression: {gen} bytes!')
Para fazer a mesma gastamos em memória:
List Comprehension: 8856 bytes!
Set Comprehension: 32984 bytes!
Dictionary Comprehension: 36960 bytes!
Generator Expression: 112 bytes!
"""
# Posso iterar no Generator Expression? Sim
gen = (x * 10 for x in range(1000))
print(gen)
print(type(gen))
for num in gen:
print(num)
| """"
Generator Expression
Em aulas anteriores foi abordado:
- List Comprehension;
- Dictionary Comprehension;
- Set Comprehension.
Não foi abordado:
- Tuple Comprehension ... porque elas se chamam Generators
nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa']
print(any8[nomes[0] == 'C' for nome in nomes])
# Poderia ter sido feito usando os Generators
nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa']
print(any(nome[0] == 'C' for nome in nomes))
# List Comprehension
res = [nome[0] == 'C' for nome in nomes]
print(type(res))
print(res) # [True, True, True, True, True, False]
# Generator - mais efeciente
res2 = (nome[0] == 'C' for nome in nomes)
print(type(res2))
print(res2)
# O que faz a função de getsizeof()? -> retorna a quantidade de bytes em memória do elemento
# passado como parâmetro
from sys import getsizeof
# Mostra quantos bytes a string 'Paulo' está ocupando em memória.
# Quanto maior a string mais espaço ocupa.
print(getsizeof('Paulo'))
print(getsizeof('Quanto maior a string mais espaço ocupa.'))
print(getsizeof(9))
print(getsizeof(91))
print(getsizeof(12345667890))
print(getsizeof(True))
from sys import getsizeof
# Gerando uma lista de números com List Comprehension
list_comp = getsizeof([x * 10 for x in range(1000)])
# Gerando uma lista de números com Set Comprehension
set_comp = getsizeof({x * 10 for x in range(1000)})
# Gerando uma lista de números com Dictionary Comprehension
dic_comp = getsizeof({x: x * 10 for x in range(1000)})
# Gerando uma lista de números com Generator
gen = getsizeof(x * 10 for x in range(1000))
print('Para fazer a mesma gastamos em memória: ')
print(f'List Comprehension: {list_comp} bytes!')
print(f'Set Comprehension: {set_comp} bytes!')
print(f'Dictionary Comprehension: {dic_comp} bytes!')
print(f'Generator Expression: {gen} bytes!')
Para fazer a mesma gastamos em memória:
List Comprehension: 8856 bytes!
Set Comprehension: 32984 bytes!
Dictionary Comprehension: 36960 bytes!
Generator Expression: 112 bytes!
"""
# Posso iterar no Generator Expression? Sim
gen = (x * 10 for x in range(1000))
print(gen)
print(type(gen))
for num in gen:
print(num) | pt | 0.758922 | " Generator Expression Em aulas anteriores foi abordado: - List Comprehension; - Dictionary Comprehension; - Set Comprehension. Não foi abordado: - Tuple Comprehension ... porque elas se chamam Generators nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa'] print(any8[nomes[0] == 'C' for nome in nomes]) # Poderia ter sido feito usando os Generators nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa'] print(any(nome[0] == 'C' for nome in nomes)) # List Comprehension res = [nome[0] == 'C' for nome in nomes] print(type(res)) print(res) # [True, True, True, True, True, False] # Generator - mais efeciente res2 = (nome[0] == 'C' for nome in nomes) print(type(res2)) print(res2) # O que faz a função de getsizeof()? -> retorna a quantidade de bytes em memória do elemento # passado como parâmetro from sys import getsizeof # Mostra quantos bytes a string 'Paulo' está ocupando em memória. # Quanto maior a string mais espaço ocupa. print(getsizeof('Paulo')) print(getsizeof('Quanto maior a string mais espaço ocupa.')) print(getsizeof(9)) print(getsizeof(91)) print(getsizeof(12345667890)) print(getsizeof(True)) from sys import getsizeof # Gerando uma lista de números com List Comprehension list_comp = getsizeof([x * 10 for x in range(1000)]) # Gerando uma lista de números com Set Comprehension set_comp = getsizeof({x * 10 for x in range(1000)}) # Gerando uma lista de números com Dictionary Comprehension dic_comp = getsizeof({x: x * 10 for x in range(1000)}) # Gerando uma lista de números com Generator gen = getsizeof(x * 10 for x in range(1000)) print('Para fazer a mesma gastamos em memória: ') print(f'List Comprehension: {list_comp} bytes!') print(f'Set Comprehension: {set_comp} bytes!') print(f'Dictionary Comprehension: {dic_comp} bytes!') print(f'Generator Expression: {gen} bytes!') Para fazer a mesma gastamos em memória: List Comprehension: 8856 bytes! Set Comprehension: 32984 bytes! Dictionary Comprehension: 36960 bytes! Generator Expression: 112 bytes! # Posso iterar no Generator Expression? Sim | 4.052547 | 4 |
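One point the notes above only hint at ("Posso iterar no Generator Expression? Sim"): a generator expression can be consumed exactly once. A small illustration with arbitrary values:

gen = (x * 10 for x in range(1000))

print(sum(gen))   # 4995000 -- the generator is exhausted here
print(sum(gen))   # 0       -- a second pass yields nothing; build a new generator if needed

# Nothing is materialized until iteration, which is why getsizeof() reported a tiny,
# constant size for the generator compared with the list/set/dict comprehensions.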
python/ordenacao.py | valdirsjr/learning.data | 0 | 737 | numero1 = int(input("Digite o primeiro número: "))
numero2 = int(input("Digite o segundo número: "))
numero3 = int(input("Digite o terceiro número: "))
if (numero1 < numero2 and numero2 < numero3):
print("crescente")
else:
print("não está em ordem crescente") | numero1 = int(input("Digite o primeiro número: "))
numero2 = int(input("Digite o segundo número: "))
numero3 = int(input("Digite o terceiro número: "))
if (numero1 < numero2 and numero2 < numero3):
print("crescente")
else:
print("não está em ordem crescente") | none | 1 | 4.150928 | 4 |
|
_sources/5-extra/opg-parameters-sneeuwvlok_solution.py | kooi/ippt-od | 1 | 738 |
import turtle
tina = turtle.Turtle()
tina.shape("turtle")
tina.speed(10)
def parallellogram(lengte):
for i in range(2):
tina.forward(lengte)
tina.right(60)
tina.forward(lengte)
tina.right(120)
def sneeuwvlok(lengte, num):
for i in range(num):
parallellogram(lengte)
tina.right(360.0/num) # 360.0 zorgt voor cast van int naar float
sneeuwvlok(30, 6)
| import turtle
tina = turtle.Turtle()
tina.shape("turtle")
tina.speed(10)
def parallellogram(lengte):
for i in range(2):
tina.forward(lengte)
tina.right(60)
tina.forward(lengte)
tina.right(120)
def sneeuwvlok(lengte, num):
for i in range(num):
parallellogram(lengte)
tina.right(360.0/num) # 360.0 zorgt voor cast van int naar float
sneeuwvlok(30, 6) | nl | 0.998578 | # 360.0 zorgt voor cast van int naar float | 3.791787 | 4 |
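The closing comment about 360.0 forcing a float mainly matters under Python 2 division rules; in Python 3 the / operator already performs true division. A quick check, independent of turtle:

print(360 / 7)     # 51.42857142857143 -- true division in Python 3
print(360 // 7)    # 51                -- floor division (the old Python 2 behaviour of / on ints)
print(360.0 / 7)   # 51.42857142857143 -- the explicit-float form used above still works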
nikola/plugins/task_render_listings.py | servalproject/nikola | 1 | 739 | # Copyright (c) 2012 <NAME> y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals, print_function
import os
from pygments import highlight
from pygments.lexers import get_lexer_for_filename, TextLexer
from pygments.formatters import HtmlFormatter
from nikola.plugin_categories import Task
from nikola import utils
class Listings(Task):
"""Render pretty listings."""
name = "render_listings"
def gen_tasks(self):
"""Render pretty code listings."""
kw = {
"default_lang": self.site.config["DEFAULT_LANG"],
"listings_folder": self.site.config["LISTINGS_FOLDER"],
"output_folder": self.site.config["OUTPUT_FOLDER"],
"index_file": self.site.config["INDEX_FILE"],
}
# Things to ignore in listings
ignored_extensions = (".pyc",)
def render_listing(in_name, out_name, folders=[], files=[]):
if in_name:
with open(in_name, 'r') as fd:
try:
lexer = get_lexer_for_filename(in_name)
except:
lexer = TextLexer()
code = highlight(fd.read(), lexer,
HtmlFormatter(cssclass='code',
linenos="table", nowrap=False,
lineanchors=utils.slugify(f),
anchorlinenos=True))
title = os.path.basename(in_name)
else:
code = ''
title = ''
crumbs = utils.get_crumbs(os.path.relpath(out_name,
kw['output_folder']),
is_file=True)
context = {
'code': code,
'title': title,
'crumbs': crumbs,
'lang': kw['default_lang'],
'folders': folders,
'files': files,
'description': title,
}
self.site.render_template('listing.tmpl', out_name,
context)
flag = True
template_deps = self.site.template_system.template_deps('listing.tmpl')
for root, dirs, files in os.walk(kw['listings_folder']):
flag = False
# Render all files
out_name = os.path.join(
kw['output_folder'],
root, kw['index_file']
)
yield {
'basename': self.name,
'name': out_name,
'file_dep': template_deps,
'targets': [out_name],
'actions': [(render_listing, [None, out_name, dirs, files])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(
self.site.config['GLOBAL_CONTEXT'])],
'clean': True,
}
for f in files:
ext = os.path.splitext(f)[-1]
if ext in ignored_extensions:
continue
in_name = os.path.join(root, f)
out_name = os.path.join(
kw['output_folder'],
root,
f) + '.html'
yield {
'basename': self.name,
'name': out_name,
'file_dep': template_deps + [in_name],
'targets': [out_name],
'actions': [(render_listing, [in_name, out_name])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(
self.site.config['GLOBAL_CONTEXT'])],
'clean': True,
}
if flag:
yield {
'basename': self.name,
'actions': [],
}
| # Copyright (c) 2012 <NAME> y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals, print_function
import os
from pygments import highlight
from pygments.lexers import get_lexer_for_filename, TextLexer
from pygments.formatters import HtmlFormatter
from nikola.plugin_categories import Task
from nikola import utils
class Listings(Task):
"""Render pretty listings."""
name = "render_listings"
def gen_tasks(self):
"""Render pretty code listings."""
kw = {
"default_lang": self.site.config["DEFAULT_LANG"],
"listings_folder": self.site.config["LISTINGS_FOLDER"],
"output_folder": self.site.config["OUTPUT_FOLDER"],
"index_file": self.site.config["INDEX_FILE"],
}
# Things to ignore in listings
ignored_extensions = (".pyc",)
def render_listing(in_name, out_name, folders=[], files=[]):
if in_name:
with open(in_name, 'r') as fd:
try:
lexer = get_lexer_for_filename(in_name)
except:
lexer = TextLexer()
code = highlight(fd.read(), lexer,
HtmlFormatter(cssclass='code',
linenos="table", nowrap=False,
lineanchors=utils.slugify(f),
anchorlinenos=True))
title = os.path.basename(in_name)
else:
code = ''
title = ''
crumbs = utils.get_crumbs(os.path.relpath(out_name,
kw['output_folder']),
is_file=True)
context = {
'code': code,
'title': title,
'crumbs': crumbs,
'lang': kw['default_lang'],
'folders': folders,
'files': files,
'description': title,
}
self.site.render_template('listing.tmpl', out_name,
context)
flag = True
template_deps = self.site.template_system.template_deps('listing.tmpl')
for root, dirs, files in os.walk(kw['listings_folder']):
flag = False
# Render all files
out_name = os.path.join(
kw['output_folder'],
root, kw['index_file']
)
yield {
'basename': self.name,
'name': out_name,
'file_dep': template_deps,
'targets': [out_name],
'actions': [(render_listing, [None, out_name, dirs, files])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(
self.site.config['GLOBAL_CONTEXT'])],
'clean': True,
}
for f in files:
ext = os.path.splitext(f)[-1]
if ext in ignored_extensions:
continue
in_name = os.path.join(root, f)
out_name = os.path.join(
kw['output_folder'],
root,
f) + '.html'
yield {
'basename': self.name,
'name': out_name,
'file_dep': template_deps + [in_name],
'targets': [out_name],
'actions': [(render_listing, [in_name, out_name])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(
self.site.config['GLOBAL_CONTEXT'])],
'clean': True,
}
if flag:
yield {
'basename': self.name,
'actions': [],
}
| en | 0.741922 | # Copyright (c) 2012 <NAME> y otros. # Permission is hereby granted, free of charge, to any # person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the # Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice # shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Render pretty listings. Render pretty code listings. # Things to ignore in listings # Render all files # This is necessary to reflect changes in blog title, # sidebar links, etc. # This is necessary to reflect changes in blog title, # sidebar links, etc. | 1.916943 | 2 |
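Stripped of the doit task plumbing, the plugin's core rendering step is the Pygments call. A standalone sketch of roughly the same logic; the file name and CSS class are placeholders:

from pygments import highlight
from pygments.lexers import get_lexer_for_filename, TextLexer
from pygments.formatters import HtmlFormatter
from pygments.util import ClassNotFound

def listing_html(path, source):
    try:
        lexer = get_lexer_for_filename(path)
    except ClassNotFound:
        lexer = TextLexer()  # fall back to plain text, as the plugin does
    formatter = HtmlFormatter(cssclass='code', linenos='table')
    return highlight(source, lexer, formatter)

print(listing_html('example.py', 'print("hello")')[:120])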
sdk/python/pulumi_azure_native/storage/storage_account_static_website.py | sebtelko/pulumi-azure-native | 0 | 740 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['StorageAccountStaticWebsiteArgs', 'StorageAccountStaticWebsite']
@pulumi.input_type
class StorageAccountStaticWebsiteArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
error404_document: Optional[pulumi.Input[str]] = None,
index_document: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a StorageAccountStaticWebsite resource.
:param pulumi.Input[str] account_name: The name of the storage account within the specified resource group.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[str] error404_document: The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
:param pulumi.Input[str] index_document: The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if error404_document is not None:
pulumi.set(__self__, "error404_document", error404_document)
if index_document is not None:
pulumi.set(__self__, "index_document", index_document)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the storage account within the specified resource group.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group within the user's subscription. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="error404Document")
def error404_document(self) -> Optional[pulumi.Input[str]]:
"""
The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
"""
return pulumi.get(self, "error404_document")
@error404_document.setter
def error404_document(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "error404_document", value)
@property
@pulumi.getter(name="indexDocument")
def index_document(self) -> Optional[pulumi.Input[str]]:
"""
The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
"""
return pulumi.get(self, "index_document")
@index_document.setter
def index_document(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "index_document", value)
class StorageAccountStaticWebsite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
error404_document: Optional[pulumi.Input[str]] = None,
index_document: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Enables the static website feature of a storage account.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the storage account within the specified resource group.
:param pulumi.Input[str] error404_document: The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
:param pulumi.Input[str] index_document: The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StorageAccountStaticWebsiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Enables the static website feature of a storage account.
:param str resource_name: The name of the resource.
:param StorageAccountStaticWebsiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StorageAccountStaticWebsiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
error404_document: Optional[pulumi.Input[str]] = None,
index_document: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StorageAccountStaticWebsiteArgs.__new__(StorageAccountStaticWebsiteArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["error404_document"] = error404_document
__props__.__dict__["index_document"] = index_document
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["container_name"] = None
super(StorageAccountStaticWebsite, __self__).__init__(
'azure-native:storage:StorageAccountStaticWebsite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StorageAccountStaticWebsite':
"""
Get an existing StorageAccountStaticWebsite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StorageAccountStaticWebsiteArgs.__new__(StorageAccountStaticWebsiteArgs)
__props__.__dict__["container_name"] = None
__props__.__dict__["error404_document"] = None
__props__.__dict__["index_document"] = None
return StorageAccountStaticWebsite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Output[str]:
"""
The name of the container to upload blobs to.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="error404Document")
def error404_document(self) -> pulumi.Output[Optional[str]]:
"""
The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
"""
return pulumi.get(self, "error404_document")
@property
@pulumi.getter(name="indexDocument")
def index_document(self) -> pulumi.Output[Optional[str]]:
"""
The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
"""
return pulumi.get(self, "index_document")
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['StorageAccountStaticWebsiteArgs', 'StorageAccountStaticWebsite']
@pulumi.input_type
class StorageAccountStaticWebsiteArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
error404_document: Optional[pulumi.Input[str]] = None,
index_document: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a StorageAccountStaticWebsite resource.
:param pulumi.Input[str] account_name: The name of the storage account within the specified resource group.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[str] error404_document: The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
:param pulumi.Input[str] index_document: The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if error404_document is not None:
pulumi.set(__self__, "error404_document", error404_document)
if index_document is not None:
pulumi.set(__self__, "index_document", index_document)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the storage account within the specified resource group.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group within the user's subscription. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="error404Document")
def error404_document(self) -> Optional[pulumi.Input[str]]:
"""
The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
"""
return pulumi.get(self, "error404_document")
@error404_document.setter
def error404_document(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "error404_document", value)
@property
@pulumi.getter(name="indexDocument")
def index_document(self) -> Optional[pulumi.Input[str]]:
"""
The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
"""
return pulumi.get(self, "index_document")
@index_document.setter
def index_document(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "index_document", value)
class StorageAccountStaticWebsite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
error404_document: Optional[pulumi.Input[str]] = None,
index_document: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Enables the static website feature of a storage account.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the storage account within the specified resource group.
:param pulumi.Input[str] error404_document: The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
:param pulumi.Input[str] index_document: The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StorageAccountStaticWebsiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Enables the static website feature of a storage account.
:param str resource_name: The name of the resource.
:param StorageAccountStaticWebsiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StorageAccountStaticWebsiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
error404_document: Optional[pulumi.Input[str]] = None,
index_document: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StorageAccountStaticWebsiteArgs.__new__(StorageAccountStaticWebsiteArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["error404_document"] = error404_document
__props__.__dict__["index_document"] = index_document
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["container_name"] = None
super(StorageAccountStaticWebsite, __self__).__init__(
'azure-native:storage:StorageAccountStaticWebsite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StorageAccountStaticWebsite':
"""
Get an existing StorageAccountStaticWebsite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StorageAccountStaticWebsiteArgs.__new__(StorageAccountStaticWebsiteArgs)
__props__.__dict__["container_name"] = None
__props__.__dict__["error404_document"] = None
__props__.__dict__["index_document"] = None
return StorageAccountStaticWebsite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Output[str]:
"""
The name of the container to upload blobs to.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="error404Document")
def error404_document(self) -> pulumi.Output[Optional[str]]:
"""
The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
"""
return pulumi.get(self, "error404_document")
@property
@pulumi.getter(name="indexDocument")
def index_document(self) -> pulumi.Output[Optional[str]]:
"""
The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
"""
return pulumi.get(self, "index_document") | en | 0.783179 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a StorageAccountStaticWebsite resource. :param pulumi.Input[str] account_name: The name of the storage account within the specified resource group. :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. :param pulumi.Input[str] error404_document: The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file. :param pulumi.Input[str] index_document: The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive. The name of the storage account within the specified resource group. The name of the resource group within the user's subscription. The name is case insensitive. The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file. The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive. Enables the static website feature of a storage account. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] account_name: The name of the storage account within the specified resource group. :param pulumi.Input[str] error404_document: The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file. :param pulumi.Input[str] index_document: The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive. :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. Enables the static website feature of a storage account. :param str resource_name: The name of the resource. :param StorageAccountStaticWebsiteArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing StorageAccountStaticWebsite resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. The name of the container to upload blobs to. The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file. The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive. | 2.025002 | 2 |
python/example_code/s3/s3-python-example-get-bucket-policy.py | onehitcombo/aws-doc-sdk-examples | 3 | 741 |
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an S3 client
s3 = boto3.client('s3')
# Call to S3 to retrieve the policy for the given bucket
result = s3.get_bucket_policy(Bucket='my-bucket')
print(result)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-get-bucket-policy.py demonstrates how to list the Amazon S3 Buckets in your account.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
| # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an S3 client
s3 = boto3.client('s3')
# Call to S3 to retrieve the policy for the given bucket
result = s3.get_bucket_policy(Bucket='my-bucket')
print(result)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-get-bucket-policy.py demonstrates how to list the Amazon S3 Buckets in your account.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)] | en | 0.782885 | # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # This file is licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. A copy of the # License is located at # # http://aws.amazon.com/apache2.0/ # # This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. # Create an S3 client # Call to S3 to retrieve the policy for the given bucket # snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.] # snippet-sourcedescription:[s3-python-example-get-bucket-policy.py demonstrates how to list the Amazon S3 Buckets in your account.] # snippet-keyword:[Python] # snippet-keyword:[AWS SDK for Python (Boto3)] # snippet-keyword:[Code Sample] # snippet-keyword:[Amazon S3] # snippet-service:[s3] # snippet-sourcetype:[full-example] # snippet-sourcedate:[2018-06-25] # snippet-sourceauthor:[jschwarzwalder (AWS)] | 1.874488 | 2 |
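get_bucket_policy returns the policy document as a JSON string under the 'Policy' key; a slightly more defensive sketch (the bucket name is a placeholder):

import json
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client('s3')
try:
    response = s3.get_bucket_policy(Bucket='my-bucket')
    policy = json.loads(response['Policy'])
    for statement in policy.get('Statement', []):
        print(statement.get('Effect'), statement.get('Action'))
except ClientError as error:
    # e.g. NoSuchBucketPolicy when the bucket has no policy attached
    print('Could not fetch policy:', error.response['Error']['Code'])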
lang/py/aingle/test/gen_interop_data.py | AIngleLab/aae | 0 | 742 |
#!/usr/bin/env python3
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import aingle.codecs
import aingle.datafile
import aingle.io
import aingle.schema
NULL_CODEC = "null"
CODECS_TO_VALIDATE = aingle.codecs.KNOWN_CODECS.keys()
DATUM = {
"intField": 12,
"longField": 15234324,
"stringField": "hey",
"boolField": True,
"floatField": 1234.0,
"doubleField": -1234.0,
"bytesField": b"12312adf",
"nullField": None,
"arrayField": [5.0, 0.0, 12.0],
"mapField": {"a": {"label": "a"}, "bee": {"label": "cee"}},
"unionField": 12.0,
"enumField": "C",
"fixedField": b"1019181716151413",
"recordField": {"label": "blah", "children": [{"label": "inner", "children": []}]},
}
def generate(schema_path, output_path):
with open(schema_path) as schema_file:
interop_schema = aingle.schema.parse(schema_file.read())
for codec in CODECS_TO_VALIDATE:
filename = output_path
if codec != NULL_CODEC:
base, ext = os.path.splitext(output_path)
filename = base + "_" + codec + ext
with aingle.datafile.DataFileWriter(open(filename, "wb"), aingle.io.DatumWriter(), interop_schema, codec=codec) as dfw:
dfw.append(DATUM)
if __name__ == "__main__":
generate(sys.argv[1], sys.argv[2])
| #!/usr/bin/env python3
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import aingle.codecs
import aingle.datafile
import aingle.io
import aingle.schema
NULL_CODEC = "null"
CODECS_TO_VALIDATE = aingle.codecs.KNOWN_CODECS.keys()
DATUM = {
"intField": 12,
"longField": 15234324,
"stringField": "hey",
"boolField": True,
"floatField": 1234.0,
"doubleField": -1234.0,
"bytesField": b"12312adf",
"nullField": None,
"arrayField": [5.0, 0.0, 12.0],
"mapField": {"a": {"label": "a"}, "bee": {"label": "cee"}},
"unionField": 12.0,
"enumField": "C",
"fixedField": b"1019181716151413",
"recordField": {"label": "blah", "children": [{"label": "inner", "children": []}]},
}
def generate(schema_path, output_path):
with open(schema_path) as schema_file:
interop_schema = aingle.schema.parse(schema_file.read())
for codec in CODECS_TO_VALIDATE:
filename = output_path
if codec != NULL_CODEC:
base, ext = os.path.splitext(output_path)
filename = base + "_" + codec + ext
with aingle.datafile.DataFileWriter(open(filename, "wb"), aingle.io.DatumWriter(), interop_schema, codec=codec) as dfw:
dfw.append(DATUM)
if __name__ == "__main__":
generate(sys.argv[1], sys.argv[2]) | en | 0.866248 | #!/usr/bin/env python3 ## # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. | 1.855872 | 2 |
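Assuming the aingle reader API mirrors Avro's (a DataFileReader/DatumReader pair alongside the writer used above), the generated files can be read back to verify the round trip; the file name here is illustrative:

import aingle.datafile
import aingle.io

def read_interop(path):
    # Schema and codec are taken from the file header, so no extra arguments are needed.
    with aingle.datafile.DataFileReader(open(path, "rb"), aingle.io.DatumReader()) as reader:
        return list(reader)

records = read_interop("py_interop.aingle")
print(records[0]["stringField"], records[0]["enumField"])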
data_io/util/value_blob_erosion.py | Rekrau/PyGreentea | 0 | 743 | import numpy as np
from scipy import ndimage
def erode_value_blobs(array, steps=1, values_to_ignore=tuple(), new_value=0):
unique_values = list(np.unique(array))
all_entries_to_keep = np.zeros(shape=array.shape, dtype=np.bool)
for unique_value in unique_values:
entries_of_this_value = array == unique_value
if unique_value in values_to_ignore:
all_entries_to_keep = np.logical_or(entries_of_this_value, all_entries_to_keep)
else:
eroded_unique_indicator = ndimage.binary_erosion(entries_of_this_value, iterations=steps)
all_entries_to_keep = np.logical_or(eroded_unique_indicator, all_entries_to_keep)
result = array * all_entries_to_keep
if new_value != 0:
eroded_entries = np.logical_not(all_entries_to_keep)
new_values = new_value * eroded_entries
result += new_values
return result
| import numpy as np
from scipy import ndimage
def erode_value_blobs(array, steps=1, values_to_ignore=tuple(), new_value=0):
unique_values = list(np.unique(array))
all_entries_to_keep = np.zeros(shape=array.shape, dtype=np.bool)
for unique_value in unique_values:
entries_of_this_value = array == unique_value
if unique_value in values_to_ignore:
all_entries_to_keep = np.logical_or(entries_of_this_value, all_entries_to_keep)
else:
eroded_unique_indicator = ndimage.binary_erosion(entries_of_this_value, iterations=steps)
all_entries_to_keep = np.logical_or(eroded_unique_indicator, all_entries_to_keep)
result = array * all_entries_to_keep
if new_value != 0:
eroded_entries = np.logical_not(all_entries_to_keep)
new_values = new_value * eroded_entries
result += new_values
return result
| none | 1 | 2.642334 | 3 |
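A tiny demonstration of erode_value_blobs on a labelled 2-D array (shape and label values are arbitrary; assumes the function defined above is in scope):

import numpy as np

labels = np.zeros((7, 7), dtype=np.int32)
labels[1:6, 1:6] = 3             # one 5x5 blob carrying label 3

eroded = erode_value_blobs(labels, steps=1, new_value=9)
print(np.unique(labels))         # [0 3]
print(np.unique(eroded))         # [0 3 9] -- eroded voxels are relabelled to 9
print((eroded == 3).sum())       # 9, the surviving 3x3 core of the blob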
|
astropy/units/tests/test_logarithmic.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 3 | 744 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ...extern import six
from ...extern.six.moves import zip
import pickle
import itertools
import pytest
import numpy as np
from numpy.testing.utils import assert_allclose
from ...tests.helper import assert_quantity_allclose
from ... import units as u, constants as c
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)
class TestLogUnitCreation(object):
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
@pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize('lu_unit', lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
assert lu == lu._default_function_unit # eg, MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize('lu_cls, physical_unit', itertools.product(
lu_subclasses + [u.LogUnit], pu_sample))
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit,
function_unit=2*lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2*lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_predefined_magnitudes():
assert_quantity_allclose((-21.1*u.STmag).physical,
1.*u.erg/u.cm**2/u.s/u.AA)
assert_quantity_allclose((-48.6*u.ABmag).physical,
1.*u.erg/u.cm**2/u.s/u.Hz)
assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0)
assert_quantity_allclose((0*u.m_bol).physical,
c.L_bol0/(4.*np.pi*(10.*c.pc)**2))
def test_predefined_reinitialisation():
assert u.mag('ST') == u.STmag
assert u.mag('AB') == u.ABmag
assert u.mag('Bol') == u.M_bol
assert u.mag('bol') == u.m_bol
def test_predefined_string_roundtrip():
"""Ensure roundtripping; see #5015"""
with u.magnitude_zero_points.enable():
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regresssion for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
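# Illustrative aside (not part of the original test module): these tests exercise the fact
# that mag/dex are "function units" wrapping a physical unit, convertible back via
# .physical. For example (values arbitrary):
#
#     import astropy.units as u
#     m = u.Magnitude(100 * u.Jy)                  # -5 mag(Jy), since -2.5*log10(100) == -5
#     print(m.physical)                            # 100.0 Jy
#     print((3 * u.dex(u.cm / u.s**2)).physical)   # 1000.0 cm / s2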
class TestLogUnitStrings(object):
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == 'mag(Jy)'
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string('generic') == 'mag(Jy)'
with pytest.raises(ValueError):
lu1.to_string('fits')
lu2 = u.dex()
assert str(lu2) == 'dex'
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == 'dex(1)'
lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag)
assert str(lu3) == '2 mag(Jy)'
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == '2 mag(Jy)'
lu4 = u.mag(u.ct)
assert lu4.to_string('generic') == 'mag(ct)'
assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( '
'\\mathrm{ct} \\right)}$')
assert lu4._repr_latex_() == lu4.to_string('latex')
class TestLogUnitConversion(object):
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.) == 1.
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.) == 0.
pu = u.Unit(8.*physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15)
# Check we round-trip.
value = np.linspace(0., 10., 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize('lu_unit', lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0., 10., 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
'flu_unit, tlu_unit, physical_unit',
itertools.product(lu_units, lu_units, pu_sample))
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0., 10., 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(flu.to(tlu, values),
values * flu.function_unit.to(tlu.function_unit))
tlu2 = tlu_unit(u.Unit(100.*physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
class TestLogUnitArithmetic(object):
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.),
lu2.to(lu2.physical_unit, np.arange(3.)))
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
# For completeness, also ensure non-sensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1 ** power == u.dimensionless_unscaled
elif power == 1:
assert lu1 ** power == lu1
else:
with pytest.raises(u.UnitsError):
lu1 ** power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t**(1./power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)),
lu2.to(lu2.physical_unit, np.arange(3.)))
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.
with pytest.raises(TypeError):
lu1 - [1., 2., 3.]
@pytest.mark.parametrize(
'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
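        # dm0 is the inverse area of a sphere of radius 10 pc, so mag(dm0)
        # behaves like a distance-modulus unit.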
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg/u.s/u.AA)
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
def test_pickle():
lu1 = u.dex(u.cm/u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation(object):
@pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity],
lu_subclasses + [u.LogUnit]))
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.)) is lq
@pytest.mark.parametrize('lq_cls, physical_unit',
itertools.product(lq_subclasses, pu_sample))
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
and basic check on transformations"""
value = np.arange(1., 10.)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, 'function_unit', unit)
assert q.unit.physical_unit is getattr(unit, 'physical_unit',
u.dimensionless_unscaled)
@pytest.mark.parametrize('value, unit', (
(1.*u.mag(u.Jy), None),
(1.*u.dex(u.Jy), None),
(1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)),
(1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy))))
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(unit, 'physical_unit',
value.unit.physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100. * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.
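        # A physical factor of 100 corresponds to -2.5 * log10(100) = -5 mag.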
assert (q2._function_view / u.mag).to_value(1) == -5.
q3 = unit / 0.4
assert q3 == q1
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100., 1000.] * u.cm/u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2., 3.] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1., lu)
q = u.Quantity(1., lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10., 12., 14.] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.*u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews(object):
def setup(self):
self.lq = u.Magnitude(np.arange(10.) * u.Jy)
self.lq2 = u.Magnitude(np.arange(5.))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2. * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing(object):
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy)
assert lq1[9] == u.Magnitude(10.*u.Jy)
lq1[2] = 100.*u.Jy
assert lq1[2] == u.Magnitude(100.*u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.*u.m)
assert lq1[2] == u.Magnitude(100.*u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy)
lq1[2:4] = 100.*u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.*u.m)
assert np.all(lq1[2] == u.Magnitude(100.*u.Jy))
class TestLogQuantityArithmetic(object):
def test_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this turns
the result into a normal quantity."""
lq = u.Magnitude(np.arange(1., 11.)*u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.*u.m)
with pytest.raises(u.UnitsError):
(1.*u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value/2.)
# with dimensionless, normal units OK, but return normal quantities
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.*u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2)
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
if power == 0:
assert np.all(lq ** power == 1.)
elif power == 1:
assert np.all(lq ** power == lq)
else:
with pytest.raises(u.UnitsError):
lq ** power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit ** power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
with pytest.raises(TypeError):
lq ** lq
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
@pytest.mark.parametrize('other', pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1., 10.), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
DMmag = u.mag(dm0)
m_st = 10. * u.STmag
dm = 5. * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg/u.s/u.AA)
assert np.abs(M_st.physical /
(m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) < 1.e-15
class TestLogQuantityComparisons(object):
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
# On python2, ordering operations always succeed, given essentially
# meaningless results.
if not six.PY2:
with pytest.raises(TypeError):
lq > 'a'
assert not (lq == 'a')
assert lq != 'a'
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy)
lq2 = u.Magnitude(2.*u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.*u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.*u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.*u.m
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1., 4.))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.*u.m
class TestLogQuantityMethods(object):
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace',
'std', 'var', 'ptp', 'diff', 'ediff1d'))
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value ==
getattr(mag._function_view, method)().value)
if method in ('std', 'ptp', 'diff', 'ediff1d'):
assert res.unit == u.mag()
elif method == 'var':
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value ==
mag.value.clip(2., 4.))
@pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum'))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value ==
getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value ==
self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize('method', ('prod', 'cumprod'))
def test_never_ok(self, method):
with pytest.raises(ValueError):
getattr(self.mJy, method)()
with pytest.raises(ValueError):
getattr(self.m1, method)()
class TestLogQuantityUfuncs(object):
"""Spot checks on ufuncs."""
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
def test_power(self):
assert np.all(np.power(self.mJy, 0.) == 1.)
assert np.all(np.power(self.m1, 1.) == self.m1)
assert np.all(np.power(self.mJy, 1.) == self.mJy)
assert np.all(np.power(self.m1, 2.) == self.m1 ** 2)
with pytest.raises(u.UnitsError):
np.power(self.mJy, 2.)
def test_not_implemented_with_physical_unit(self):
with pytest.raises(u.UnitsError):
np.square(self.mJy)
assert np.all(np.square(self.m1) == self.m1 ** 2)
djconnectwise/tests/mocks.py | kti-sam/django-connectwise | 0 | 745 |
import os
from mock import patch
from datetime import datetime, date, time
import json
import responses
from . import fixtures
from django.utils import timezone
CW_MEMBER_IMAGE_FILENAME = 'AnonymousMember.png'
def create_mock_call(method_name, return_value, side_effect=None):
"""Utility function for mocking the specified function or method"""
_patch = patch(method_name, side_effect=side_effect)
mock_get_call = _patch.start()
if not side_effect:
mock_get_call.return_value = return_value
return mock_get_call, _patch
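# Illustrative sketch of how create_mock_call is meant to be used. The method
# path below is one of the real targets patched elsewhere in this module, but
# the surrounding test code is hypothetical:
#
#     mock_get_boards, _patch = create_mock_call(
#         'djconnectwise.api.ServiceAPIClient.get_boards', [])
#     try:
#         ...  # exercise code that calls ServiceAPIClient.get_boards()
#     finally:
#         _patch.stop()  # patches started here are not stopped automatically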
def company_info_get_company_info_call(return_value):
method_name = 'djconnectwise.api.CompanyInfoManager.get_company_info'
return create_mock_call(method_name, return_value)
def company_api_get_call(return_value):
method_name = 'djconnectwise.api.CompanyAPIClient.get_companies'
return create_mock_call(method_name, return_value)
def company_api_by_id_call(return_value, raised=None):
method_name = 'djconnectwise.api.CompanyAPIClient.by_id'
return create_mock_call(method_name, return_value, side_effect=raised)
def company_api_get_company_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.CompanyAPIClient.get_company_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def company_api_get_company_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.CompanyAPIClient.get_company_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def projects_api_get_project_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.ProjectAPIClient.get_project_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def projects_api_get_project_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.ProjectAPIClient.get_project_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def projects_api_get_project_phases_call(return_value, raised=None):
method_name = 'djconnectwise.api.ProjectAPIClient.get_project_phases'
return create_mock_call(method_name, return_value, side_effect=raised)
def project_api_get_projects_call(return_value):
method_name = 'djconnectwise.api.ProjectAPIClient.get_projects'
return create_mock_call(method_name, return_value)
def project_api_get_project_call(return_value, raised=None):
method_name = 'djconnectwise.api.ProjectAPIClient.get_project'
return create_mock_call(method_name, return_value, side_effect=raised)
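# The ticket helpers below emulate a paginated endpoint: page 1 yields a single
# fixture ticket and every other page yields an empty list, presumably so that
# callers which keep requesting pages until an empty batch is returned stop
# after the first page.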
def _project_api_tickets_call(page=1, page_size=25, conditions=None):
    # Use None instead of a mutable default so the condition list does not
    # persist (and keep growing) across calls.
    conditions = list(conditions) if conditions else []
    return_value = []
    test_date = date(1948, 5, 14)
    test_time = time(12, 0, 0, tzinfo=timezone.get_current_timezone())
    test_datetime = datetime.combine(test_date, test_time)
    conditions.append('lastUpdated>' + timezone.localtime(
        value=test_datetime).isoformat()
    )
    if page == 1:
        return_value = [fixtures.API_PROJECT_TICKET]
    return return_value
def project_api_tickets_call():
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = create_mock_call(
method_name,
None,
side_effect=_project_api_tickets_call)
return mock_call, _patch
def project_api_tickets_test_command(return_value):
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = create_mock_call(method_name, return_value)
return mock_call, _patch
def sales_api_by_id_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.by_id'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_opportunities_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_opportunities'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_opportunity_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_opportunity_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_opportunity_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_opportunity_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_opportunity_stages_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_opportunity_stages'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_sales_probabilities_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_probabilities'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_schedule_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_schedule_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_schedule_entries_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_entries'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_schedule_entry_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_entry'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_calendars_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_calendars'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_holidays_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_holidays'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_holiday_lists_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_holiday_lists'
return create_mock_call(method_name, return_value, side_effect=raised)
def time_api_get_time_entries_call(return_value, raised=None):
method_name = 'djconnectwise.api.TimeAPIClient.get_time_entries'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_activities_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_activities'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_activities_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_activity_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_activities_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_activity_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_single_activity_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_single_activity'
return create_mock_call(method_name, return_value, side_effect=raised)
def _service_api_tickets_call(page=1, page_size=25, conditions=[]):
return_value = []
test_date = date(1948, 5, 14)
test_time = time(12, 0, 0, tzinfo=timezone.get_current_timezone())
test_datetime = datetime.combine(test_date, test_time)
conditions.append('lastUpdated>' + timezone.localtime(
value=test_datetime).isoformat()
)
if page == 1:
return_value = [fixtures.API_SERVICE_TICKET]
return return_value
def service_api_tickets_call():
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = create_mock_call(
method_name,
None,
side_effect=_service_api_tickets_call)
return mock_call, _patch
def _service_api_get_ticket_call(ticket_id):
return fixtures.API_SERVICE_TICKET_MAP.get(ticket_id)
def service_api_get_ticket_call(raised=None):
method_name = 'djconnectwise.api.TicketAPIMixin.get_ticket'
mock_call, _patch = create_mock_call(
method_name,
None,
side_effect=raised if raised else _service_api_get_ticket_call)
return mock_call, _patch
def service_api_get_boards_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_boards'
return create_mock_call(method_name, return_value)
def service_api_update_ticket_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.update_ticket'
return create_mock_call(method_name, return_value)
def service_api_get_statuses_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_statuses'
return create_mock_call(method_name, return_value)
def service_api_get_priorities_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_priorities'
return create_mock_call(method_name, return_value)
def service_api_get_teams_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_teams'
return create_mock_call(method_name, return_value)
def service_api_get_notes_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_notes'
return create_mock_call(method_name, return_value)
def service_api_get_slas_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_slas'
return create_mock_call(method_name, return_value)
def service_api_get_sla_priorities_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_slapriorities'
return create_mock_call(method_name, return_value)
def service_api_get_types_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_types'
return create_mock_call(method_name, return_value)
def service_api_get_subtypes_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_subtypes'
return create_mock_call(method_name, return_value)
def service_api_get_items_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_items'
return create_mock_call(method_name, return_value)
def sales_api_get_opportunity_notes_call(return_value):
method_name = 'djconnectwise.api.SalesAPIClient.get_notes'
return create_mock_call(method_name, return_value)
def service_api_get_locations_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_locations'
return create_mock_call(method_name, return_value)
def system_api_get_connectwise_version_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_connectwise_version'
return create_mock_call(method_name, return_value)
def system_api_get_members_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_members'
return create_mock_call(method_name, return_value)
def system_api_get_member_image_by_photo_id_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.' \
+ 'get_member_image_by_photo_id'
return create_mock_call(method_name, return_value)
def system_api_get_member_count_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_members'
return create_mock_call(method_name, return_value)
def system_api_create_callback_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.create_callback'
return create_mock_call(method_name, return_value)
def system_api_delete_callback_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.delete_callback'
return create_mock_call(method_name, return_value)
def system_api_get_callbacks_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_callbacks'
return create_mock_call(method_name, return_value)
def system_api_get_territories_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_territories'
return create_mock_call(method_name, return_value)
def system_api_get_other_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_mycompanyother'
return create_mock_call(method_name, return_value)
def cw_api_fetch_resource_call(return_value):
method_name = 'djconnectwise.api.ConnectWiseAPIClient.fetch_resource'
return create_mock_call(method_name, return_value)
def get(url, data, headers=None, status=200):
"""Set up requests mock for given URL and JSON-serializable data."""
get_raw(url, json.dumps(data), "application/json", headers, status=status)
def time_api_get_work_types_call(return_value):
method_name = 'djconnectwise.api.TimeAPIClient.get_work_types'
return create_mock_call(method_name, return_value)
def time_api_get_work_roles_call(return_value):
method_name = 'djconnectwise.api.TimeAPIClient.get_work_roles'
return create_mock_call(method_name, return_value)
def finance_api_get_agreements_call(return_value):
method_name = 'djconnectwise.api.FinanceAPIClient.get_agreements'
return create_mock_call(method_name, return_value)
def get_raw(url, data, content_type="application/octet-stream", headers=None,
status=200):
"""Set up requests mock for given URL."""
responses.add(
responses.GET,
url,
body=data,
status=status,
content_type=content_type,
adding_headers=headers,
)
def get_member_avatar():
"""Return the avatar image data in the tests directory."""
cw_member_image_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
CW_MEMBER_IMAGE_FILENAME
)
with open(cw_member_image_path, 'rb') as anonymous_image_file:
return anonymous_image_file.read()
| import os
from mock import patch
from datetime import datetime, date, time
import json
import responses
from . import fixtures
from django.utils import timezone
CW_MEMBER_IMAGE_FILENAME = 'AnonymousMember.png'
def create_mock_call(method_name, return_value, side_effect=None):
"""Utility function for mocking the specified function or method"""
_patch = patch(method_name, side_effect=side_effect)
mock_get_call = _patch.start()
if not side_effect:
mock_get_call.return_value = return_value
return mock_get_call, _patch
def company_info_get_company_info_call(return_value):
method_name = 'djconnectwise.api.CompanyInfoManager.get_company_info'
return create_mock_call(method_name, return_value)
def company_api_get_call(return_value):
method_name = 'djconnectwise.api.CompanyAPIClient.get_companies'
return create_mock_call(method_name, return_value)
def company_api_by_id_call(return_value, raised=None):
method_name = 'djconnectwise.api.CompanyAPIClient.by_id'
return create_mock_call(method_name, return_value, side_effect=raised)
def company_api_get_company_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.CompanyAPIClient.get_company_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def company_api_get_company_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.CompanyAPIClient.get_company_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def projects_api_get_project_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.ProjectAPIClient.get_project_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def projects_api_get_project_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.ProjectAPIClient.get_project_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def projects_api_get_project_phases_call(return_value, raised=None):
method_name = 'djconnectwise.api.ProjectAPIClient.get_project_phases'
return create_mock_call(method_name, return_value, side_effect=raised)
def project_api_get_projects_call(return_value):
method_name = 'djconnectwise.api.ProjectAPIClient.get_projects'
return create_mock_call(method_name, return_value)
def project_api_get_project_call(return_value, raised=None):
method_name = 'djconnectwise.api.ProjectAPIClient.get_project'
return create_mock_call(method_name, return_value, side_effect=raised)
def _project_api_tickets_call(page=1, page_size=25, conditions=[]):
return_value = []
test_date = date(1948, 5, 14)
test_time = time(12, 0, 0, tzinfo=timezone.get_current_timezone())
test_datetime = datetime.combine(test_date, test_time)
conditions.append('lastUpdated>' + timezone.localtime(
value=test_datetime).isoformat()
)
if page == 1:
return_value = [fixtures.API_PROJECT_TICKET]
return return_value
def project_api_tickets_call():
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = create_mock_call(
method_name,
None,
side_effect=_project_api_tickets_call)
return mock_call, _patch
def project_api_tickets_test_command(return_value):
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = create_mock_call(method_name, return_value)
return mock_call, _patch
def sales_api_by_id_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.by_id'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_opportunities_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_opportunities'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_opportunity_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_opportunity_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_opportunity_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_opportunity_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_opportunity_stages_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_opportunity_stages'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_sales_probabilities_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_probabilities'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_schedule_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_schedule_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_schedule_entries_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_entries'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_schedule_entry_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_entry'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_calendars_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_calendars'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_holidays_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_holidays'
return create_mock_call(method_name, return_value, side_effect=raised)
def schedule_api_get_holiday_lists_call(return_value, raised=None):
method_name = 'djconnectwise.api.ScheduleAPIClient.get_holiday_lists'
return create_mock_call(method_name, return_value, side_effect=raised)
def time_api_get_time_entries_call(return_value, raised=None):
method_name = 'djconnectwise.api.TimeAPIClient.get_time_entries'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_activities_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_activities'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_activities_statuses_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_activity_statuses'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_activities_types_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_activity_types'
return create_mock_call(method_name, return_value, side_effect=raised)
def sales_api_get_single_activity_call(return_value, raised=None):
method_name = 'djconnectwise.api.SalesAPIClient.get_single_activity'
return create_mock_call(method_name, return_value, side_effect=raised)
def _service_api_tickets_call(page=1, page_size=25, conditions=[]):
return_value = []
test_date = date(1948, 5, 14)
test_time = time(12, 0, 0, tzinfo=timezone.get_current_timezone())
test_datetime = datetime.combine(test_date, test_time)
conditions.append('lastUpdated>' + timezone.localtime(
value=test_datetime).isoformat()
)
if page == 1:
return_value = [fixtures.API_SERVICE_TICKET]
return return_value
def service_api_tickets_call():
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = create_mock_call(
method_name,
None,
side_effect=_service_api_tickets_call)
return mock_call, _patch
def _service_api_get_ticket_call(ticket_id):
return fixtures.API_SERVICE_TICKET_MAP.get(ticket_id)
def service_api_get_ticket_call(raised=None):
method_name = 'djconnectwise.api.TicketAPIMixin.get_ticket'
mock_call, _patch = create_mock_call(
method_name,
None,
side_effect=raised if raised else _service_api_get_ticket_call)
return mock_call, _patch
def service_api_get_boards_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_boards'
return create_mock_call(method_name, return_value)
def service_api_update_ticket_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.update_ticket'
return create_mock_call(method_name, return_value)
def service_api_get_statuses_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_statuses'
return create_mock_call(method_name, return_value)
def service_api_get_priorities_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_priorities'
return create_mock_call(method_name, return_value)
def service_api_get_teams_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_teams'
return create_mock_call(method_name, return_value)
def service_api_get_notes_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_notes'
return create_mock_call(method_name, return_value)
def service_api_get_slas_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_slas'
return create_mock_call(method_name, return_value)
def service_api_get_sla_priorities_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_slapriorities'
return create_mock_call(method_name, return_value)
def service_api_get_types_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_types'
return create_mock_call(method_name, return_value)
def service_api_get_subtypes_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_subtypes'
return create_mock_call(method_name, return_value)
def service_api_get_items_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_items'
return create_mock_call(method_name, return_value)
def sales_api_get_opportunity_notes_call(return_value):
method_name = 'djconnectwise.api.SalesAPIClient.get_notes'
return create_mock_call(method_name, return_value)
def service_api_get_locations_call(return_value):
method_name = 'djconnectwise.api.ServiceAPIClient.get_locations'
return create_mock_call(method_name, return_value)
def system_api_get_connectwise_version_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_connectwise_version'
return create_mock_call(method_name, return_value)
def system_api_get_members_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_members'
return create_mock_call(method_name, return_value)
def system_api_get_member_image_by_photo_id_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.' \
+ 'get_member_image_by_photo_id'
return create_mock_call(method_name, return_value)
def system_api_get_member_count_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_members'
return create_mock_call(method_name, return_value)
def system_api_create_callback_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.create_callback'
return create_mock_call(method_name, return_value)
def system_api_delete_callback_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.delete_callback'
return create_mock_call(method_name, return_value)
def system_api_get_callbacks_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_callbacks'
return create_mock_call(method_name, return_value)
def system_api_get_territories_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_territories'
return create_mock_call(method_name, return_value)
def system_api_get_other_call(return_value):
method_name = 'djconnectwise.api.SystemAPIClient.get_mycompanyother'
return create_mock_call(method_name, return_value)
def cw_api_fetch_resource_call(return_value):
method_name = 'djconnectwise.api.ConnectWiseAPIClient.fetch_resource'
return create_mock_call(method_name, return_value)
def get(url, data, headers=None, status=200):
"""Set up requests mock for given URL and JSON-serializable data."""
get_raw(url, json.dumps(data), "application/json", headers, status=status)
def time_api_get_work_types_call(return_value):
method_name = 'djconnectwise.api.TimeAPIClient.get_work_types'
return create_mock_call(method_name, return_value)
def time_api_get_work_roles_call(return_value):
method_name = 'djconnectwise.api.TimeAPIClient.get_work_roles'
return create_mock_call(method_name, return_value)
def finance_api_get_agreements_call(return_value):
method_name = 'djconnectwise.api.FinanceAPIClient.get_agreements'
return create_mock_call(method_name, return_value)
def get_raw(url, data, content_type="application/octet-stream", headers=None,
status=200):
"""Set up requests mock for given URL."""
responses.add(
responses.GET,
url,
body=data,
status=status,
content_type=content_type,
adding_headers=headers,
)
def get_member_avatar():
"""Return the avatar image data in the tests directory."""
cw_member_image_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
CW_MEMBER_IMAGE_FILENAME
)
with open(cw_member_image_path, 'rb') as anonymous_image_file:
return anonymous_image_file.read() | en | 0.486864 | Utility function for mocking the specified function or method Set up requests mock for given URL and JSON-serializable data. Set up requests mock for given URL. Return the avatar image data in the tests directory. | 2.311571 | 2 |
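The helpers above start a mock.patch and hand back both the mock and the patcher, so the calling test is responsible for stopping it. A minimal usage sketch, assuming the module is importable as mocks and that a fixtures.API_BOARD_LIST fixture exists (both names are assumptions, not taken from the file above):

# Hypothetical test built on the helpers above; `mocks` and `fixtures.API_BOARD_LIST`
# are illustrative names only.
from django.test import TestCase
from . import mocks, fixtures

class TestBoardSyncMocks(TestCase):
    def setUp(self):
        # Patch ServiceAPIClient.get_boards so no real HTTP request is made.
        self.mock_call, self._patch = mocks.service_api_get_boards_call(
            fixtures.API_BOARD_LIST)

    def tearDown(self):
        # create_mock_call starts the patch; the caller must stop it.
        self._patch.stop()

    def test_get_boards_is_mocked(self):
        from djconnectwise.api import ServiceAPIClient
        # The class attribute is now a MagicMock, so calling it returns the fixture
        # instead of hitting the ConnectWise API.
        self.assertEqual(ServiceAPIClient.get_boards(), fixtures.API_BOARD_LIST)
        self.mock_call.assert_called_once()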
visual_odometry/visual_odometry.py | vineeths96/Visual-Odometry | 2 | 746 | <reponame>vineeths96/Visual-Odometry<filename>visual_odometry/visual_odometry.py<gh_stars>1-10
from .monovideoodometry import MonoVideoOdometry
from .parameters import *
def visual_odometry(
image_path="./input/sequences/10/image_0/",
pose_path="./input/poses/10.txt",
fivepoint=False,
):
"""
Plots the estimated odometry path using either five point estimation or eight point estimation
:param image_path: Path to the directory of camera images
:param pose_path: Path to the directory of pose file
:param fivepoint: Whether to use five point or eight point method
:return: None
"""
vo = MonoVideoOdometry(image_path, pose_path, FOCAL, PP, K, LUCAS_KANADE_PARAMS, fivepoint)
trajectory = np.zeros(shape=(800, 1200, 3))
frame_count = 0
while vo.hasNextFrame():
frame_count += 1
frame = vo.current_frame
cv2.imshow("Frame", frame)
k = cv2.waitKey(1)
if k == 27:
break
vo.process_frame()
estimated_coordinates = vo.get_mono_coordinates()
true_coordinates = vo.get_true_coordinates()
print("MSE Error: ", np.linalg.norm(estimated_coordinates - true_coordinates))
print("x: {}, y: {}, z: {}".format(*[str(pt) for pt in estimated_coordinates]))
print("True_x: {}, True_y: {}, True_z: {}".format(*[str(pt) for pt in true_coordinates]))
draw_x, draw_y, draw_z = [int(round(x)) for x in estimated_coordinates]
true_x, true_y, true_z = [int(round(x)) for x in true_coordinates]
trajectory = cv2.circle(trajectory, (true_x + 400, true_z + 100), 1, list((0, 0, 255)), 4)
trajectory = cv2.circle(trajectory, (draw_x + 400, draw_z + 100), 1, list((0, 255, 0)), 4)
cv2.putText(
trajectory,
"Actual Position:",
(140, 90),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(255, 255, 255),
1,
)
cv2.putText(trajectory, "Red", (270, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
cv2.putText(
trajectory,
"Estimated Odometry Position:",
(30, 120),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(255, 255, 255),
1,
)
cv2.putText(
trajectory,
"Green",
(270, 120),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 255, 0),
1,
)
cv2.imshow("trajectory", trajectory)
if frame_count % 5 == 0:
cv2.imwrite(f"./results/trajectory/trajectory_{frame_count}.png", trajectory)
cv2.imwrite(f"./results/trajectory.png", trajectory)
cv2.destroyAllWindows()
| from .monovideoodometry import MonoVideoOdometry
from .parameters import *
def visual_odometry(
image_path="./input/sequences/10/image_0/",
pose_path="./input/poses/10.txt",
fivepoint=False,
):
"""
Plots the estimated odometry path using either five point estimation or eight point estimation
:param image_path: Path to the directory of camera images
:param pose_path: Path to the directory of pose file
:param fivepoint: Whether to use five point or eight point method
:return: None
"""
vo = MonoVideoOdometry(image_path, pose_path, FOCAL, PP, K, LUCAS_KANADE_PARAMS, fivepoint)
trajectory = np.zeros(shape=(800, 1200, 3))
frame_count = 0
while vo.hasNextFrame():
frame_count += 1
frame = vo.current_frame
cv2.imshow("Frame", frame)
k = cv2.waitKey(1)
if k == 27:
break
vo.process_frame()
estimated_coordinates = vo.get_mono_coordinates()
true_coordinates = vo.get_true_coordinates()
print("MSE Error: ", np.linalg.norm(estimated_coordinates - true_coordinates))
print("x: {}, y: {}, z: {}".format(*[str(pt) for pt in estimated_coordinates]))
print("True_x: {}, True_y: {}, True_z: {}".format(*[str(pt) for pt in true_coordinates]))
draw_x, draw_y, draw_z = [int(round(x)) for x in estimated_coordinates]
true_x, true_y, true_z = [int(round(x)) for x in true_coordinates]
trajectory = cv2.circle(trajectory, (true_x + 400, true_z + 100), 1, list((0, 0, 255)), 4)
trajectory = cv2.circle(trajectory, (draw_x + 400, draw_z + 100), 1, list((0, 255, 0)), 4)
cv2.putText(
trajectory,
"Actual Position:",
(140, 90),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(255, 255, 255),
1,
)
cv2.putText(trajectory, "Red", (270, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
cv2.putText(
trajectory,
"Estimated Odometry Position:",
(30, 120),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(255, 255, 255),
1,
)
cv2.putText(
trajectory,
"Green",
(270, 120),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 255, 0),
1,
)
cv2.imshow("trajectory", trajectory)
if frame_count % 5 == 0:
cv2.imwrite(f"./results/trajectory/trajectory_{frame_count}.png", trajectory)
cv2.imwrite(f"./results/trajectory.png", trajectory)
cv2.destroyAllWindows() | en | 0.813991 | Plots the estimated odometry path using either five point estimation or eight point estimation :param image_path: Path to the directory of camera images :param pose_path: Path to the directory of pose file :param fivepoint: Whether to use five point or eight point method :return: None | 2.841986 | 3 |
tf-2-data-parallelism/src/utils.py | Amirosimani/amazon-sagemaker-script-mode | 144 | 747 | import os
import numpy as np
import tensorflow as tf
def get_train_data(train_dir, batch_size):
train_images = np.load(os.path.join(train_dir, 'train_images.npy'))
train_labels = np.load(os.path.join(train_dir, 'train_labels.npy'))
print('train_images', train_images.shape, 'train_labels', train_labels.shape)
dataset_train = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
dataset_train = dataset_train.repeat().shuffle(10000).batch(batch_size)
return dataset_train
def get_val_data(val_dir):
test_images = np.load(os.path.join(val_dir, 'validation_images.npy'))
test_labels = np.load(os.path.join(val_dir, 'validation_labels.npy'))
print('validation_images', test_images.shape, 'validation_labels', test_labels.shape)
dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
return dataset_test
| import os
import numpy as np
import tensorflow as tf
def get_train_data(train_dir, batch_size):
train_images = np.load(os.path.join(train_dir, 'train_images.npy'))
train_labels = np.load(os.path.join(train_dir, 'train_labels.npy'))
print('train_images', train_images.shape, 'train_labels', train_labels.shape)
dataset_train = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
dataset_train = dataset_train.repeat().shuffle(10000).batch(batch_size)
return dataset_train
def get_val_data(val_dir):
test_images = np.load(os.path.join(val_dir, 'validation_images.npy'))
test_labels = np.load(os.path.join(val_dir, 'validation_labels.npy'))
print('validation_images', test_images.shape, 'validation_labels', test_labels.shape)
dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
return dataset_test
| none | 1 | 2.684903 | 3 |
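Because get_train_data returns a dataset that repeats indefinitely, any training loop has to bound each epoch explicitly. A short sketch of how these helpers would typically feed a Keras fit call; the input directories, sample count and the model itself are placeholders, not taken from the file above:

# Sketch only: directories, num_train_samples and the stand-in model are assumptions.
import tensorflow as tf

batch_size = 128
train_ds = get_train_data('/opt/ml/input/data/train', batch_size)
val_ds = get_val_data('/opt/ml/input/data/validation').batch(batch_size)

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')

num_train_samples = 50000  # in practice, read this from the .npy array shapes
model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=10,
    # Required because the training dataset repeats indefinitely.
    steps_per_epoch=num_train_samples // batch_size,
)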
|
scripts/run_rbf_comparison_car_air_top5.py | CaptainCandy/influence-release | 0 | 748 | <filename>scripts/run_rbf_comparison_car_air_top5.py
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 16:26:35 2019
@author: Administrator
"""
# Forked from run_rbf_comparison.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import math
import copy
import numpy as np
import pandas as pd
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing
import scipy
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import random
import sys
sys.path.append("C:/Tang/influence-release-master") #设置自定义包的搜索路径
from load_vehicles import load_vehicles
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
from sklearn.metrics.pairwise import rbf_kernel
from influence.inceptionModel import BinaryInceptionModel
from influence.smooth_hinge import SmoothHinge
from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS
import influence.dataset as dataset
from influence.dataset import DataSet
from influence.dataset_poisoning import generate_inception_features
#%%
def get_Y_pred_correct_inception(model):
Y_test = model.data_sets.test.labels
if np.min(Y_test) < -0.5:
Y_test = (np.copy(Y_test) + 1) / 2
Y_pred = model.sess.run(model.preds, feed_dict=model.all_test_feed_dict)
Y_pred_correct = np.zeros([len(Y_test)])
for idx, label in enumerate(Y_test):
Y_pred_correct[idx] = Y_pred[idx, int(label)]
return Y_pred_correct
num_classes = 2
num_train_ex_per_class = 40
num_test_ex_per_class = 300
dataset_name = 'carair_%s_%s' % (num_train_ex_per_class, num_test_ex_per_class)
image_data_sets = load_vehicles(
num_train_ex_per_class=num_train_ex_per_class,
num_test_ex_per_class=num_test_ex_per_class)
weight_decay = 0.001
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
#%%
### Generate kernelized feature vectors
X_train = image_data_sets.train.x
X_test = image_data_sets.test.x
Y_train = np.copy(image_data_sets.train.labels) * 2 - 1
Y_test = np.copy(image_data_sets.test.labels) * 2 - 1
num_train = X_train.shape[0]
num_test = X_test.shape[0]
X_stacked = np.vstack((X_train, X_test))
gamma = 0.05
weight_decay = 0.0001
K = rbf_kernel(X_stacked, gamma = gamma / num_train)
# =============================================================================
# L = slin.cholesky(K, lower=True)
# L_train = L[:num_train, :num_train]
# L_test = L[num_train:, :num_train]
# =============================================================================
K_train = K[:num_train, :num_train]
K_test = K[num_train:, :num_train]
### Compare top 5 influential examples from each network
test_idx = 0
## RBF
input_channels = 1
weight_decay = 0.001
batch_size = num_train
initial_learning_rate = 0.001
keep_probs = None
max_lbfgs_iter = 1000
use_bias = False
decay_epochs = [1000, 10000]
tf.reset_default_graph()
X_train = image_data_sets.train.x
Y_train = image_data_sets.train.labels * 2 - 1
train = DataSet(K_train, Y_train)
test = DataSet(K_test, Y_test)
data_sets = base.Datasets(train=train, validation=None, test=test)
input_dim = data_sets.train.x.shape[1]
# Train with hinge
print('Train rbf with hinge...')
rbf_model = SmoothHinge(
temp=0,
use_bias=use_bias,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output7',
log_dir='log',
model_name='carair_rbf_hinge_t-0')
rbf_model.train()
hinge_W = rbf_model.sess.run(rbf_model.params)[0]
# Then load weights into smoothed version
print('Load weights into smoothed version...')
tf.reset_default_graph()
rbf_model = SmoothHinge(
temp=0.001,
use_bias=use_bias,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output7',
log_dir='log',
model_name='car_air_rbf_hinge_t-0.001')
params_feed_dict = {}
params_feed_dict[rbf_model.W_placeholder] = hinge_W
rbf_model.sess.run(rbf_model.set_params_op, feed_dict=params_feed_dict)
rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(rbf_model.data_sets.train.labels)),
force_refresh=True)
#%%
## Inception
dataset_name = 'carair_40_300'
test_idx = 0
# Generate inception features
print('Generate inception features...')
img_side = 299
num_channels = 3
num_train_ex_per_class = 40
num_test_ex_per_class = 300
batch_size = 20 #TODO: change this according to the configuration
# reset_default_graph roughly resets the current thread's graph: everything defined in the tf session becomes invalid and has to be rebuilt, i.e. a new neural network session is started
tf.reset_default_graph()
full_model_name = '%s_inception' % dataset_name
# The statement below defines a binary Inception classifier
full_model = BinaryInceptionModel(
img_side=img_side,
num_channels=num_channels,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=image_data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=True,
train_dir='output9',
log_dir='log',
model_name=full_model_name)
# The code below uses the Inception convolutional layers to generate features
train_inception_features_val = generate_inception_features(
full_model,
image_data_sets.train.x,
image_data_sets.train.labels,
batch_size=batch_size)
test_inception_features_val = generate_inception_features(
full_model,
image_data_sets.test.x,
image_data_sets.test.labels,
batch_size=batch_size)
train = DataSet(
train_inception_features_val,
image_data_sets.train.labels)
test = DataSet(
test_inception_features_val,
image_data_sets.test.labels)
# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
validation = None
# The code above ran the part of the Inception model before the fully connected layer, so the output features have 2048 dimensions
data_sets = base.Datasets(train=train, validation=validation, test=test)
# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
# validation = None
# data_sets = base.Datasets(train=train, validation=validation, test=test)
# The code below runs a binary logistic regression on the features produced by the trained Inception convolutional layers, dropping the FC (fully connected) layer that would normally follow them
print('Train logistic regression after inception...')
input_dim = 2048
weight_decay = 0.001
batch_size = 20
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
max_lbfgs_iter = 1000
num_classes = 2
tf.reset_default_graph()
inception_model = BinaryLogisticRegressionWithLBFGS(
input_dim=input_dim,
weight_decay=weight_decay,
max_lbfgs_iter=max_lbfgs_iter,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output9',
log_dir='log',
model_name='%s_inception_onlytop' % dataset_name)
inception_model.train()
# =============================================================================
# inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss(
# [test_idx],
# np.arange(len(inception_model.data_sets.train.labels)),
# force_refresh=True)
#
# x_test = X_test[test_idx, :]
# y_test = Y_test[test_idx]
#
#
# distances = dataset.find_distances(x_test, X_train)
# flipped_idx = Y_train != y_test
# rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
# rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)
# inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model)
#
#
# np.savez(
# 'output7/rbf_carair_results_%s' % test_idx,
# test_idx=test_idx,
# distances=distances,
# flipped_idx=flipped_idx,
# rbf_margins_test=rbf_margins_test,
# rbf_margins_train=rbf_margins_train,
# inception_Y_pred_correct=inception_Y_pred_correct,
# rbf_predicted_loss_diffs=rbf_predicted_loss_diffs,
# inception_predicted_loss_diffs=inception_predicted_loss_diffs
# )
# =============================================================================
#%%
print('Save results...')
#rand_test = random.sample(range(1, 600),50)
#np.savez('output7/rand_test_point', rand_test=rand_test)
for test_idx in range(1, 600):
rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(rbf_model.data_sets.train.labels)),
force_refresh=True)
inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(inception_model.data_sets.train.labels)),
force_refresh=True)
x_test = X_test[test_idx, :]
y_test = Y_test[test_idx]
distances = dataset.find_distances(x_test, X_train)
flipped_idx = Y_train != y_test
rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)
inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model)
np.savez(
'output9/rbf_carair_results_%s' % test_idx,
test_idx=test_idx,
distances=distances,
flipped_idx=flipped_idx,
rbf_margins_test=rbf_margins_test,
rbf_margins_train=rbf_margins_train,
inception_Y_pred_correct=inception_Y_pred_correct,
rbf_predicted_loss_diffs=rbf_predicted_loss_diffs,
inception_predicted_loss_diffs=inception_predicted_loss_diffs
)
| <filename>scripts/run_rbf_comparison_car_air_top5.py
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 16:26:35 2019
@author: Administrator
"""
# Forked from run_rbf_comparison.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import math
import copy
import numpy as np
import pandas as pd
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing
import scipy
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import random
import sys
sys.path.append("C:/Tang/influence-release-master") #设置自定义包的搜索路径
from load_vehicles import load_vehicles
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
from sklearn.metrics.pairwise import rbf_kernel
from influence.inceptionModel import BinaryInceptionModel
from influence.smooth_hinge import SmoothHinge
from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS
import influence.dataset as dataset
from influence.dataset import DataSet
from influence.dataset_poisoning import generate_inception_features
#%%
def get_Y_pred_correct_inception(model):
Y_test = model.data_sets.test.labels
if np.min(Y_test) < -0.5:
Y_test = (np.copy(Y_test) + 1) / 2
Y_pred = model.sess.run(model.preds, feed_dict=model.all_test_feed_dict)
Y_pred_correct = np.zeros([len(Y_test)])
for idx, label in enumerate(Y_test):
Y_pred_correct[idx] = Y_pred[idx, int(label)]
return Y_pred_correct
num_classes = 2
num_train_ex_per_class = 40
num_test_ex_per_class = 300
dataset_name = 'carair_%s_%s' % (num_train_ex_per_class, num_test_ex_per_class)
image_data_sets = load_vehicles(
num_train_ex_per_class=num_train_ex_per_class,
num_test_ex_per_class=num_test_ex_per_class)
weight_decay = 0.001
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
#%%
### Generate kernelized feature vectors
X_train = image_data_sets.train.x
X_test = image_data_sets.test.x
Y_train = np.copy(image_data_sets.train.labels) * 2 - 1
Y_test = np.copy(image_data_sets.test.labels) * 2 - 1
num_train = X_train.shape[0]
num_test = X_test.shape[0]
X_stacked = np.vstack((X_train, X_test))
gamma = 0.05
weight_decay = 0.0001
K = rbf_kernel(X_stacked, gamma = gamma / num_train)
# =============================================================================
# L = slin.cholesky(K, lower=True)
# L_train = L[:num_train, :num_train]
# L_test = L[num_train:, :num_train]
# =============================================================================
K_train = K[:num_train, :num_train]
K_test = K[num_train:, :num_train]
### Compare top 5 influential examples from each network
test_idx = 0
## RBF
input_channels = 1
weight_decay = 0.001
batch_size = num_train
initial_learning_rate = 0.001
keep_probs = None
max_lbfgs_iter = 1000
use_bias = False
decay_epochs = [1000, 10000]
tf.reset_default_graph()
X_train = image_data_sets.train.x
Y_train = image_data_sets.train.labels * 2 - 1
train = DataSet(K_train, Y_train)
test = DataSet(K_test, Y_test)
data_sets = base.Datasets(train=train, validation=None, test=test)
input_dim = data_sets.train.x.shape[1]
# Train with hinge
print('Train rbf with hinge...')
rbf_model = SmoothHinge(
temp=0,
use_bias=use_bias,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output7',
log_dir='log',
model_name='carair_rbf_hinge_t-0')
rbf_model.train()
hinge_W = rbf_model.sess.run(rbf_model.params)[0]
# Then load weights into smoothed version
print('Load weights into smoothed version...')
tf.reset_default_graph()
rbf_model = SmoothHinge(
temp=0.001,
use_bias=use_bias,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output7',
log_dir='log',
model_name='car_air_rbf_hinge_t-0.001')
params_feed_dict = {}
params_feed_dict[rbf_model.W_placeholder] = hinge_W
rbf_model.sess.run(rbf_model.set_params_op, feed_dict=params_feed_dict)
rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(rbf_model.data_sets.train.labels)),
force_refresh=True)
#%%
## Inception
dataset_name = 'carair_40_300'
test_idx = 0
# Generate inception features
print('Generate inception features...')
img_side = 299
num_channels = 3
num_train_ex_per_class = 40
num_test_ex_per_class = 300
batch_size = 20 #TODO: change this according to the configuration
# reset_default_graph roughly resets the current thread's graph: everything defined in the tf session becomes invalid and has to be rebuilt, i.e. a new neural network session is started
tf.reset_default_graph()
full_model_name = '%s_inception' % dataset_name
# The statement below defines a binary Inception classifier
full_model = BinaryInceptionModel(
img_side=img_side,
num_channels=num_channels,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=image_data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=True,
train_dir='output9',
log_dir='log',
model_name=full_model_name)
# The code below uses the Inception convolutional layers to generate features
train_inception_features_val = generate_inception_features(
full_model,
image_data_sets.train.x,
image_data_sets.train.labels,
batch_size=batch_size)
test_inception_features_val = generate_inception_features(
full_model,
image_data_sets.test.x,
image_data_sets.test.labels,
batch_size=batch_size)
train = DataSet(
train_inception_features_val,
image_data_sets.train.labels)
test = DataSet(
test_inception_features_val,
image_data_sets.test.labels)
# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
validation = None
# The code above ran the part of the Inception model before the fully connected layer, so the output features have 2048 dimensions
data_sets = base.Datasets(train=train, validation=validation, test=test)
# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
# validation = None
# data_sets = base.Datasets(train=train, validation=validation, test=test)
# The code below runs a binary logistic regression on the features produced by the trained Inception convolutional layers, dropping the FC (fully connected) layer that would normally follow them
print('Train logistic regression after inception...')
input_dim = 2048
weight_decay = 0.001
batch_size = 20
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
max_lbfgs_iter = 1000
num_classes = 2
tf.reset_default_graph()
inception_model = BinaryLogisticRegressionWithLBFGS(
input_dim=input_dim,
weight_decay=weight_decay,
max_lbfgs_iter=max_lbfgs_iter,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output9',
log_dir='log',
model_name='%s_inception_onlytop' % dataset_name)
inception_model.train()
# =============================================================================
# inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss(
# [test_idx],
# np.arange(len(inception_model.data_sets.train.labels)),
# force_refresh=True)
#
# x_test = X_test[test_idx, :]
# y_test = Y_test[test_idx]
#
#
# distances = dataset.find_distances(x_test, X_train)
# flipped_idx = Y_train != y_test
# rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
# rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)
# inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model)
#
#
# np.savez(
# 'output7/rbf_carair_results_%s' % test_idx,
# test_idx=test_idx,
# distances=distances,
# flipped_idx=flipped_idx,
# rbf_margins_test=rbf_margins_test,
# rbf_margins_train=rbf_margins_train,
# inception_Y_pred_correct=inception_Y_pred_correct,
# rbf_predicted_loss_diffs=rbf_predicted_loss_diffs,
# inception_predicted_loss_diffs=inception_predicted_loss_diffs
# )
# =============================================================================
#%%
print('Save results...')
#rand_test = random.sample(range(1, 600),50)
#np.savez('output7/rand_test_point', rand_test=rand_test)
for test_idx in range(1, 600):
rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(rbf_model.data_sets.train.labels)),
force_refresh=True)
inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(inception_model.data_sets.train.labels)),
force_refresh=True)
x_test = X_test[test_idx, :]
y_test = Y_test[test_idx]
distances = dataset.find_distances(x_test, X_train)
flipped_idx = Y_train != y_test
rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)
inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model)
np.savez(
'output9/rbf_carair_results_%s' % test_idx,
test_idx=test_idx,
distances=distances,
flipped_idx=flipped_idx,
rbf_margins_test=rbf_margins_test,
rbf_margins_train=rbf_margins_train,
inception_Y_pred_correct=inception_Y_pred_correct,
rbf_predicted_loss_diffs=rbf_predicted_loss_diffs,
inception_predicted_loss_diffs=inception_predicted_loss_diffs
)
| en | 0.322613 | # -*- coding: utf-8 -*- Created on Tue Mar 19 16:26:35 2019 @author: Administrator # Forked from run_rbf_comparison.py #设置自定义包的搜索路径 #%% #%% ### Generate kernelized feature vectors # ============================================================================= # L = slin.cholesky(K, lower=True) # L_train = L[:num_train, :num_train] # L_test = L[num_train:, :num_train] # ============================================================================= ### Compare top 5 influential examples from each network ## RBF # Train with hinge # Then load weights into smoothed version #%% ## Inception # Generate inception features #TODO: 需要根据配置修改 # reset_default_graph大概就是重置当前线程,让tf session里定义的东西都失效,重来。就是重开一个神经网络session # 下面的语句是定义一个inception双分类器 # 下面的代码是在使用inception的卷积层生成特征 # train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name) # train = DataSet(train_f['inception_features_val'], train_f['labels']) # test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name) # test = DataSet(test_f['inception_features_val'], test_f['labels']) # 上面的代码是训练了inception模型的全连接层前面的部分,因此输出的feature有2048个维度 # train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name) # train = DataSet(train_f['inception_features_val'], train_f['labels']) # test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name) # test = DataSet(test_f['inception_features_val'], test_f['labels']) # validation = None # data_sets = base.Datasets(train=train, validation=validation, test=test) # 下面的代码利用从inception卷积层训练完成后的feature进行一个二分类逻辑回归,取消卷积层后面的FC全连接层 # ============================================================================= # inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss( # [test_idx], # np.arange(len(inception_model.data_sets.train.labels)), # force_refresh=True) # # x_test = X_test[test_idx, :] # y_test = Y_test[test_idx] # # # distances = dataset.find_distances(x_test, X_train) # flipped_idx = Y_train != y_test # rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict) # rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict) # inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model) # # # np.savez( # 'output7/rbf_carair_results_%s' % test_idx, # test_idx=test_idx, # distances=distances, # flipped_idx=flipped_idx, # rbf_margins_test=rbf_margins_test, # rbf_margins_train=rbf_margins_train, # inception_Y_pred_correct=inception_Y_pred_correct, # rbf_predicted_loss_diffs=rbf_predicted_loss_diffs, # inception_predicted_loss_diffs=inception_predicted_loss_diffs # ) # ============================================================================= #%% #rand_test = random.sample(range(1, 600),50) #np.savez('output7/rand_test_point', rand_test=rand_test) | 1.864609 | 2 |
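In the script above the kernelized model never sees raw pixels: each row of K_train and K_test is a vector of RBF similarities to the training points. A short sketch of how a new example would be featurized in the same way; the helper itself is illustrative, only the gamma / num_train scaling mirrors the script:

# Featurize one unseen image the way the RBF model above expects it: as its
# kernel similarity to every training example, with the same gamma scaling.
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

def rbf_features(x_new, X_train, gamma=0.05):
    num_train = X_train.shape[0]
    x_new = np.atleast_2d(x_new)
    # Shape (1, num_train): one kernel feature per training point.
    return rbf_kernel(x_new, X_train, gamma=gamma / num_train)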
data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py | Keesiu/meta-kaggle | 0 | 749 | import sys
import pickle
##########################################################
# usage
# pypy find_4g.py xid_train.p ../../data/train
# xid_train.p is a list like ['loIP1tiwELF9YNZQjSUO',''....] to specify
# the order of samples in traing data
# ../../data/train is the path of original train data
##########################################################
xid_name=sys.argv[1]
data_path=sys.argv[2]
xid=pickle.load(open(xid_name, 'rb')) #xid_train.p or xid_test.p
newc=pickle.load(open('newc.p', 'rb'))
newc2=pickle.load(open('cutcmd3g_for_4g.p', 'rb'))
cmd4g={}
for i in newc2:
for j in newc:
cmd4g[(i[0],i[1],i[2],j)]=0
print(newc)
for c,f in enumerate(xid):
count={}
fo=open(data_path+'/'+f+'.asm')
tot=0
a=-1
b=-1
d=-1
e=-1
for line in fo:
xx=line.split()
for x in xx:
if x in newc:
a=b
b=d
d=e
e=x
if (a,b,d,e) in cmd4g:
if (a,b,d,e) not in count:
count[(a,b,d,e)]=0
count[(a,b,d,e)]+=1
tot+=1
fo.close()
if True:#c%10000==0:
print(c*1.0/len(xid),tot)
for i in count:
cmd4g[i]=count[i]+cmd4g[i]
del count
cmd4gx={}
for i in cmd4g:
if cmd4g[i]>0:
cmd4gx[i]=cmd4g[i]
print(len(cmd4gx))
pickle.dump(cmd4gx,open('cmd4g.p','wb'))
| import sys
import pickle
##########################################################
# usage
# pypy find_4g.py xid_train.p ../../data/train
# xid_train.p is a list like ['loIP1tiwELF9YNZQjSUO',''....] to specify
# the order of samples in traing data
# ../../data/train is the path of original train data
##########################################################
xid_name=sys.argv[1]
data_path=sys.argv[2]
xid=pickle.load(open(xid_name, 'rb')) #xid_train.p or xid_test.p
newc=pickle.load(open('newc.p', 'rb'))
newc2=pickle.load(open('cutcmd3g_for_4g.p', 'rb'))
cmd4g={}
for i in newc2:
for j in newc:
cmd4g[(i[0],i[1],i[2],j)]=0
print(newc)
for c,f in enumerate(xid):
count={}
fo=open(data_path+'/'+f+'.asm')
tot=0
a=-1
b=-1
d=-1
e=-1
for line in fo:
xx=line.split()
for x in xx:
if x in newc:
a=b
b=d
d=e
e=x
if (a,b,d,e) in cmd4g:
if (a,b,d,e) not in count:
count[(a,b,d,e)]=0
count[(a,b,d,e)]+=1
tot+=1
fo.close()
if True:#c%10000==0:
print(c*1.0/len(xid),tot)
for i in count:
cmd4g[i]=count[i]+cmd4g[i]
del count
cmd4gx={}
for i in cmd4g:
if cmd4g[i]>0:
cmd4gx[i]=cmd4g[i]
print(len(cmd4gx))
pickle.dump(cmd4gx,open('cmd4g.p','wb'))
| de | 0.386486 | ########################################################## # usage # pypy find_4g.py xid_train.p ../../data/train # xid_train.p is a list like ['loIP1tiwELF9YNZQjSUO',''....] to specify # the order of samples in traing data # ../../data/train is the path of original train data ########################################################## #xid_train.p or xid_test.p #c%10000==0: | 2.286075 | 2 |
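The script above keeps a rolling window of the last four in-vocabulary opcodes and counts only the windows that were pre-selected in cmd4g. The same idea in a compact, self-contained form; tokens, vocab and allowed are stand-ins for the parsed .asm opcodes, the newc vocabulary and the cmd4g key set:

# Count opcode 4-grams over a token stream, restricted to a known vocabulary.
from collections import Counter

def count_4grams(tokens, vocab, allowed=None):
    ops = [t for t in tokens if t in vocab]        # same filter as `x in newc`
    grams = zip(ops, ops[1:], ops[2:], ops[3:])    # rolling window of four opcodes
    counts = Counter(grams)
    if allowed is not None:                        # mirrors the `(a,b,d,e) in cmd4g` check
        counts = Counter({g: c for g, c in counts.items() if g in allowed})
    return counts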
src/domain/enums/__init__.py | Antonio-Gabriel/easepay_backend | 1 | 750 | from .months import Months
from .sizes import Size | from .months import Months
from .sizes import Size | none | 1 | 1.136667 | 1 |
|
pygments/lexers/trafficscript.py | blu-base/pygments | 1 | 751 | <reponame>blu-base/pygments<filename>pygments/lexers/trafficscript.py<gh_stars>1-10
"""
pygments.lexers.trafficscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for RiverBed's TrafficScript (RTS) language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment
__all__ = ['RtsLexer']
class RtsLexer(RegexLexer):
"""
For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_
.. versionadded:: 2.1
"""
name = 'TrafficScript'
aliases = ['trafficscript', 'rts']
filenames = ['*.rts']
tokens = {
'root' : [
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"', String, 'escapable-string'),
(r'(0x[0-9a-fA-F]+|\d+)', Number),
(r'\d+\.\d+', Number.Float),
(r'\$[a-zA-Z](\w|_)*', Name.Variable),
(r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
(r'[a-zA-Z][\w.]*', Name.Function),
(r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
(r'(>=|<=|==|!=|'
r'&&|\|\||'
r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
r'>>|<<|'
r'\+\+|--|=>)', Operator),
(r'[ \t\r]+', Text),
(r'#[^\n]*', Comment),
],
'escapable-string' : [
(r'\\[tsn]', String.Escape),
(r'[^"]', String),
(r'"', String, '#pop'),
],
}
| """
pygments.lexers.trafficscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for RiverBed's TrafficScript (RTS) language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment
__all__ = ['RtsLexer']
class RtsLexer(RegexLexer):
"""
For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_
.. versionadded:: 2.1
"""
name = 'TrafficScript'
aliases = ['trafficscript', 'rts']
filenames = ['*.rts']
tokens = {
'root' : [
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"', String, 'escapable-string'),
(r'(0x[0-9a-fA-F]+|\d+)', Number),
(r'\d+\.\d+', Number.Float),
(r'\$[a-zA-Z](\w|_)*', Name.Variable),
(r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
(r'[a-zA-Z][\w.]*', Name.Function),
(r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
(r'(>=|<=|==|!=|'
r'&&|\|\||'
r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
r'>>|<<|'
r'\+\+|--|=>)', Operator),
(r'[ \t\r]+', Text),
(r'#[^\n]*', Comment),
],
'escapable-string' : [
(r'\\[tsn]', String.Escape),
(r'[^"]', String),
(r'"', String, '#pop'),
],
} | en | 0.61424 | pygments.lexers.trafficscript ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for RiverBed's TrafficScript (RTS) language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_ .. versionadded:: 2.1 | 2.095476 | 2 |
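A quick way to exercise the lexer above is to push a snippet through Pygments' standard highlight pipeline. A minimal sketch; the TrafficScript snippet itself is made up for illustration:

# Highlight a small TrafficScript snippet with the RtsLexer defined above.
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.trafficscript import RtsLexer

code = '$ip = request.getRemoteIP();\nif ($ip == "10.0.0.1") { connection.discard(); }\n'
print(highlight(code, RtsLexer(), TerminalFormatter()))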
pandas/tests/indexes/test_common.py | dimithras/pandas | 1 | 752 | """
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import pandas._testing as tm
class TestCommon:
def test_droplevel(self, index):
# GH 21115
if isinstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.droplevel([]).equals(index)
for level in index.name, [index.name]:
if isinstance(index.name, tuple) and level is index.name:
# GH 21121 : droplevel with tuple name
continue
with pytest.raises(ValueError):
index.droplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.droplevel(level)
def test_constructor_non_hashable_name(self, index):
# GH 20527
if isinstance(index, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamed = [["1"]]
# With .rename()
with pytest.raises(TypeError, match=message):
index.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renamed)
def test_constructor_unwraps_index(self, index):
if isinstance(index, pd.MultiIndex):
raise pytest.skip("MultiIndex has no ._data")
a = index
b = type(a)(a)
tm.assert_equal(a._data, b._data)
@pytest.mark.parametrize("itm", [101, "no_int"])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, index, itm):
with pytest.raises(IndexError):
index[itm]
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.union(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_values()
expected = index.set_names(expected_name).sort_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH35847
# Test intersections with various name combinations
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.intersection(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test copy.intersection(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.intersection(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.intersection(second).sort_values()
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
def test_to_flat_index(self, index):
# 22866
if isinstance(index, MultiIndex):
pytest.skip("Separate expectation for MultiIndex")
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index):
new_name = "This is the new name for this index"
        # don't test a MultiIndex here (as it's tested separately)
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.rename(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
# FIXME: dont leave commented-out
# with pytest.raises(TypeError, match="list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ("A", "B")
index.rename(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_copy_and_deepcopy(self, index):
from copy import copy, deepcopy
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
for func in (copy, deepcopy):
idx_copy = func(index)
assert idx_copy is not index
assert idx_copy.equals(index)
new_copy = index.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_unique(self, index):
        # don't test a MultiIndex here (as it's tested separately)
# don't test a CategoricalIndex because categories change (GH 18291)
if isinstance(index, (MultiIndex, CategoricalIndex)):
pytest.skip("Skip check for MultiIndex/CategoricalIndex")
# GH 17896
expected = index.drop_duplicates()
for level in 0, index.name, None:
result = index.unique(level=level)
tm.assert_index_equal(result, expected)
msg = "Too many levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.unique(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.unique(level="wrong")
def test_get_unique_index(self, index):
# MultiIndex tested separately
if not len(index) or isinstance(index, MultiIndex):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_unique = index[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique is True
try:
assert idx_unique.hasnans is False
except NotImplementedError:
pass
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, idx_unique)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if is_period_dtype(index.dtype):
vals = index[[0] * 5]._data
vals[0] = pd.NaT
elif needs_i8_conversion(index.dtype):
vals = index.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = index.values[[0] * 5]
vals[0] = np.nan
vals_unique = vals[:2]
if index.dtype.kind in ["m", "M"]:
# i.e. needs_i8_conversion but not period_dtype, as above
vals = type(index._data)._simple_new(vals, dtype=index.dtype)
vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype)
idx_nan = index._shallow_copy(vals)
idx_unique_nan = index._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique is True
assert idx_nan.dtype == index.dtype
assert idx_unique_nan.dtype == index.dtype
for dropna, expected in zip([False, True], [idx_unique_nan, idx_unique]):
for i in [idx_nan, idx_unique_nan]:
result = i._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, expected)
def test_mutability(self, index):
if not len(index):
pytest.skip("Skip check for empty Index")
msg = "Index does not support mutable operations"
with pytest.raises(TypeError, match=msg):
index[0] = index[0]
def test_view(self, index):
assert index.view().name == index.name
def test_searchsorted_monotonic(self, index):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if isinstance(index, (MultiIndex, pd.IntervalIndex)):
pytest.skip("Skip check for MultiIndex/IntervalIndex")
# nothing to test if the index is empty
if index.empty:
pytest.skip("Skip check for empty Index")
value = index[0]
# determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (index == value).argmin()
if expected_right == 0:
# all values are the same, expected_right should be length
expected_right = len(index)
# test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if index.is_monotonic_increasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
ss_left = index.searchsorted(value, side="left")
assert expected_left == ss_left
ss_right = index.searchsorted(value, side="right")
assert expected_right == ss_right
elif index.is_monotonic_decreasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
else:
# non-monotonic should raise.
with pytest.raises(ValueError):
index._searchsorted_monotonic(value, side="left")
def test_pickle(self, index):
original_name, index.name = index.name, "foo"
unpickled = tm.round_trip_pickle(index)
assert index.equals(unpickled)
index.name = original_name
def test_drop_duplicates(self, index, keep):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
if isinstance(index, RangeIndex):
pytest.skip(
"RangeIndex is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
if len(index) == 0:
pytest.skip(
"empty index is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
# make unique index
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# make duplicated index
n = len(unique_idx)
duplicated_selection = np.random.choice(n, int(n * 1.5))
idx = holder(unique_idx.values[duplicated_selection])
# Series.duplicated is tested separately
expected_duplicated = (
pd.Series(duplicated_selection).duplicated(keep=keep).values
)
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)
# Series.drop_duplicates is tested separately
expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))
tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)
def test_drop_duplicates_no_duplicates(self, index):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
# make unique index
if isinstance(index, RangeIndex):
# RangeIndex cannot have duplicates
unique_idx = index
else:
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# check on unique index
expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")
tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)
result_dropped = unique_idx.drop_duplicates()
tm.assert_index_equal(result_dropped, unique_idx)
# validate shallow copy
assert result_dropped is not unique_idx
def test_drop_duplicates_inplace(self, index):
msg = r"drop_duplicates\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
index.drop_duplicates(inplace=True)
def test_has_duplicates(self, index):
holder = type(index)
if not len(index) or isinstance(index, (MultiIndex, RangeIndex)):
# MultiIndex tested separately in:
# tests/indexes/multi/test_unique_and_duplicates.
# RangeIndex is unique by definition.
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")
idx = holder([index[0]] * 5)
assert idx.is_unique is False
assert idx.has_duplicates is True
@pytest.mark.parametrize(
"dtype",
["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
)
def test_astype_preserves_name(self, index, dtype):
# https://github.com/pandas-dev/pandas/issues/32013
if isinstance(index, MultiIndex):
index.names = ["idx" + str(i) for i in range(index.nlevels)]
else:
index.name = "idx"
try:
# Some of these conversions cannot succeed so we use a try / except
result = index.astype(dtype)
except (ValueError, TypeError, NotImplementedError, SystemError):
return
if isinstance(index, MultiIndex):
assert result.names == index.names
else:
assert result.name == index.name
def test_ravel_deprecation(self, index):
# GH#19956 ravel returning ndarray is deprecated
with tm.assert_produces_warning(FutureWarning):
index.ravel()
@pytest.mark.parametrize("na_position", [None, "middle"])
def test_sort_values_invalid_na_position(index_with_missing, na_position):
if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# datetime-like indices will get na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Series to sort differently (xref 35922)
pytest.xfail("sort_values does not support na_position kwarg")
elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
if na_position not in ["first", "last"]:
with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
index_with_missing.sort_values(na_position=na_position)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_with_missing(index_with_missing, na_position):
# GH 35584. Test that sort_values works with missing values,
# sort non-missing and place missing according to na_position
if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# datetime-like indices will get na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Series to sort differently (xref 35922)
pytest.xfail("sort_values does not support na_position kwarg")
elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
missing_count = np.sum(index_with_missing.isna())
not_na_vals = index_with_missing[index_with_missing.notna()].values
sorted_values = np.sort(not_na_vals)
if na_position == "first":
sorted_values = np.concatenate([[None] * missing_count, sorted_values])
else:
sorted_values = np.concatenate([sorted_values, [None] * missing_count])
expected = type(index_with_missing)(sorted_values)
result = index_with_missing.sort_values(na_position=na_position)
tm.assert_index_equal(result, expected)
| """
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import pandas._testing as tm
class TestCommon:
def test_droplevel(self, index):
# GH 21115
if isinstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.droplevel([]).equals(index)
for level in index.name, [index.name]:
if isinstance(index.name, tuple) and level is index.name:
# GH 21121 : droplevel with tuple name
continue
with pytest.raises(ValueError):
index.droplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.droplevel(level)
def test_constructor_non_hashable_name(self, index):
# GH 20527
if isinstance(index, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamed = [["1"]]
# With .rename()
with pytest.raises(TypeError, match=message):
index.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renamed)
def test_constructor_unwraps_index(self, index):
if isinstance(index, pd.MultiIndex):
raise pytest.skip("MultiIndex has no ._data")
a = index
b = type(a)(a)
tm.assert_equal(a._data, b._data)
@pytest.mark.parametrize("itm", [101, "no_int"])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, index, itm):
with pytest.raises(IndexError):
index[itm]
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.union(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_values()
expected = index.set_names(expected_name).sort_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH35847
# Test intersections with various name combinations
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.intersection(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test copy.intersection(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.intersection(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.intersection(second).sort_values()
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
def test_to_flat_index(self, index):
# 22866
if isinstance(index, MultiIndex):
pytest.skip("Separate expectation for MultiIndex")
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index):
new_name = "This is the new name for this index"
        # don't test a MultiIndex here (as it's tested separately)
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.rename(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
# FIXME: dont leave commented-out
# with pytest.raises(TypeError, match="list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ("A", "B")
index.rename(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_copy_and_deepcopy(self, index):
from copy import copy, deepcopy
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
for func in (copy, deepcopy):
idx_copy = func(index)
assert idx_copy is not index
assert idx_copy.equals(index)
new_copy = index.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_unique(self, index):
        # don't test a MultiIndex here (as it's tested separately)
# don't test a CategoricalIndex because categories change (GH 18291)
if isinstance(index, (MultiIndex, CategoricalIndex)):
pytest.skip("Skip check for MultiIndex/CategoricalIndex")
# GH 17896
expected = index.drop_duplicates()
for level in 0, index.name, None:
result = index.unique(level=level)
tm.assert_index_equal(result, expected)
msg = "Too many levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.unique(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.unique(level="wrong")
def test_get_unique_index(self, index):
# MultiIndex tested separately
if not len(index) or isinstance(index, MultiIndex):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_unique = index[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique is True
try:
assert idx_unique.hasnans is False
except NotImplementedError:
pass
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, idx_unique)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if is_period_dtype(index.dtype):
vals = index[[0] * 5]._data
vals[0] = pd.NaT
elif needs_i8_conversion(index.dtype):
vals = index.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = index.values[[0] * 5]
vals[0] = np.nan
vals_unique = vals[:2]
if index.dtype.kind in ["m", "M"]:
# i.e. needs_i8_conversion but not period_dtype, as above
vals = type(index._data)._simple_new(vals, dtype=index.dtype)
vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype)
idx_nan = index._shallow_copy(vals)
idx_unique_nan = index._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique is True
assert idx_nan.dtype == index.dtype
assert idx_unique_nan.dtype == index.dtype
for dropna, expected in zip([False, True], [idx_unique_nan, idx_unique]):
for i in [idx_nan, idx_unique_nan]:
result = i._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, expected)
def test_mutability(self, index):
if not len(index):
pytest.skip("Skip check for empty Index")
msg = "Index does not support mutable operations"
with pytest.raises(TypeError, match=msg):
index[0] = index[0]
def test_view(self, index):
assert index.view().name == index.name
def test_searchsorted_monotonic(self, index):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if isinstance(index, (MultiIndex, pd.IntervalIndex)):
pytest.skip("Skip check for MultiIndex/IntervalIndex")
# nothing to test if the index is empty
if index.empty:
pytest.skip("Skip check for empty Index")
value = index[0]
# determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (index == value).argmin()
if expected_right == 0:
# all values are the same, expected_right should be length
expected_right = len(index)
# test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if index.is_monotonic_increasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
ss_left = index.searchsorted(value, side="left")
assert expected_left == ss_left
ss_right = index.searchsorted(value, side="right")
assert expected_right == ss_right
elif index.is_monotonic_decreasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
else:
# non-monotonic should raise.
with pytest.raises(ValueError):
index._searchsorted_monotonic(value, side="left")
def test_pickle(self, index):
original_name, index.name = index.name, "foo"
unpickled = tm.round_trip_pickle(index)
assert index.equals(unpickled)
index.name = original_name
def test_drop_duplicates(self, index, keep):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
if isinstance(index, RangeIndex):
pytest.skip(
"RangeIndex is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
if len(index) == 0:
pytest.skip(
"empty index is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
# make unique index
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# make duplicated index
n = len(unique_idx)
duplicated_selection = np.random.choice(n, int(n * 1.5))
idx = holder(unique_idx.values[duplicated_selection])
# Series.duplicated is tested separately
expected_duplicated = (
pd.Series(duplicated_selection).duplicated(keep=keep).values
)
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)
# Series.drop_duplicates is tested separately
expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))
tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)
def test_drop_duplicates_no_duplicates(self, index):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
# make unique index
if isinstance(index, RangeIndex):
# RangeIndex cannot have duplicates
unique_idx = index
else:
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# check on unique index
expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")
tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)
result_dropped = unique_idx.drop_duplicates()
tm.assert_index_equal(result_dropped, unique_idx)
# validate shallow copy
assert result_dropped is not unique_idx
def test_drop_duplicates_inplace(self, index):
msg = r"drop_duplicates\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
index.drop_duplicates(inplace=True)
def test_has_duplicates(self, index):
holder = type(index)
if not len(index) or isinstance(index, (MultiIndex, RangeIndex)):
# MultiIndex tested separately in:
# tests/indexes/multi/test_unique_and_duplicates.
# RangeIndex is unique by definition.
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")
idx = holder([index[0]] * 5)
assert idx.is_unique is False
assert idx.has_duplicates is True
@pytest.mark.parametrize(
"dtype",
["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
)
def test_astype_preserves_name(self, index, dtype):
# https://github.com/pandas-dev/pandas/issues/32013
if isinstance(index, MultiIndex):
index.names = ["idx" + str(i) for i in range(index.nlevels)]
else:
index.name = "idx"
try:
# Some of these conversions cannot succeed so we use a try / except
result = index.astype(dtype)
except (ValueError, TypeError, NotImplementedError, SystemError):
return
if isinstance(index, MultiIndex):
assert result.names == index.names
else:
assert result.name == index.name
def test_ravel_deprecation(self, index):
# GH#19956 ravel returning ndarray is deprecated
with tm.assert_produces_warning(FutureWarning):
index.ravel()
@pytest.mark.parametrize("na_position", [None, "middle"])
def test_sort_values_invalid_na_position(index_with_missing, na_position):
if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# datetime-like indices will get na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Series to sort differently (xref 35922)
pytest.xfail("sort_values does not support na_position kwarg")
elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
if na_position not in ["first", "last"]:
with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
index_with_missing.sort_values(na_position=na_position)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_with_missing(index_with_missing, na_position):
# GH 35584. Test that sort_values works with missing values,
# sort non-missing and place missing according to na_position
if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# datetime-like indices will get na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Series to sort differently (xref 35922)
pytest.xfail("sort_values does not support na_position kwarg")
elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
missing_count = np.sum(index_with_missing.isna())
not_na_vals = index_with_missing[index_with_missing.notna()].values
sorted_values = np.sort(not_na_vals)
if na_position == "first":
sorted_values = np.concatenate([[None] * missing_count, sorted_values])
else:
sorted_values = np.concatenate([sorted_values, [None] * missing_count])
expected = type(index_with_missing)(sorted_values)
result = index_with_missing.sort_values(na_position=na_position)
tm.assert_index_equal(result, expected)
| en | 0.759899 | Collection of tests asserting things that should be true for any index subclass. Makes use of the `indices` fixture defined in pandas/tests/indexes/conftest.py. # GH 21115 # Tested separately in test_multi.py # GH 21121 : droplevel with tuple name # GH 20527 # With .rename() # With .set_names() # FutureWarning from non-tuple sequence of nd indexing # GH 9943 9862 # Test unions with various name combinations # Do not test MultiIndex or repeats # Test copy.union(copy) # Test copy.union(empty) # Test empty.union(copy) # Test empty.union(empty) # test copy.union(subset) - need sort for unicode and string # GH35847 # Test intersections with various name combinations # Test copy.intersection(copy) # Test copy.intersection(empty) # Test empty.intersection(copy) # Test empty.intersection(empty) # test copy.intersection(subset) - need sort for unicode and string # 22866 # don't tests a MultiIndex here (as its tested separated) # should return None # FIXME: dont leave commented-out # with pytest.raises(TypeError, match="list-like"): # # should still fail even if it would be the right length # ind.set_names("a") # rename in place just leaves tuples and other containers alone # don't test a MultiIndex here (as its tested separated) # don't test a CategoricalIndex because categories change (GH 18291) # GH 17896 # MultiIndex tested separately # We test against `idx_unique`, so first we make sure it's unique # and doesn't contain nans. # nans: # i.e. needs_i8_conversion but not period_dtype, as above # GH17271 # not implemented for tuple searches in MultiIndex # or Intervals searches in IntervalIndex # nothing to test if the index is empty # determine the expected results (handle dupes for 'right') # all values are the same, expected_right should be length # test _searchsorted_monotonic in all cases # test searchsorted only for increasing # non-monotonic should raise. # make unique index # make duplicated index # Series.duplicated is tested separately # Series.drop_duplicates is tested separately # make unique index # RangeIndex cannot have duplicates # check on unique index # validate shallow copy # MultiIndex tested separately in: # tests/indexes/multi/test_unique_and_duplicates. # RangeIndex is unique by definition. # https://github.com/pandas-dev/pandas/issues/32013 # Some of these conversions cannot succeed so we use a try / except # GH#19956 ravel returning ndarray is deprecated # datetime-like indices will get na_position kwarg as part of # synchronizing duplicate-sorting behavior, because we currently expect # them, other indices, and Series to sort differently (xref 35922) # GH 35584. Test that sort_values works with missing values, # sort non-missing and place missing according to na_position # datetime-like indices will get na_position kwarg as part of # synchronizing duplicate-sorting behavior, because we currently expect # them, other indices, and Series to sort differently (xref 35922) | 2.464618 | 2 |
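
The pandas test module in the row above exercises a handful of Index behaviours (duplicated/drop_duplicates, in-place renaming, unique with a level argument). A minimal, self-contained sketch of those behaviours on a plain Index, using only public pandas API and arbitrary sample values:

import pandas as pd

idx = pd.Index([3, 1, 2, 2, 1], name="idx")

# duplicated()/drop_duplicates() mirror the Series semantics checked in the tests.
print(idx.duplicated(keep="first"))      # [False False False  True  True]
print(idx.drop_duplicates(keep="last"))  # Index([3, 2, 1], ...)

# rename(..., inplace=True) returns None and mutates the name in place.
assert idx.rename("new_name", inplace=True) is None
assert idx.name == "new_name"

# unique(level=...) accepts 0, the index name, or None for a flat Index.
print(idx.unique(level="new_name"))      # Index([3, 1, 2], ...)
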
tests/test_dynamics.py | leasanchez/BiorbdOptim | 34 | 753 | <filename>tests/test_dynamics.py
import pytest
import numpy as np
from casadi import MX, SX
import biorbd_casadi as biorbd
from bioptim.dynamics.configure_problem import ConfigureProblem
from bioptim.dynamics.dynamics_functions import DynamicsFunctions
from bioptim.interfaces.biorbd_interface import BiorbdInterface
from bioptim.misc.enums import ControlType
from bioptim.optimization.non_linear_program import NonLinearProgram
from bioptim.optimization.optimization_vector import OptimizationVector
from bioptim.dynamics.configure_problem import DynamicsFcn, Dynamics
from .utils import TestUtils
class OptimalControlProgram:
def __init__(self, nlp):
self.n_phases = 1
self.nlp = [nlp]
self.v = OptimizationVector(self)
@pytest.mark.parametrize("cx", [MX, SX])
@pytest.mark.parametrize("with_external_force", [False, True])
@pytest.mark.parametrize("with_contact", [False, True])
def test_torque_driven(with_contact, with_external_force, cx):
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod"
)
nlp.ns = 5
nlp.cx = cx
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 3, 1))
nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(ocp, "dynamics_type", Dynamics(DynamicsFcn.TORQUE_DRIVEN, with_contact=with_contact), False)
np.random.seed(42)
if with_external_force:
external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)]
nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0]
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
if with_contact:
contact_out = np.array(nlp.contact_forces_func(states, controls, params))
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.8631034, 0.3251833, 0.1195942, 0.4937956, -7.7700092, -7.5782306, 21.7073786, -16.3059315],
)
np.testing.assert_almost_equal(contact_out[:, 0], [-47.8131136, 111.1726516, -24.4449121])
else:
np.testing.assert_almost_equal(
x_out[:, 0], [0.6118529, 0.785176, 0.6075449, 0.8083973, -0.3214905, -0.1912131, 0.6507164, -0.2359716]
)
np.testing.assert_almost_equal(contact_out[:, 0], [-2.444071, 128.8816865, 2.7245124])
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.86310343, 0.32518332, 0.11959425, 0.4937956, 0.30731739, -9.97912778, 1.15263778, 36.02430956],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.61185289, 0.78517596, 0.60754485, 0.80839735, -0.30241366, -10.38503791, 1.60445173, 35.80238642],
)
@pytest.mark.parametrize("cx", [MX, SX])
@pytest.mark.parametrize("with_external_force", [False, True])
@pytest.mark.parametrize("with_contact", [False, True])
def test_torque_derivative_driven(with_contact, with_external_force, cx):
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod"
)
nlp.ns = 5
nlp.cx = cx
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 3, 1))
nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(
ocp, "dynamics_type", Dynamics(DynamicsFcn.TORQUE_DERIVATIVE_DRIVEN, with_contact=with_contact), False
)
np.random.seed(42)
if with_external_force:
external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)]
nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0]
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
if with_contact:
contact_out = np.array(nlp.contact_forces_func(states, controls, params))
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.8631034,
0.3251833,
0.1195942,
0.4937956,
-7.7700092,
-7.5782306,
21.7073786,
-16.3059315,
0.8074402,
0.4271078,
0.417411,
0.3232029,
],
)
np.testing.assert_almost_equal(contact_out[:, 0], [-47.8131136, 111.1726516, -24.4449121])
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.61185289,
0.78517596,
0.60754485,
0.80839735,
-0.32149054,
-0.19121314,
0.65071636,
-0.23597164,
0.38867729,
0.54269608,
0.77224477,
0.72900717,
],
)
np.testing.assert_almost_equal(contact_out[:, 0], [-2.444071, 128.8816865, 2.7245124])
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.86310343,
0.32518332,
0.11959425,
0.4937956,
0.30731739,
-9.97912778,
1.15263778,
36.02430956,
0.80744016,
0.42710779,
0.417411,
0.32320293,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.61185289,
0.78517596,
0.60754485,
0.80839735,
-0.30241366,
-10.38503791,
1.60445173,
35.80238642,
0.38867729,
0.54269608,
0.77224477,
0.72900717,
],
)
@pytest.mark.parametrize("cx", [MX, SX])
@pytest.mark.parametrize("with_external_force", [False, True])
@pytest.mark.parametrize("with_contact", [False, True])
def test_torque_activation_driven(with_contact, with_external_force, cx):
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod"
)
nlp.ns = 5
nlp.cx = cx
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 2, 1))
nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(
ocp, "dynamics_type", Dynamics(DynamicsFcn.TORQUE_ACTIVATIONS_DRIVEN, with_contact=with_contact), False
)
np.random.seed(42)
if with_external_force:
external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)]
nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0]
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
if with_contact:
contact_out = np.array(nlp.contact_forces_func(states, controls, params))
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.8631, 0.32518, 0.11959, 0.4938, 19.01887, 18.51503, -53.08574, 58.48719],
decimal=5,
)
np.testing.assert_almost_equal(contact_out[:, 0], [109.8086936, 3790.3932439, -3571.7858574])
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.61185289, 0.78517596, 0.60754485, 0.80839735, 0.78455384, -0.16844256, -1.56184114, 1.97658587],
decimal=5,
)
np.testing.assert_almost_equal(contact_out[:, 0], [-7.88958997, 329.70828173, -263.55516549])
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
8.63103426e-01,
3.25183322e-01,
1.19594246e-01,
4.93795596e-01,
1.73558072e01,
-4.69891264e01,
1.81396922e02,
3.61170139e03,
],
decimal=5,
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
6.11852895e-01,
7.85175961e-01,
6.07544852e-01,
8.08397348e-01,
-2.38262975e01,
-5.82033454e01,
1.27439020e02,
3.66531163e03,
],
decimal=5,
)
@pytest.mark.parametrize("cx", [MX, SX])
@pytest.mark.parametrize("with_external_force", [False, True])
@pytest.mark.parametrize("with_contact", [False, True])
@pytest.mark.parametrize("with_torque", [False, True])
@pytest.mark.parametrize("with_excitations", [False, True])
def test_muscle_driven(with_excitations, with_contact, with_torque, with_external_force, cx):
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/muscle_driven_ocp/models/arm26_with_contact.bioMod"
)
nlp.ns = 5
nlp.cx = cx
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 2 + nlp.model.nbMuscles(), 1))
nlp.u_bounds = np.zeros((nlp.model.nbMuscles(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(
ocp,
"dynamics_type",
Dynamics(
DynamicsFcn.MUSCLE_DRIVEN,
with_torque=with_torque,
with_excitations=with_excitations,
with_contact=with_contact,
),
False,
)
np.random.seed(42)
if with_external_force:
external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)]
nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0]
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
    if with_contact:  # Warning: this test is a bit bogus, since the model does not have contacts
if with_torque:
if with_excitations:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.6158501,
0.50313626,
0.64241928,
1.07179622,
-33.76217857,
36.21815923,
46.87928022,
-1.80189035,
53.3914525,
48.30056919,
63.69373374,
-28.15700995,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
1.83404510e-01,
6.11852895e-01,
7.85175961e-01,
-9.29662878e00,
3.00872062e02,
-9.50354903e02,
8.60630831e00,
3.19433638e00,
2.97405608e01,
-2.02754226e01,
-2.32467778e01,
-4.19135012e01,
],
decimal=6,
)
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[6.15850098e-01, 5.03136259e-01, 6.42419278e-01, -8.06478367e00, 2.42279101e02, -7.72114103e02],
decimal=6,
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -3.80892207e00, 1.20476051e02, -4.33291346e02],
decimal=6,
)
else:
if with_excitations:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.6158501,
0.50313626,
0.64241928,
0.91952705,
-39.04876174,
45.31837288,
55.65557816,
50.47052688,
0.36025589,
58.92377491,
29.70094194,
-15.13534937,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
1.83404510e-01,
6.11852895e-01,
7.85175961e-01,
-9.72712350e00,
3.10866170e02,
-9.82725656e02,
-7.72228930e00,
-1.13759732e01,
9.51906209e01,
4.45077128e00,
-5.20261014e00,
-2.80864106e01,
],
decimal=6,
)
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.6158501, 0.50313626, 0.64241928, 0.91952705, -39.04876174, 45.31837288],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.72712350e00, 3.10866170e02, -9.82725656e02],
decimal=6,
)
else:
if with_torque:
if with_excitations:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.6158501,
0.50313626,
0.64241928,
1.07179622,
-33.76217857,
36.21815923,
46.87928022,
-1.80189035,
53.3914525,
48.30056919,
63.69373374,
-28.15700995,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
1.83404510e-01,
6.11852895e-01,
7.85175961e-01,
-9.29662878e00,
3.00872062e02,
-9.50354903e02,
8.60630831e00,
3.19433638e00,
2.97405608e01,
-2.02754226e01,
-2.32467778e01,
-4.19135012e01,
],
decimal=6,
)
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[6.15850098e-01, 5.03136259e-01, 6.42419278e-01, -8.06478367e00, 2.42279101e02, -7.72114103e02],
decimal=6,
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -3.80892207e00, 1.20476051e02, -4.33291346e02],
decimal=6,
)
else:
if with_excitations:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.6158501,
0.50313626,
0.64241928,
0.91952705,
-39.04876174,
45.31837288,
55.65557816,
50.47052688,
0.36025589,
58.92377491,
29.70094194,
-15.13534937,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
1.83404510e-01,
6.11852895e-01,
7.85175961e-01,
-9.72712350e00,
3.10866170e02,
-9.82725656e02,
-7.72228930e00,
-1.13759732e01,
9.51906209e01,
4.45077128e00,
-5.20261014e00,
-2.80864106e01,
],
decimal=6,
)
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.6158501, 0.50313626, 0.64241928, 0.91952705, -39.04876174, 45.31837288],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.72712350e00, 3.10866170e02, -9.82725656e02],
decimal=6,
)
@pytest.mark.parametrize("with_contact", [False, True])
def test_custom_dynamics(with_contact):
def custom_dynamic(states, controls, parameters, nlp, with_contact=False) -> tuple:
DynamicsFunctions.apply_parameters(parameters, nlp)
q = DynamicsFunctions.get(nlp.states["q"], states)
qdot = DynamicsFunctions.get(nlp.states["qdot"], states)
tau = DynamicsFunctions.get(nlp.controls["tau"], controls)
dq = DynamicsFunctions.compute_qdot(nlp, q, qdot)
ddq = DynamicsFunctions.forward_dynamics(nlp, q, qdot, tau, with_contact)
return dq, ddq
def configure(ocp, nlp, with_contact=None):
ConfigureProblem.configure_q(nlp, True, False)
ConfigureProblem.configure_qdot(nlp, True, False)
ConfigureProblem.configure_tau(nlp, False, True)
ConfigureProblem.configure_dynamics_function(ocp, nlp, custom_dynamic, with_contact=with_contact)
if with_contact:
ConfigureProblem.configure_contact_function(ocp, nlp, DynamicsFunctions.forces_from_torque_driven)
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod"
)
nlp.ns = 5
nlp.cx = MX
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 3, 1))
nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(
ocp, "dynamics_type", Dynamics(configure, dynamic_function=custom_dynamic, with_contact=with_contact), False
)
np.random.seed(42)
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
if with_contact:
contact_out = np.array(nlp.contact_forces_func(states, controls, params))
np.testing.assert_almost_equal(
x_out[:, 0], [0.6118529, 0.785176, 0.6075449, 0.8083973, -0.3214905, -0.1912131, 0.6507164, -0.2359716]
)
np.testing.assert_almost_equal(contact_out[:, 0], [-2.444071, 128.8816865, 2.7245124])
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.61185289, 0.78517596, 0.60754485, 0.80839735, -0.30241366, -10.38503791, 1.60445173, 35.80238642],
)
| <filename>tests/test_dynamics.py
import pytest
import numpy as np
from casadi import MX, SX
import biorbd_casadi as biorbd
from bioptim.dynamics.configure_problem import ConfigureProblem
from bioptim.dynamics.dynamics_functions import DynamicsFunctions
from bioptim.interfaces.biorbd_interface import BiorbdInterface
from bioptim.misc.enums import ControlType
from bioptim.optimization.non_linear_program import NonLinearProgram
from bioptim.optimization.optimization_vector import OptimizationVector
from bioptim.dynamics.configure_problem import DynamicsFcn, Dynamics
from .utils import TestUtils
class OptimalControlProgram:
def __init__(self, nlp):
self.n_phases = 1
self.nlp = [nlp]
self.v = OptimizationVector(self)
@pytest.mark.parametrize("cx", [MX, SX])
@pytest.mark.parametrize("with_external_force", [False, True])
@pytest.mark.parametrize("with_contact", [False, True])
def test_torque_driven(with_contact, with_external_force, cx):
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod"
)
nlp.ns = 5
nlp.cx = cx
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 3, 1))
nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(ocp, "dynamics_type", Dynamics(DynamicsFcn.TORQUE_DRIVEN, with_contact=with_contact), False)
np.random.seed(42)
if with_external_force:
external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)]
nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0]
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
if with_contact:
contact_out = np.array(nlp.contact_forces_func(states, controls, params))
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.8631034, 0.3251833, 0.1195942, 0.4937956, -7.7700092, -7.5782306, 21.7073786, -16.3059315],
)
np.testing.assert_almost_equal(contact_out[:, 0], [-47.8131136, 111.1726516, -24.4449121])
else:
np.testing.assert_almost_equal(
x_out[:, 0], [0.6118529, 0.785176, 0.6075449, 0.8083973, -0.3214905, -0.1912131, 0.6507164, -0.2359716]
)
np.testing.assert_almost_equal(contact_out[:, 0], [-2.444071, 128.8816865, 2.7245124])
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.86310343, 0.32518332, 0.11959425, 0.4937956, 0.30731739, -9.97912778, 1.15263778, 36.02430956],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.61185289, 0.78517596, 0.60754485, 0.80839735, -0.30241366, -10.38503791, 1.60445173, 35.80238642],
)
@pytest.mark.parametrize("cx", [MX, SX])
@pytest.mark.parametrize("with_external_force", [False, True])
@pytest.mark.parametrize("with_contact", [False, True])
def test_torque_derivative_driven(with_contact, with_external_force, cx):
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod"
)
nlp.ns = 5
nlp.cx = cx
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 3, 1))
nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(
ocp, "dynamics_type", Dynamics(DynamicsFcn.TORQUE_DERIVATIVE_DRIVEN, with_contact=with_contact), False
)
np.random.seed(42)
if with_external_force:
external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)]
nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0]
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
if with_contact:
contact_out = np.array(nlp.contact_forces_func(states, controls, params))
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.8631034,
0.3251833,
0.1195942,
0.4937956,
-7.7700092,
-7.5782306,
21.7073786,
-16.3059315,
0.8074402,
0.4271078,
0.417411,
0.3232029,
],
)
np.testing.assert_almost_equal(contact_out[:, 0], [-47.8131136, 111.1726516, -24.4449121])
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.61185289,
0.78517596,
0.60754485,
0.80839735,
-0.32149054,
-0.19121314,
0.65071636,
-0.23597164,
0.38867729,
0.54269608,
0.77224477,
0.72900717,
],
)
np.testing.assert_almost_equal(contact_out[:, 0], [-2.444071, 128.8816865, 2.7245124])
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.86310343,
0.32518332,
0.11959425,
0.4937956,
0.30731739,
-9.97912778,
1.15263778,
36.02430956,
0.80744016,
0.42710779,
0.417411,
0.32320293,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.61185289,
0.78517596,
0.60754485,
0.80839735,
-0.30241366,
-10.38503791,
1.60445173,
35.80238642,
0.38867729,
0.54269608,
0.77224477,
0.72900717,
],
)
@pytest.mark.parametrize("cx", [MX, SX])
@pytest.mark.parametrize("with_external_force", [False, True])
@pytest.mark.parametrize("with_contact", [False, True])
def test_torque_activation_driven(with_contact, with_external_force, cx):
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod"
)
nlp.ns = 5
nlp.cx = cx
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 2, 1))
nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(
ocp, "dynamics_type", Dynamics(DynamicsFcn.TORQUE_ACTIVATIONS_DRIVEN, with_contact=with_contact), False
)
np.random.seed(42)
if with_external_force:
external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)]
nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0]
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
if with_contact:
contact_out = np.array(nlp.contact_forces_func(states, controls, params))
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.8631, 0.32518, 0.11959, 0.4938, 19.01887, 18.51503, -53.08574, 58.48719],
decimal=5,
)
np.testing.assert_almost_equal(contact_out[:, 0], [109.8086936, 3790.3932439, -3571.7858574])
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.61185289, 0.78517596, 0.60754485, 0.80839735, 0.78455384, -0.16844256, -1.56184114, 1.97658587],
decimal=5,
)
np.testing.assert_almost_equal(contact_out[:, 0], [-7.88958997, 329.70828173, -263.55516549])
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
8.63103426e-01,
3.25183322e-01,
1.19594246e-01,
4.93795596e-01,
1.73558072e01,
-4.69891264e01,
1.81396922e02,
3.61170139e03,
],
decimal=5,
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
6.11852895e-01,
7.85175961e-01,
6.07544852e-01,
8.08397348e-01,
-2.38262975e01,
-5.82033454e01,
1.27439020e02,
3.66531163e03,
],
decimal=5,
)
@pytest.mark.parametrize("cx", [MX, SX])
@pytest.mark.parametrize("with_external_force", [False, True])
@pytest.mark.parametrize("with_contact", [False, True])
@pytest.mark.parametrize("with_torque", [False, True])
@pytest.mark.parametrize("with_excitations", [False, True])
def test_muscle_driven(with_excitations, with_contact, with_torque, with_external_force, cx):
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/muscle_driven_ocp/models/arm26_with_contact.bioMod"
)
nlp.ns = 5
nlp.cx = cx
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 2 + nlp.model.nbMuscles(), 1))
nlp.u_bounds = np.zeros((nlp.model.nbMuscles(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(
ocp,
"dynamics_type",
Dynamics(
DynamicsFcn.MUSCLE_DRIVEN,
with_torque=with_torque,
with_excitations=with_excitations,
with_contact=with_contact,
),
False,
)
np.random.seed(42)
if with_external_force:
external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)]
nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0]
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
    if with_contact:  # Warning: this test is a bit bogus since the model does not have contacts
if with_torque:
if with_excitations:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.6158501,
0.50313626,
0.64241928,
1.07179622,
-33.76217857,
36.21815923,
46.87928022,
-1.80189035,
53.3914525,
48.30056919,
63.69373374,
-28.15700995,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
1.83404510e-01,
6.11852895e-01,
7.85175961e-01,
-9.29662878e00,
3.00872062e02,
-9.50354903e02,
8.60630831e00,
3.19433638e00,
2.97405608e01,
-2.02754226e01,
-2.32467778e01,
-4.19135012e01,
],
decimal=6,
)
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[6.15850098e-01, 5.03136259e-01, 6.42419278e-01, -8.06478367e00, 2.42279101e02, -7.72114103e02],
decimal=6,
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -3.80892207e00, 1.20476051e02, -4.33291346e02],
decimal=6,
)
else:
if with_excitations:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.6158501,
0.50313626,
0.64241928,
0.91952705,
-39.04876174,
45.31837288,
55.65557816,
50.47052688,
0.36025589,
58.92377491,
29.70094194,
-15.13534937,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
1.83404510e-01,
6.11852895e-01,
7.85175961e-01,
-9.72712350e00,
3.10866170e02,
-9.82725656e02,
-7.72228930e00,
-1.13759732e01,
9.51906209e01,
4.45077128e00,
-5.20261014e00,
-2.80864106e01,
],
decimal=6,
)
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.6158501, 0.50313626, 0.64241928, 0.91952705, -39.04876174, 45.31837288],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.72712350e00, 3.10866170e02, -9.82725656e02],
decimal=6,
)
else:
if with_torque:
if with_excitations:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.6158501,
0.50313626,
0.64241928,
1.07179622,
-33.76217857,
36.21815923,
46.87928022,
-1.80189035,
53.3914525,
48.30056919,
63.69373374,
-28.15700995,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
1.83404510e-01,
6.11852895e-01,
7.85175961e-01,
-9.29662878e00,
3.00872062e02,
-9.50354903e02,
8.60630831e00,
3.19433638e00,
2.97405608e01,
-2.02754226e01,
-2.32467778e01,
-4.19135012e01,
],
decimal=6,
)
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[6.15850098e-01, 5.03136259e-01, 6.42419278e-01, -8.06478367e00, 2.42279101e02, -7.72114103e02],
decimal=6,
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -3.80892207e00, 1.20476051e02, -4.33291346e02],
decimal=6,
)
else:
if with_excitations:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[
0.6158501,
0.50313626,
0.64241928,
0.91952705,
-39.04876174,
45.31837288,
55.65557816,
50.47052688,
0.36025589,
58.92377491,
29.70094194,
-15.13534937,
],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[
1.83404510e-01,
6.11852895e-01,
7.85175961e-01,
-9.72712350e00,
3.10866170e02,
-9.82725656e02,
-7.72228930e00,
-1.13759732e01,
9.51906209e01,
4.45077128e00,
-5.20261014e00,
-2.80864106e01,
],
decimal=6,
)
else:
if with_external_force:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.6158501, 0.50313626, 0.64241928, 0.91952705, -39.04876174, 45.31837288],
)
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.72712350e00, 3.10866170e02, -9.82725656e02],
decimal=6,
)
@pytest.mark.parametrize("with_contact", [False, True])
def test_custom_dynamics(with_contact):
def custom_dynamic(states, controls, parameters, nlp, with_contact=False) -> tuple:
DynamicsFunctions.apply_parameters(parameters, nlp)
q = DynamicsFunctions.get(nlp.states["q"], states)
qdot = DynamicsFunctions.get(nlp.states["qdot"], states)
tau = DynamicsFunctions.get(nlp.controls["tau"], controls)
dq = DynamicsFunctions.compute_qdot(nlp, q, qdot)
ddq = DynamicsFunctions.forward_dynamics(nlp, q, qdot, tau, with_contact)
return dq, ddq
def configure(ocp, nlp, with_contact=None):
ConfigureProblem.configure_q(nlp, True, False)
ConfigureProblem.configure_qdot(nlp, True, False)
ConfigureProblem.configure_tau(nlp, False, True)
ConfigureProblem.configure_dynamics_function(ocp, nlp, custom_dynamic, with_contact=with_contact)
if with_contact:
ConfigureProblem.configure_contact_function(ocp, nlp, DynamicsFunctions.forces_from_torque_driven)
# Prepare the program
nlp = NonLinearProgram()
nlp.model = biorbd.Model(
TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod"
)
nlp.ns = 5
nlp.cx = MX
nlp.x_bounds = np.zeros((nlp.model.nbQ() * 3, 1))
nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1))
ocp = OptimalControlProgram(nlp)
nlp.control_type = ControlType.CONSTANT
NonLinearProgram.add(
ocp, "dynamics_type", Dynamics(configure, dynamic_function=custom_dynamic, with_contact=with_contact), False
)
np.random.seed(42)
# Prepare the dynamics
ConfigureProblem.initialize(ocp, nlp)
# Test the results
states = np.random.rand(nlp.states.shape, nlp.ns)
controls = np.random.rand(nlp.controls.shape, nlp.ns)
params = np.random.rand(nlp.parameters.shape, nlp.ns)
x_out = np.array(nlp.dynamics_func(states, controls, params))
if with_contact:
contact_out = np.array(nlp.contact_forces_func(states, controls, params))
np.testing.assert_almost_equal(
x_out[:, 0], [0.6118529, 0.785176, 0.6075449, 0.8083973, -0.3214905, -0.1912131, 0.6507164, -0.2359716]
)
np.testing.assert_almost_equal(contact_out[:, 0], [-2.444071, 128.8816865, 2.7245124])
else:
np.testing.assert_almost_equal(
x_out[:, 0],
[0.61185289, 0.78517596, 0.60754485, 0.80839735, -0.30241366, -10.38503791, 1.60445173, 35.80238642],
)
| en | 0.820213 | # Prepare the program # Prepare the dynamics # Test the results # Prepare the program # Prepare the dynamics # Test the results # Prepare the program # Prepare the dynamics # Test the results # Prepare the program # Prepare the dynamics # Test the results # Warning this test is a bit bogus, there since the model does not have contacts # Prepare the program # Prepare the dynamics # Test the results | 2.09114 | 2 |
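# The tests above lean heavily on np.testing.assert_almost_equal with a `decimal` argument.
# A minimal standalone sketch (numpy only, made-up values) of how that tolerance behaves:
# values must agree to the requested number of decimal places, so a difference of 1e-6
# passes at decimal=5 but fails at decimal=7.
import numpy as np

a = np.array([0.61185289, 0.78517596])
b = a + 1e-6

np.testing.assert_almost_equal(a, b, decimal=5)      # passes: |a - b| < 1.5 * 10**-5
try:
    np.testing.assert_almost_equal(a, b, decimal=7)  # raises AssertionError
except AssertionError:
    print("a difference of 1e-6 is visible at decimal=7")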
polyaxon/event_manager/event_manager.py | elyase/polyaxon | 0 | 754 | from hestia.manager_interface import ManagerInterface
from event_manager import event_actions
class EventManager(ManagerInterface):
def _get_state_data(self, event): # pylint:disable=arguments-differ
return event.event_type, event
def subscribe(self, event): # pylint:disable=arguments-differ
"""
>>> subscribe(SomeEvent)
"""
super().subscribe(obj=event)
def knows(self, event_type): # pylint:disable=arguments-differ
return super().knows(key=event_type)
def get(self, event_type): # pylint:disable=arguments-differ
return super().get(key=event_type)
def user_write_events(self):
"""Return event types where use acted on an object.
The write events are events with actions:
* CREATED
* UPDATED
* DELETED
* RESUMED
* COPIED
* CLONED
* STOPPED
"""
return [event_type for event_type, event in self.items if event.get_event_action()
in event_actions.WRITE_ACTIONS]
def user_view_events(self):
"""Return event types where use viewed a main object."""
return [event_type for event_type, event in self.items if event.get_event_action()
== event_actions.VIEWED]
| from hestia.manager_interface import ManagerInterface
from event_manager import event_actions
class EventManager(ManagerInterface):
def _get_state_data(self, event): # pylint:disable=arguments-differ
return event.event_type, event
def subscribe(self, event): # pylint:disable=arguments-differ
"""
>>> subscribe(SomeEvent)
"""
super().subscribe(obj=event)
def knows(self, event_type): # pylint:disable=arguments-differ
return super().knows(key=event_type)
def get(self, event_type): # pylint:disable=arguments-differ
return super().get(key=event_type)
def user_write_events(self):
"""Return event types where use acted on an object.
The write events are events with actions:
* CREATED
* UPDATED
* DELETED
* RESUMED
* COPIED
* CLONED
* STOPPED
"""
return [event_type for event_type, event in self.items if event.get_event_action()
in event_actions.WRITE_ACTIONS]
def user_view_events(self):
"""Return event types where use viewed a main object."""
return [event_type for event_type, event in self.items if event.get_event_action()
== event_actions.VIEWED]
| en | 0.608418 | # pylint:disable=arguments-differ # pylint:disable=arguments-differ >>> subscribe(SomeEvent) # pylint:disable=arguments-differ # pylint:disable=arguments-differ Return event types where use acted on an object. The write events are events with actions: * CREATED * UPDATED * DELETED * RESUMED * COPIED * CLONED * STOPPED Return event types where use viewed a main object. | 2.560294 | 3 |
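# A self-contained sketch (not the hestia ManagerInterface API) of the registry pattern the
# EventManager above builds on: events are stored keyed by their event_type, and
# knows()/get() answer lookups against that key. Class and event names here are hypothetical.
class MiniEventRegistry(object):
    def __init__(self):
        self._state = {}

    def subscribe(self, event):
        key, value = event.event_type, event          # mirrors _get_state_data()
        self._state[key] = value

    def knows(self, event_type):
        return event_type in self._state

    def get(self, event_type):
        return self._state[event_type]


class ProjectViewedEvent(object):                     # hypothetical event class
    event_type = 'project.viewed'


registry = MiniEventRegistry()
registry.subscribe(ProjectViewedEvent)
print(registry.knows('project.viewed'))               # True
print(registry.get('project.viewed').__name__)        # ProjectViewedEvent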
test_f_login_andy.py | KotoLLC/peacenik-tests | 0 | 755 | from helpers import *
def test_f_login_andy():
url = "http://central.orbits.local/rpc.AuthService/Login"
raw_payload = {"name": "andy","password": "<PASSWORD>"}
payload = json.dumps(raw_payload)
headers = {'Content-Type': 'application/json'}
    # Convert the dict to a JSON string with json.dumps() for the request body.
response = requests.request("POST", url, headers=headers, data=payload)
save_cookies(response.cookies,"cookies.txt")
# Validate response headers and body contents, e.g. status code.
assert response.status_code == 200
# print full request and response
pretty_print_request(response.request)
pretty_print_response(response) | from helpers import *
def test_f_login_andy():
url = "http://central.orbits.local/rpc.AuthService/Login"
raw_payload = {"name": "andy","password": "<PASSWORD>"}
payload = json.dumps(raw_payload)
headers = {'Content-Type': 'application/json'}
    # Convert the dict to a JSON string with json.dumps() for the request body.
response = requests.request("POST", url, headers=headers, data=payload)
save_cookies(response.cookies,"cookies.txt")
# Validate response headers and body contents, e.g. status code.
assert response.status_code == 200
# print full request and response
pretty_print_request(response.request)
pretty_print_response(response) | en | 0.664599 | # convert dict to json by json.dumps() for body data. # Validate response headers and body contents, e.g. status code. # print full request and response | 2.741331 | 3 |
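# The login test above star-imports helpers (save_cookies, pretty_print_request,
# pretty_print_response) from a `helpers` module that is not shown. A minimal sketch of
# what a save_cookies helper might look like, assuming it simply persists the
# RequestsCookieJar as JSON; the real implementation may differ.
import json
import requests


def save_cookies(cookie_jar, path):
    # requests.utils.dict_from_cookiejar flattens the jar to a plain name -> value dict.
    with open(path, "w") as handle:
        json.dump(requests.utils.dict_from_cookiejar(cookie_jar), handle)


def load_cookies(path):
    with open(path) as handle:
        return requests.utils.cookiejar_from_dict(json.load(handle))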
docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py | ian-r-rose/visualization | 11 | 756 | <reponame>ian-r-rose/visualization
#!/usr/bin/env python
# encoding: utf-8
r"""
Riemann solvers for the shallow water equations.
The available solvers are:
* Roe - Use Roe averages to calculate the solution to the Riemann problem
* HLL - Use a HLL solver
* Exact - Use a newton iteration to calculate the exact solution to the
Riemann problem
.. math::
q_t + f(q)_x = 0
where
.. math::
q(x,t) = \left [ \begin{array}{c} h \\ h u \end{array} \right ],
the flux function is
.. math::
f(q) = \left [ \begin{array}{c} h u \\ hu^2 + 1/2 g h^2 \end{array}\right ].
and :math:`h` is the water column height, :math:`u` the velocity and :math:`g`
is the gravitational acceleration.
:Authors:
<NAME> (2009-02-05): Initial version
"""
# ============================================================================
# Copyright (C) 2009 <NAME> <<EMAIL>>
#
# Distributed under the terms of the Berkeley Software Distribution (BSD)
# license
# http://www.opensource.org/licenses/
# ============================================================================
import numpy as np
num_eqn = 2
num_waves = 2
def shallow_roe_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
Roe shallow water solver in 1d::
ubar = (sqrt(u_l) + sqrt(u_r)) / (sqrt(h_l) + sqrt(h_r))
cbar = sqrt( 0.5 * g * (h_l + h_r))
W_1 = | 1 | s_1 = ubar - cbar
| ubar - cbar |
        W_2 = |      1      |  s_2 = ubar + cbar
| ubar + cbar |
a1 = 0.5 * ( - delta_hu + (ubar + cbar) * delta_h ) / cbar
a2 = 0.5 * ( delta_hu - (ubar - cbar) * delta_h ) / cbar
*problem_data* should contain:
- *g* - (float) Gravitational constant
    - *efix* - (bool) Boolean as to whether an entropy fix should be used, if
not present, false is assumed
:Version: 1.0 (2009-02-05)
"""
# Array shapes
num_rp = q_l.shape[1]
# Output arrays
wave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.zeros( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Compute roe-averaged quantities
ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
(np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )
cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))
# Compute Flux structure
delta = q_r - q_l
a1 = 0.5 * (-delta[1,:] + (ubar + cbar) * delta[0,:]) / cbar
a2 = 0.5 * ( delta[1,:] - (ubar - cbar) * delta[0,:]) / cbar
# Compute each family of waves
wave[0,0,:] = a1
wave[1,0,:] = a1 * (ubar - cbar)
s[0,:] = ubar - cbar
wave[0,1,:] = a2
wave[1,1,:] = a2 * (ubar + cbar)
s[1,:] = ubar + cbar
if problem_data['efix']:
raise NotImplementedError("Entropy fix has not been implemented.")
else:
s_index = np.zeros((2,num_rp))
for m in xrange(num_eqn):
for mw in xrange(num_waves):
s_index[0,:] = s[mw,:]
amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]
return wave, s, amdq, apdq
def shallow_hll_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
HLL shallow water solver ::
W_1 = Q_hat - Q_l s_1 = min(u_l-c_l,u_l+c_l,lambda_roe_1,lambda_roe_2)
W_2 = Q_r - Q_hat s_2 = max(u_r-c_r,u_r+c_r,lambda_roe_1,lambda_roe_2)
Q_hat = ( f(q_r) - f(q_l) - s_2 * q_r + s_1 * q_l ) / (s_1 - s_2)
*problem_data* should contain:
- *g* - (float) Gravitational constant
:Version: 1.0 (2009-02-05)
"""
# Array shapes
num_rp = q_l.shape[1]
num_eqn = 2
num_waves = 2
# Output arrays
wave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.empty( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Compute Roe and right and left speeds
ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
(np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )
cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))
u_r = q_r[1,:] / q_r[0,:]
c_r = np.sqrt(problem_data['grav'] * q_r[0,:])
u_l = q_l[1,:] / q_l[0,:]
c_l = np.sqrt(problem_data['grav'] * q_l[0,:])
# Compute Einfeldt speeds
s_index = np.empty((4,num_rp))
s_index[0,:] = ubar+cbar
s_index[1,:] = ubar-cbar
s_index[2,:] = u_l + c_l
s_index[3,:] = u_l - c_l
s[0,:] = np.min(s_index,axis=0)
s_index[2,:] = u_r + c_r
s_index[3,:] = u_r - c_r
s[1,:] = np.max(s_index,axis=0)
# Compute middle state
q_hat = np.empty((2,num_rp))
q_hat[0,:] = ((q_r[1,:] - q_l[1,:] - s[1,:] * q_r[0,:]
+ s[0,:] * q_l[0,:]) / (s[0,:] - s[1,:]))
q_hat[1,:] = ((q_r[1,:]**2/q_r[0,:] + 0.5 * problem_data['grav'] * q_r[0,:]**2
- (q_l[1,:]**2/q_l[0,:] + 0.5 * problem_data['grav'] * q_l[0,:]**2)
- s[1,:] * q_r[1,:] + s[0,:] * q_l[1,:]) / (s[0,:] - s[1,:]))
# Compute each family of waves
wave[:,0,:] = q_hat - q_l
wave[:,1,:] = q_r - q_hat
# Compute variations
s_index = np.zeros((2,num_rp))
for m in xrange(num_eqn):
for mw in xrange(num_waves):
s_index[0,:] = s[mw,:]
amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]
return wave, s, amdq, apdq
def shallow_fwave_1d(q_l, q_r, aux_l, aux_r, problem_data):
r"""Shallow water Riemann solver using fwaves
Also includes support for bathymetry but be wary if you think you might have
dry states as this has not been tested.
*problem_data* should contain:
- *grav* - (float) Gravitational constant
- *sea_level* - (float) Datum from which the dry-state is calculated.
:Version: 1.0 (2014-09-05)
"""
g = problem_data['grav']
num_rp = q_l.shape[1]
num_eqn = 2
num_waves = 2
# Output arrays
fwave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.empty( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Extract state
u_l = np.where(q_l[0,:] - problem_data['sea_level'] > 1e-3,
q_l[1,:] / q_l[0,:], 0.0)
u_r = np.where(q_r[0,:] - problem_data['sea_level'] > 1e-3,
q_r[1,:] / q_r[0,:], 0.0)
phi_l = q_l[0,:] * u_l**2 + 0.5 * g * q_l[0,:]**2
phi_r = q_r[0,:] * u_r**2 + 0.5 * g * q_r[0,:]**2
# Speeds
s[0,:] = u_l - np.sqrt(g * q_l[0,:])
s[1,:] = u_r + np.sqrt(g * q_r[0,:])
delta1 = q_r[1,:] - q_l[1,:]
delta2 = phi_r - phi_l + g * 0.5 * (q_r[0,:] + q_l[0,:]) * (aux_r[0,:] - aux_l[0,:])
beta1 = (s[1,:] * delta1 - delta2) / (s[1,:] - s[0,:])
beta2 = (delta2 - s[0,:] * delta1) / (s[1,:] - s[0,:])
fwave[0,0,:] = beta1
fwave[1,0,:] = beta1 * s[0,:]
fwave[0,1,:] = beta2
fwave[1,1,:] = beta2 * s[1,:]
for m in xrange(num_eqn):
for mw in xrange(num_waves):
amdq[m,:] += (s[mw,:] < 0.0) * fwave[m,mw,:]
apdq[m,:] += (s[mw,:] >= 0.0) * fwave[m,mw,:]
return fwave, s, amdq, apdq
def shallow_exact_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
Exact shallow water Riemann solver
.. warning::
This solver has not been implemented.
"""
raise NotImplementedError("The exact swe solver has not been implemented.")
| #!/usr/bin/env python
# encoding: utf-8
r"""
Riemann solvers for the shallow water equations.
The available solvers are:
* Roe - Use Roe averages to calculate the solution to the Riemann problem
* HLL - Use a HLL solver
* Exact - Use a newton iteration to calculate the exact solution to the
Riemann problem
.. math::
q_t + f(q)_x = 0
where
.. math::
q(x,t) = \left [ \begin{array}{c} h \\ h u \end{array} \right ],
the flux function is
.. math::
f(q) = \left [ \begin{array}{c} h u \\ hu^2 + 1/2 g h^2 \end{array}\right ].
and :math:`h` is the water column height, :math:`u` the velocity and :math:`g`
is the gravitational acceleration.
:Authors:
<NAME> (2009-02-05): Initial version
"""
# ============================================================================
# Copyright (C) 2009 <NAME> <<EMAIL>>
#
# Distributed under the terms of the Berkeley Software Distribution (BSD)
# license
# http://www.opensource.org/licenses/
# ============================================================================
import numpy as np
num_eqn = 2
num_waves = 2
def shallow_roe_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
Roe shallow water solver in 1d::
ubar = (sqrt(u_l) + sqrt(u_r)) / (sqrt(h_l) + sqrt(h_r))
cbar = sqrt( 0.5 * g * (h_l + h_r))
W_1 = | 1 | s_1 = ubar - cbar
| ubar - cbar |
        W_2 = |      1      |  s_2 = ubar + cbar
| ubar + cbar |
a1 = 0.5 * ( - delta_hu + (ubar + cbar) * delta_h ) / cbar
a2 = 0.5 * ( delta_hu - (ubar - cbar) * delta_h ) / cbar
*problem_data* should contain:
- *g* - (float) Gravitational constant
    - *efix* - (bool) Boolean as to whether an entropy fix should be used, if
not present, false is assumed
:Version: 1.0 (2009-02-05)
"""
# Array shapes
num_rp = q_l.shape[1]
# Output arrays
wave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.zeros( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Compute roe-averaged quantities
ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
(np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )
cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))
# Compute Flux structure
delta = q_r - q_l
a1 = 0.5 * (-delta[1,:] + (ubar + cbar) * delta[0,:]) / cbar
a2 = 0.5 * ( delta[1,:] - (ubar - cbar) * delta[0,:]) / cbar
# Compute each family of waves
wave[0,0,:] = a1
wave[1,0,:] = a1 * (ubar - cbar)
s[0,:] = ubar - cbar
wave[0,1,:] = a2
wave[1,1,:] = a2 * (ubar + cbar)
s[1,:] = ubar + cbar
if problem_data['efix']:
raise NotImplementedError("Entropy fix has not been implemented.")
else:
s_index = np.zeros((2,num_rp))
for m in xrange(num_eqn):
for mw in xrange(num_waves):
s_index[0,:] = s[mw,:]
amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]
return wave, s, amdq, apdq
def shallow_hll_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
HLL shallow water solver ::
W_1 = Q_hat - Q_l s_1 = min(u_l-c_l,u_l+c_l,lambda_roe_1,lambda_roe_2)
W_2 = Q_r - Q_hat s_2 = max(u_r-c_r,u_r+c_r,lambda_roe_1,lambda_roe_2)
Q_hat = ( f(q_r) - f(q_l) - s_2 * q_r + s_1 * q_l ) / (s_1 - s_2)
*problem_data* should contain:
- *g* - (float) Gravitational constant
:Version: 1.0 (2009-02-05)
"""
# Array shapes
num_rp = q_l.shape[1]
num_eqn = 2
num_waves = 2
# Output arrays
wave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.empty( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Compute Roe and right and left speeds
ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
(np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )
cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))
u_r = q_r[1,:] / q_r[0,:]
c_r = np.sqrt(problem_data['grav'] * q_r[0,:])
u_l = q_l[1,:] / q_l[0,:]
c_l = np.sqrt(problem_data['grav'] * q_l[0,:])
# Compute Einfeldt speeds
s_index = np.empty((4,num_rp))
s_index[0,:] = ubar+cbar
s_index[1,:] = ubar-cbar
s_index[2,:] = u_l + c_l
s_index[3,:] = u_l - c_l
s[0,:] = np.min(s_index,axis=0)
s_index[2,:] = u_r + c_r
s_index[3,:] = u_r - c_r
s[1,:] = np.max(s_index,axis=0)
# Compute middle state
q_hat = np.empty((2,num_rp))
q_hat[0,:] = ((q_r[1,:] - q_l[1,:] - s[1,:] * q_r[0,:]
+ s[0,:] * q_l[0,:]) / (s[0,:] - s[1,:]))
q_hat[1,:] = ((q_r[1,:]**2/q_r[0,:] + 0.5 * problem_data['grav'] * q_r[0,:]**2
- (q_l[1,:]**2/q_l[0,:] + 0.5 * problem_data['grav'] * q_l[0,:]**2)
- s[1,:] * q_r[1,:] + s[0,:] * q_l[1,:]) / (s[0,:] - s[1,:]))
# Compute each family of waves
wave[:,0,:] = q_hat - q_l
wave[:,1,:] = q_r - q_hat
# Compute variations
s_index = np.zeros((2,num_rp))
for m in xrange(num_eqn):
for mw in xrange(num_waves):
s_index[0,:] = s[mw,:]
amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]
return wave, s, amdq, apdq
def shallow_fwave_1d(q_l, q_r, aux_l, aux_r, problem_data):
r"""Shallow water Riemann solver using fwaves
Also includes support for bathymetry but be wary if you think you might have
dry states as this has not been tested.
*problem_data* should contain:
- *grav* - (float) Gravitational constant
- *sea_level* - (float) Datum from which the dry-state is calculated.
:Version: 1.0 (2014-09-05)
"""
g = problem_data['grav']
num_rp = q_l.shape[1]
num_eqn = 2
num_waves = 2
# Output arrays
fwave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.empty( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Extract state
u_l = np.where(q_l[0,:] - problem_data['sea_level'] > 1e-3,
q_l[1,:] / q_l[0,:], 0.0)
u_r = np.where(q_r[0,:] - problem_data['sea_level'] > 1e-3,
q_r[1,:] / q_r[0,:], 0.0)
phi_l = q_l[0,:] * u_l**2 + 0.5 * g * q_l[0,:]**2
phi_r = q_r[0,:] * u_r**2 + 0.5 * g * q_r[0,:]**2
# Speeds
s[0,:] = u_l - np.sqrt(g * q_l[0,:])
s[1,:] = u_r + np.sqrt(g * q_r[0,:])
delta1 = q_r[1,:] - q_l[1,:]
delta2 = phi_r - phi_l + g * 0.5 * (q_r[0,:] + q_l[0,:]) * (aux_r[0,:] - aux_l[0,:])
beta1 = (s[1,:] * delta1 - delta2) / (s[1,:] - s[0,:])
beta2 = (delta2 - s[0,:] * delta1) / (s[1,:] - s[0,:])
fwave[0,0,:] = beta1
fwave[1,0,:] = beta1 * s[0,:]
fwave[0,1,:] = beta2
fwave[1,1,:] = beta2 * s[1,:]
for m in xrange(num_eqn):
for mw in xrange(num_waves):
amdq[m,:] += (s[mw,:] < 0.0) * fwave[m,mw,:]
apdq[m,:] += (s[mw,:] >= 0.0) * fwave[m,mw,:]
return fwave, s, amdq, apdq
def shallow_exact_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
Exact shallow water Riemann solver
.. warning::
This solver has not been implemented.
"""
raise NotImplementedError("The exact swe solver has not been implemented.") | en | 0.692874 | #!/usr/bin/env python # encoding: utf-8 Riemann solvers for the shallow water equations. The available solvers are: * Roe - Use Roe averages to caluclate the solution to the Riemann problem * HLL - Use a HLL solver * Exact - Use a newton iteration to calculate the exact solution to the Riemann problem .. math:: q_t + f(q)_x = 0 where .. math:: q(x,t) = \left [ \begin{array}{c} h \\ h u \end{array} \right ], the flux function is .. math:: f(q) = \left [ \begin{array}{c} h u \\ hu^2 + 1/2 g h^2 \end{array}\right ]. and :math:`h` is the water column height, :math:`u` the velocity and :math:`g` is the gravitational acceleration. :Authors: <NAME> (2009-02-05): Initial version # ============================================================================ # Copyright (C) 2009 <NAME> <<EMAIL>> # # Distributed under the terms of the Berkeley Software Distribution (BSD) # license # http://www.opensource.org/licenses/ # ============================================================================ Roe shallow water solver in 1d:: ubar = (sqrt(u_l) + sqrt(u_r)) / (sqrt(h_l) + sqrt(h_r)) cbar = sqrt( 0.5 * g * (h_l + h_r)) W_1 = | 1 | s_1 = ubar - cbar | ubar - cbar | W_2 = | 1 | s_1 = ubar + cbar | ubar + cbar | a1 = 0.5 * ( - delta_hu + (ubar + cbar) * delta_h ) / cbar a2 = 0.5 * ( delta_hu - (ubar - cbar) * delta_h ) / cbar *problem_data* should contain: - *g* - (float) Gravitational constant - *efix* - (bool) Boolean as to whether a entropy fix should be used, if not present, false is assumed :Version: 1.0 (2009-02-05) # Array shapes # Output arrays # Compute roe-averaged quantities # Compute Flux structure # Compute each family of waves HLL shallow water solver :: W_1 = Q_hat - Q_l s_1 = min(u_l-c_l,u_l+c_l,lambda_roe_1,lambda_roe_2) W_2 = Q_r - Q_hat s_2 = max(u_r-c_r,u_r+c_r,lambda_roe_1,lambda_roe_2) Q_hat = ( f(q_r) - f(q_l) - s_2 * q_r + s_1 * q_l ) / (s_1 - s_2) *problem_data* should contain: - *g* - (float) Gravitational constant :Version: 1.0 (2009-02-05) # Array shapes # Output arrays # Compute Roe and right and left speeds # Compute Einfeldt speeds # Compute middle state # Compute each family of waves # Compute variations Shallow water Riemann solver using fwaves Also includes support for bathymetry but be wary if you think you might have dry states as this has not been tested. *problem_data* should contain: - *grav* - (float) Gravitational constant - *sea_level* - (float) Datum from which the dry-state is calculated. :Version: 1.0 (2014-09-05) # Output arrays # Extract state # Speeds Exact shallow water Riemann solver .. warning:: This solver has not been implemented. | 3.035056 | 3 |
nuitka/Constants.py | juanfra684/Nuitka | 1 | 757 | # Copyright 2020, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Module for constants in Nuitka.
This contains tools to compare, classify and test constants.
"""
import math
from types import BuiltinFunctionType
from nuitka.Builtins import builtin_type_names
from nuitka.PythonVersions import python_version
from .__past__ import ( # pylint: disable=I0021,redefined-builtin
iterItems,
long,
unicode,
xrange,
)
from .Builtins import (
builtin_anon_names,
builtin_anon_value_list,
builtin_exception_values_list,
builtin_named_values_list,
)
NoneType = type(None)
def compareConstants(a, b):
# Many many cases to deal with, pylint: disable=too-many-branches,too-many-return-statements
# Supposed fast path for comparison.
if type(a) is not type(b):
return False
# Now it's either not the same, or it is a container that contains NaN or it
# is a complex or float that is NaN, the other cases can use == at the end.
if type(a) is complex:
return compareConstants(a.imag, b.imag) and compareConstants(a.real, b.real)
if type(a) is float:
# Check sign first, -0.0 is not 0.0, or -nan is not nan, it has a
# different sign for a start.
if math.copysign(1.0, a) != math.copysign(1.0, b):
return False
if math.isnan(a) and math.isnan(b):
return True
return a == b
if type(a) in (tuple, list):
if len(a) != len(b):
return False
for ea, eb in zip(a, b):
if not compareConstants(ea, eb):
return False
return True
if type(a) is dict:
if len(a) != len(b):
return False
for ea1, ea2 in iterItems(a):
for eb1, eb2 in iterItems(b):
if compareConstants(ea1, eb1) and compareConstants(ea2, eb2):
break
else:
return False
return True
if type(a) in (frozenset, set):
if len(a) != len(b):
return False
for ea in a:
if ea not in b:
# Due to NaN values, we need to compare each set element with
# all the other set to be really sure.
for eb in b:
if compareConstants(ea, eb):
break
else:
return False
return True
if type(a) is xrange:
return str(a) == str(b)
# The NaN values of float and complex may let this fail, even if the
# constants are built in the same way, therefore above checks.
return a == b
# These built-in type references are kind of constant too. The list should be
# complete.
constant_builtin_types = (
int,
str,
float,
list,
tuple,
set,
dict,
slice,
complex,
xrange,
NoneType,
)
if python_version >= 300:
constant_builtin_types += (bytes,)
else:
constant_builtin_types += (
unicode,
long,
# This has no name in Python, but the natural one in C-API.
builtin_anon_names["instance"],
)
def isConstant(constant):
# Too many cases and all return, that is how we do it here,
# pylint: disable=too-many-branches,too-many-return-statements
constant_type = type(constant)
if constant_type is dict:
for key, value in iterItems(constant):
if not isConstant(key):
return False
if not isConstant(value):
return False
return True
elif constant_type in (tuple, list):
for element_value in constant:
if not isConstant(element_value):
return False
return True
elif constant_type is slice:
if (
not isConstant(constant.start)
or not isConstant(constant.stop)
or not isConstant(constant.step)
):
return False
return True
elif constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
range,
bytes,
set,
frozenset,
xrange,
bytearray,
):
return True
elif constant in (Ellipsis, NoneType, NotImplemented):
return True
elif constant in builtin_anon_value_list:
return True
elif constant_type is type:
# Maybe pre-build this as a set for quicker testing.
return (
constant.__name__ in builtin_type_names
or constant in builtin_exception_values_list
)
elif constant_type is BuiltinFunctionType and constant in builtin_named_values_list:
# TODO: Some others could also be usable and even interesting, but
# then probably should go into other node types, e.g. str.join is
# a candidate.
return True
else:
return False
def isMutable(constant):
""" Is a constant mutable
    That means a user of a reference to it can modify it. Strings are
a prime example of immutable, dictionaries are mutable.
"""
# Many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
range,
bytes,
slice,
xrange,
type,
BuiltinFunctionType,
):
return False
elif constant_type in (dict, list, set, bytearray):
return True
elif constant_type is tuple:
for value in constant:
if isMutable(value):
return True
return False
elif constant_type is frozenset:
for value in constant:
if isMutable(value):
return True
return False
elif constant is Ellipsis:
return False
elif constant is NotImplemented:
return False
else:
assert False, repr(constant)
def isHashable(constant):
""" Is a constant hashable
    That means a user of a reference to it can use it for dicts and set
    keys. This is distinct from mutable: there is one type that is not
    mutable and still not hashable: slices.
"""
# Many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
xrange,
bytes,
type,
BuiltinFunctionType,
):
return True
elif constant_type in (dict, list, set, slice, bytearray):
return False
elif constant_type is tuple:
for value in constant:
if not isHashable(value):
return False
return True
elif constant_type is frozenset:
for value in constant:
if not isHashable(value):
return False
return True
elif constant is Ellipsis:
return True
else:
assert False, constant_type
def getUnhashableConstant(constant):
# Too many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
xrange,
bytes,
type,
BuiltinFunctionType,
):
return None
elif constant_type in (dict, list, set):
return constant
elif constant_type is tuple:
for value in constant:
res = getUnhashableConstant(value)
if res is not None:
return res
return None
elif constant is Ellipsis:
return None
elif constant in constant_builtin_types:
return None
elif constant_type is slice:
return None
else:
assert False, constant_type
def isIterableConstant(constant):
return type(constant) in (
str,
unicode,
list,
tuple,
set,
frozenset,
dict,
xrange,
bytes,
bytearray,
)
def getConstantIterationLength(constant):
assert isIterableConstant(constant)
return len(constant)
def isNumberConstant(constant):
return type(constant) in (int, long, float, bool)
def isIndexConstant(constant):
return type(constant) in (int, long, bool)
def createConstantDict(keys, values):
# Create it proper size immediately.
constant_value = dict.fromkeys(keys, None)
for key, value in zip(keys, values):
constant_value[key] = value
return constant_value
def getConstantWeight(constant):
constant_type = type(constant)
if constant_type is dict:
result = 0
for key, value in iterItems(constant):
result += getConstantWeight(key)
result += getConstantWeight(value)
return result
elif constant_type in (tuple, list, set, frozenset):
result = 0
for element_value in constant:
result += getConstantWeight(element_value)
return result
else:
return 1
def isCompileTimeConstantValue(value):
""" Determine if a value will be usable at compile time.
"""
# This needs to match code in makeCompileTimeConstantReplacementNode
if isConstant(value):
return True
elif type(value) is type:
return True
else:
return False
| # Copyright 2020, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Module for constants in Nuitka.
This contains tools to compare, classify and test constants.
"""
import math
from types import BuiltinFunctionType
from nuitka.Builtins import builtin_type_names
from nuitka.PythonVersions import python_version
from .__past__ import ( # pylint: disable=I0021,redefined-builtin
iterItems,
long,
unicode,
xrange,
)
from .Builtins import (
builtin_anon_names,
builtin_anon_value_list,
builtin_exception_values_list,
builtin_named_values_list,
)
NoneType = type(None)
def compareConstants(a, b):
# Many many cases to deal with, pylint: disable=too-many-branches,too-many-return-statements
# Supposed fast path for comparison.
if type(a) is not type(b):
return False
# Now it's either not the same, or it is a container that contains NaN or it
# is a complex or float that is NaN, the other cases can use == at the end.
if type(a) is complex:
return compareConstants(a.imag, b.imag) and compareConstants(a.real, b.real)
if type(a) is float:
# Check sign first, -0.0 is not 0.0, or -nan is not nan, it has a
# different sign for a start.
if math.copysign(1.0, a) != math.copysign(1.0, b):
return False
if math.isnan(a) and math.isnan(b):
return True
return a == b
if type(a) in (tuple, list):
if len(a) != len(b):
return False
for ea, eb in zip(a, b):
if not compareConstants(ea, eb):
return False
return True
if type(a) is dict:
if len(a) != len(b):
return False
for ea1, ea2 in iterItems(a):
for eb1, eb2 in iterItems(b):
if compareConstants(ea1, eb1) and compareConstants(ea2, eb2):
break
else:
return False
return True
if type(a) in (frozenset, set):
if len(a) != len(b):
return False
for ea in a:
if ea not in b:
# Due to NaN values, we need to compare each set element with
# all the other set to be really sure.
for eb in b:
if compareConstants(ea, eb):
break
else:
return False
return True
if type(a) is xrange:
return str(a) == str(b)
# The NaN values of float and complex may let this fail, even if the
# constants are built in the same way, therefore above checks.
return a == b
# These built-in type references are kind of constant too. The list should be
# complete.
constant_builtin_types = (
int,
str,
float,
list,
tuple,
set,
dict,
slice,
complex,
xrange,
NoneType,
)
if python_version >= 300:
constant_builtin_types += (bytes,)
else:
constant_builtin_types += (
unicode,
long,
# This has no name in Python, but the natural one in C-API.
builtin_anon_names["instance"],
)
def isConstant(constant):
# Too many cases and all return, that is how we do it here,
# pylint: disable=too-many-branches,too-many-return-statements
constant_type = type(constant)
if constant_type is dict:
for key, value in iterItems(constant):
if not isConstant(key):
return False
if not isConstant(value):
return False
return True
elif constant_type in (tuple, list):
for element_value in constant:
if not isConstant(element_value):
return False
return True
elif constant_type is slice:
if (
not isConstant(constant.start)
or not isConstant(constant.stop)
or not isConstant(constant.step)
):
return False
return True
elif constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
range,
bytes,
set,
frozenset,
xrange,
bytearray,
):
return True
elif constant in (Ellipsis, NoneType, NotImplemented):
return True
elif constant in builtin_anon_value_list:
return True
elif constant_type is type:
# Maybe pre-build this as a set for quicker testing.
return (
constant.__name__ in builtin_type_names
or constant in builtin_exception_values_list
)
elif constant_type is BuiltinFunctionType and constant in builtin_named_values_list:
# TODO: Some others could also be usable and even interesting, but
# then probably should go into other node types, e.g. str.join is
# a candidate.
return True
else:
return False
def isMutable(constant):
""" Is a constant mutable
    That means a user of a reference to it can modify it. Strings are
a prime example of immutable, dictionaries are mutable.
"""
# Many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
range,
bytes,
slice,
xrange,
type,
BuiltinFunctionType,
):
return False
elif constant_type in (dict, list, set, bytearray):
return True
elif constant_type is tuple:
for value in constant:
if isMutable(value):
return True
return False
elif constant_type is frozenset:
for value in constant:
if isMutable(value):
return True
return False
elif constant is Ellipsis:
return False
elif constant is NotImplemented:
return False
else:
assert False, repr(constant)
def isHashable(constant):
""" Is a constant hashable
    That means a user of a reference to it can use it for dicts and set
    keys. This is distinct from mutable: there is one type that is not
    mutable and still not hashable: slices.
"""
# Many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
xrange,
bytes,
type,
BuiltinFunctionType,
):
return True
elif constant_type in (dict, list, set, slice, bytearray):
return False
elif constant_type is tuple:
for value in constant:
if not isHashable(value):
return False
return True
elif constant_type is frozenset:
for value in constant:
if not isHashable(value):
return False
return True
elif constant is Ellipsis:
return True
else:
assert False, constant_type
def getUnhashableConstant(constant):
# Too many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
xrange,
bytes,
type,
BuiltinFunctionType,
):
return None
elif constant_type in (dict, list, set):
return constant
elif constant_type is tuple:
for value in constant:
res = getUnhashableConstant(value)
if res is not None:
return res
return None
elif constant is Ellipsis:
return None
elif constant in constant_builtin_types:
return None
elif constant_type is slice:
return None
else:
assert False, constant_type
def isIterableConstant(constant):
return type(constant) in (
str,
unicode,
list,
tuple,
set,
frozenset,
dict,
xrange,
bytes,
bytearray,
)
def getConstantIterationLength(constant):
assert isIterableConstant(constant)
return len(constant)
def isNumberConstant(constant):
return type(constant) in (int, long, float, bool)
def isIndexConstant(constant):
return type(constant) in (int, long, bool)
def createConstantDict(keys, values):
# Create it proper size immediately.
constant_value = dict.fromkeys(keys, None)
for key, value in zip(keys, values):
constant_value[key] = value
return constant_value
def getConstantWeight(constant):
constant_type = type(constant)
if constant_type is dict:
result = 0
for key, value in iterItems(constant):
result += getConstantWeight(key)
result += getConstantWeight(value)
return result
elif constant_type in (tuple, list, set, frozenset):
result = 0
for element_value in constant:
result += getConstantWeight(element_value)
return result
else:
return 1
def isCompileTimeConstantValue(value):
""" Determine if a value will be usable at compile time.
"""
# This needs to match code in makeCompileTimeConstantReplacementNode
if isConstant(value):
return True
elif type(value) is type:
return True
else:
return False
| en | 0.908757 | # Copyright 2020, <NAME>, mailto:<EMAIL> # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Module for constants in Nuitka. This contains tools to compare, classify and test constants. # pylint: disable=I0021,redefined-builtin # Many many cases to deal with, pylint: disable=too-many-branches,too-many-return-statements # Supposed fast path for comparison. # Now it's either not the same, or it is a container that contains NaN or it # is a complex or float that is NaN, the other cases can use == at the end. # Check sign first, -0.0 is not 0.0, or -nan is not nan, it has a # different sign for a start. # Due to NaN values, we need to compare each set element with # all the other set to be really sure. # The NaN values of float and complex may let this fail, even if the # constants are built in the same way, therefore above checks. # These built-in type references are kind of constant too. The list should be # complete. # This has no name in Python, but the natural one in C-API. # Too many cases and all return, that is how we do it here, # pylint: disable=too-many-branches,too-many-return-statements # Maybe pre-build this as a set for quicker testing. # TODO: Some others could also be usable and even interesting, but # then probably should go into other node types, e.g. str.join is # a candidate. Is a constant mutable That means a user of a reference to it, can modify it. Strings are a prime example of immutable, dictionaries are mutable. # Many cases and all return, that is how we do it here, # pylint: disable=too-many-return-statements Is a constant hashable That means a user of a reference to it, can use it for dicts and set keys. This is distinct from mutable, there is one types that is not mutable, and still not hashable: slices. # Many cases and all return, that is how we do it here, # pylint: disable=too-many-return-statements # Too many cases and all return, that is how we do it here, # pylint: disable=too-many-return-statements # Create it proper size immediately. Determine if a value will be usable at compile time. # This needs to match code in makeCompileTimeConstantReplacementNode | 1.973974 | 2 |
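# The float branch of compareConstants above distinguishes -0.0 from 0.0 via the sign bit
# and treats two NaNs as equal. A standalone sketch of those two checks (stdlib math only),
# mirroring the logic rather than importing Nuitka:
import math


def floats_match(a, b):
    if math.copysign(1.0, a) != math.copysign(1.0, b):    # catches 0.0 vs -0.0, nan vs -nan
        return False
    if math.isnan(a) and math.isnan(b):                   # plain == would say False here
        return True
    return a == b


print(0.0 == -0.0, floats_match(0.0, -0.0))                                    # True  False
print(float("nan") == float("nan"), floats_match(float("nan"), float("nan")))  # False True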
functions/predictionLambda/botocore/endpoint.py | chriscoombs/aws-comparing-algorithms-performance-mlops-cdk | 40 | 758 | # Copyright (c) 2012-2013 <NAME> http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading
from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = http_response.content
return response_dict
class Endpoint(object):
"""
Represents an endpoint for a particular service in a specific
region. Only an endpoint can make requests.
:ivar service: The Service object that describes this endpoints
service.
:ivar host: The fully qualified endpoint hostname.
:ivar session: The session object.
"""
def __init__(self, host, endpoint_prefix, event_emitter,
response_parser_factory=None, http_session=None):
self._endpoint_prefix = endpoint_prefix
self._event_emitter = event_emitter
self.host = host
self._lock = threading.Lock()
if response_parser_factory is None:
response_parser_factory = parsers.ResponseParserFactory()
self._response_parser_factory = response_parser_factory
self.http_session = http_session
if self.http_session is None:
self.http_session = URLLib3Session()
def __repr__(self):
return '%s(%s)' % (self._endpoint_prefix, self.host)
def make_request(self, operation_model, request_dict):
logger.debug("Making request for %s with params: %s",
operation_model, request_dict)
return self._send_request(request_dict, operation_model)
def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
def _encode_headers(self, headers):
# In place encoding of headers to utf-8 if they are unicode.
for key, value in headers.items():
if isinstance(value, six.text_type):
headers[key] = value.encode('utf-8')
def prepare_request(self, request):
self._encode_headers(request.headers)
return request.prepare()
def _send_request(self, request_dict, operation_model):
attempts = 1
request = self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = self._get_response(
request, operation_model, context)
while self._needs_retry(attempts, operation_model, request_dict,
success_response, exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = self.create_request(
request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (service_id, operation_model.name)
responses = self._event_emitter.emit(event_name, request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = self._send(request)
except HTTPClientError as e:
return (None, e)
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return (None, e)
# This returns the http_response and the parsed_data.
response_dict = convert_to_response_dict(http_response, operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
# Do a second parsing pass to pick up on any modeled error fields
# NOTE: Ideally, we would push this down into the parser classes but
# they currently have no reference to the operation or service model
# The parsers should probably take the operation model instead of
# output shape but we can't change that now
if http_response.status_code >= 300:
self._add_modeled_error_fields(
response_dict, parsed_response,
operation_model, parser,
)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
def _add_modeled_error_fields(
self, response_dict, parsed_response,
operation_model, parser,
):
error_code = parsed_response.get("Error", {}).get("Code")
if error_code is None:
return
service_model = operation_model.service_model
error_shape = service_model.shape_for_error_code(error_code)
if error_shape is None:
return
modeled_parse = parser.parse(response_dict, error_shape)
# TODO: avoid naming conflicts with ResponseMetadata and Error
parsed_response.update(modeled_parse)
def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
# Request needs to be retried, and we need to sleep
# for the specified number of times.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
time.sleep(handler_response)
return True
def _send(self, request):
return self.http_session.send(request)
class EndpointCreator(object):
def __init__(self, event_emitter):
self._event_emitter = event_emitter
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=URLLib3Session,
proxies=None,
socket_options=None,
client_cert=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
http_session = http_session_cls(
timeout=timeout,
proxies=proxies,
verify=self._get_verify_value(verify),
max_pool_connections=max_pool_connections,
socket_options=socket_options,
client_cert=client_cert,
)
return Endpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=http_session
)
def _get_proxies(self, url):
# We could also support getting proxies from a config file,
# but for now proxy support is taken from the environment.
return get_environ_proxies(url)
def _get_verify_value(self, verify):
# This is to account for:
# https://github.com/kennethreitz/requests/issues/1436
# where we need to honor REQUESTS_CA_BUNDLE because we're creating our
# own request objects.
# First, if verify is not None, then the user explicitly specified
# a value so this automatically wins.
if verify is not None:
return verify
# Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
# True if the env var does not exist.
return os.environ.get('REQUESTS_CA_BUNDLE', True)
| # Copyright (c) 2012-2013 <NAME> http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading
from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = http_response.content
return response_dict
class Endpoint(object):
"""
Represents an endpoint for a particular service in a specific
region. Only an endpoint can make requests.
:ivar service: The Service object that describes this endpoints
service.
:ivar host: The fully qualified endpoint hostname.
:ivar session: The session object.
"""
def __init__(self, host, endpoint_prefix, event_emitter,
response_parser_factory=None, http_session=None):
self._endpoint_prefix = endpoint_prefix
self._event_emitter = event_emitter
self.host = host
self._lock = threading.Lock()
if response_parser_factory is None:
response_parser_factory = parsers.ResponseParserFactory()
self._response_parser_factory = response_parser_factory
self.http_session = http_session
if self.http_session is None:
self.http_session = URLLib3Session()
def __repr__(self):
return '%s(%s)' % (self._endpoint_prefix, self.host)
def make_request(self, operation_model, request_dict):
logger.debug("Making request for %s with params: %s",
operation_model, request_dict)
return self._send_request(request_dict, operation_model)
def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
def _encode_headers(self, headers):
# In place encoding of headers to utf-8 if they are unicode.
for key, value in headers.items():
if isinstance(value, six.text_type):
headers[key] = value.encode('utf-8')
def prepare_request(self, request):
self._encode_headers(request.headers)
return request.prepare()
def _send_request(self, request_dict, operation_model):
attempts = 1
request = self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = self._get_response(
request, operation_model, context)
while self._needs_retry(attempts, operation_model, request_dict,
success_response, exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = self.create_request(
request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (service_id, operation_model.name)
responses = self._event_emitter.emit(event_name, request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = self._send(request)
except HTTPClientError as e:
return (None, e)
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return (None, e)
# This returns the http_response and the parsed_data.
response_dict = convert_to_response_dict(http_response, operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
# Do a second parsing pass to pick up on any modeled error fields
# NOTE: Ideally, we would push this down into the parser classes but
# they currently have no reference to the operation or service model
# The parsers should probably take the operation model instead of
# output shape but we can't change that now
if http_response.status_code >= 300:
self._add_modeled_error_fields(
response_dict, parsed_response,
operation_model, parser,
)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
def _add_modeled_error_fields(
self, response_dict, parsed_response,
operation_model, parser,
):
error_code = parsed_response.get("Error", {}).get("Code")
if error_code is None:
return
service_model = operation_model.service_model
error_shape = service_model.shape_for_error_code(error_code)
if error_shape is None:
return
modeled_parse = parser.parse(response_dict, error_shape)
# TODO: avoid naming conflicts with ResponseMetadata and Error
parsed_response.update(modeled_parse)
def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
# Request needs to be retried, and we need to sleep
# for the specified number of times.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
time.sleep(handler_response)
return True
def _send(self, request):
return self.http_session.send(request)
class EndpointCreator(object):
def __init__(self, event_emitter):
self._event_emitter = event_emitter
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=URLLib3Session,
proxies=None,
socket_options=None,
client_cert=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
http_session = http_session_cls(
timeout=timeout,
proxies=proxies,
verify=self._get_verify_value(verify),
max_pool_connections=max_pool_connections,
socket_options=socket_options,
client_cert=client_cert,
)
return Endpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=http_session
)
def _get_proxies(self, url):
# We could also support getting proxies from a config file,
# but for now proxy support is taken from the environment.
return get_environ_proxies(url)
def _get_verify_value(self, verify):
# This is to account for:
# https://github.com/kennethreitz/requests/issues/1436
# where we need to honor REQUESTS_CA_BUNDLE because we're creating our
# own request objects.
# First, if verify is not None, then the user explicitly specified
# a value so this automatically wins.
if verify is not None:
return verify
# Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
# True if the env var does not exist.
return os.environ.get('REQUESTS_CA_BUNDLE', True)
| en | 0.852484 | # Copyright (c) 2012-2013 <NAME> http://garnaat.org/ # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. Convert an HTTP response object to a request dict. This converts the requests library's HTTP response object to a dictionary. :type http_response: botocore.vendored.requests.model.Response :param http_response: The HTTP response from an AWS service request. :rtype: dict :return: A response dictionary which will contain the following keys: * headers (dict) * status_code (int) * body (string or file-like object) Represents an endpoint for a particular service in a specific region. Only an endpoint can make requests. :ivar service: The Service object that describes this endpoints service. :ivar host: The fully qualified endpoint hostname. :ivar session: The session object. # In place encoding of headers to utf-8 if they are unicode. # If there is a stream associated with the request, we need # to reset it before attempting to send the request again. # This will ensure that we resend the entire contents of the # body. # Create a new request when retried (including a new signature). # We want to share num retries, not num attempts. # This will return a tuple of (success_response, exception) # and success_response is itself a tuple of # (http_response, parsed_dict). # If an exception occurs then the success_response is None. # If no exception occurs then exception is None. # This returns the http_response and the parsed_data. # Do a second parsing pass to pick up on any modeled error fields # NOTE: Ideally, we would push this down into the parser classes but # they currently have no reference to the operation or service model # The parsers should probably take the operation model instead of # output shape but we can't change that now # TODO: avoid naming conflicts with ResponseMetadata and Error # Request needs to be retried, and we need to sleep # for the specified number of times. # We could also support getting proxies from a config file, # but for now proxy support is taken from the environment. # This is to account for: # https://github.com/kennethreitz/requests/issues/1436 # where we need to honor REQUESTS_CA_BUNDLE because we're creating our # own request objects. # First, if verify is not None, then the user explicitly specified # a value so this automatically wins. # Otherwise use the value from REQUESTS_CA_BUNDLE, or default to # True if the env var does not exist. | 2.085861 | 2 |
week2/Assignment2Answer.py | RayshineRen/Introduction_to_Data_Science_in_Python | 1 | 759 |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 21:56:15 2020
@author: Ray
@email: <EMAIL>
@wechat: RayTing0305
"""
'''
Question 1
Write a function called proportion_of_education which returns the proportion of children in the dataset who had a mother with the education levels equal to less than high school (<12), high school (12), more than high school but not a college graduate (>12) and college degree.
This function should return a dictionary in the form of (use the correct numbers, do not round numbers):
{"less than high school":0.2,
"high school":0.4,
"more than high school but not college":0.2,
"college":0.2}
'''
import scipy.stats as stats
import numpy as np
import pandas as pd
df = pd.read_csv("./assets/NISPUF17.csv")
def proportion_of_education():
# your code goes here
# YOUR CODE HERE
df_edu = df.EDUC1
edu_list = [1, 2, 3, 4]
zero_df = pd.DataFrame(np.zeros((df_edu.shape[0], len(edu_list))), columns=edu_list)
for edu in edu_list:
zero_df[edu][df_edu==edu]=1
#zero_df
sum_ret = zero_df.sum(axis=0)
name_l = ["less than high school", "high school", "more than high school but not college", "college"]
rat = sum_ret.values/sum(sum_ret.values)
dic = dict()
for i in range(4):
dic[name_l[i]] = rat[i]
return dic
raise NotImplementedError()
assert type(proportion_of_education())==type({}), "You must return a dictionary."
assert len(proportion_of_education()) == 4, "You have not returned a dictionary with four items in it."
assert "less than high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "more than high school but not college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
'''
Question 2
Let's explore the relationship between being fed breastmilk as a child and getting a seasonal influenza vaccine from a healthcare provider. Return a tuple of the average number of influenza vaccines for those children we know received breastmilk as a child and those we know did not.
This function should return a tuple in the form (use the correct numbers):
(2.5, 0.1)
'''
def average_influenza_doses():
# YOUR CODE HERE
# Whether the child was fed breastmilk
fed_breastmilk = list(df.groupby(by='CBF_01'))
be_fed_breastmilk = fed_breastmilk[0][1]
not_fed_breastmilk = fed_breastmilk[1][1]
# Average number of influenza doses for breastfed children
be_fed_breastmilk_influenza = be_fed_breastmilk.P_NUMFLU
num_be_fed_breastmilk_influenza = be_fed_breastmilk_influenza.dropna().mean()
# Average number of influenza doses for children who were not breastfed
not_be_fed_breastmilk_influenza = not_fed_breastmilk.P_NUMFLU
num_not_be_fed_breastmilk_influenza = not_be_fed_breastmilk_influenza.dropna().mean()
return num_be_fed_breastmilk_influenza, num_not_be_fed_breastmilk_influenza
raise NotImplementedError()
assert len(average_influenza_doses())==2, "Return two values in a tuple, the first for yes and the second for no."
'''
Question 3
It would be interesting to see if there is any evidence of a link between vaccine effectiveness and sex of the child. Calculate the ratio of the number of children who contracted chickenpox but were vaccinated against it (at least one varicella dose) versus those who were vaccinated but did not contract chicken pox. Return results by sex.
This function should return a dictionary in the form of (use the correct numbers):
{"male":0.2,
"female":0.4}
Note: To aid in verification, the chickenpox_by_sex()['female'] value the autograder is looking for starts with the digits 0.0077.
'''
def chickenpox_by_sex():
# YOUR CODE HERE
# Whether the child contracted varicella (chickenpox)
cpox = df.HAD_CPOX
#cpox.value_counts()
cpox_group = list(df.groupby(by='HAD_CPOX'))
have_cpox = cpox_group[0][1]
not_have_cpox = cpox_group[1][1]
# Split by sex
have_cpox_group = list(have_cpox.groupby(by='SEX'))
not_have_cpox_group = list(not_have_cpox.groupby(by='SEX'))
have_cpox_boy = have_cpox_group[0][1]
have_cpox_girl = have_cpox_group[1][1]
not_have_cpox_boy = not_have_cpox_group[0][1]
not_have_cpox_girl = not_have_cpox_group[1][1]
# Vaccinated and contracted chickenpox
#have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMMMR']>0) | (have_cpox_boy['P_NUMVRC']>0)]
have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMVRC']>0)]
num_have_cpox_boy_injected = have_cpox_boy_injected.count()['SEQNUMC']
have_cpox_girl_injected = have_cpox_girl[(have_cpox_girl['P_NUMVRC']>0)]
num_have_cpox_girl_injected = have_cpox_girl_injected.count()['SEQNUMC']
# Vaccinated and did not contract chickenpox
not_have_cpox_boy_injected = not_have_cpox_boy[(not_have_cpox_boy['P_NUMVRC']>0)]
num_not_have_cpox_boy_injected = not_have_cpox_boy_injected.count()['SEQNUMC']
not_have_cpox_girl_injected = not_have_cpox_girl[(not_have_cpox_girl['P_NUMVRC']>0)]
num_not_have_cpox_girl_injected = not_have_cpox_girl_injected.count()['SEQNUMC']
# Compute the ratios
ratio_boy = num_have_cpox_boy_injected / num_not_have_cpox_boy_injected
ratio_girl = num_have_cpox_girl_injected / num_not_have_cpox_girl_injected
dic = {}
dic['male'] = ratio_boy
dic['female'] = ratio_girl
return dic
raise NotImplementedError()
assert len(chickenpox_by_sex())==2, "Return a dictionary with two items, the first for males and the second for females."
'''
Question 4
A correlation is a statistical relationship between two variables. If we wanted to know if vaccines work, we might look at the correlation between the use of the vaccine and whether it results in prevention of the infection or disease [1]. In this question, you are to see if there is a correlation between having had the chicken pox and the number of chickenpox vaccine doses given (varicella).
Some notes on interpreting the answer. The had_chickenpox_column is either 1 (for yes) or 2 (for no), and the num_chickenpox_vaccine_column is the number of doses a child has been given of the varicella vaccine. A positive correlation (e.g., corr > 0) means that an increase in had_chickenpox_column (which means more no’s) would also increase the values of num_chickenpox_vaccine_column (which means more doses of vaccine). If there is a negative correlation (e.g., corr < 0), it indicates that having had chickenpox is related to an increase in the number of vaccine doses.
Also, pval is the probability that we observe a correlation between had_chickenpox_column and num_chickenpox_vaccine_column which is greater than or equal to a particular value occurred by chance. A small pval means that the observed correlation is highly unlikely to occur by chance. In this case, pval should be very small (will end in e-18 indicating a very small number).
[1] This isn’t really the full picture, since we are not looking at when the dose was given. It’s possible that children had chickenpox and then their parents went to get them the vaccine. Does this dataset have the data we would need to investigate the timing of the dose?
'''
def corr_chickenpox():
cpox = df[(df.P_NUMVRC).notnull()]
have_cpox = cpox[(cpox.HAD_CPOX==1) | (cpox.HAD_CPOX==2)]
df1=pd.DataFrame({"had_chickenpox_column":have_cpox.HAD_CPOX,
"num_chickenpox_vaccine_column":have_cpox.P_NUMVRC})
corr, pval=stats.pearsonr(df1["had_chickenpox_column"],df1["num_chickenpox_vaccine_column"])
return corr
raise NotImplementedError()
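# Editorial sketch (toy data, not from the NIS-PUF17 survey): stats.pearsonr returns a
# (correlation, p-value) pair; with HAD_CPOX coded 1=yes and 2=no, a negative correlation
# means having had chickenpox tends to go with more varicella doses.
# example_corr, example_pval = stats.pearsonr([1, 1, 2, 2, 2], [2, 1, 0, 0, 1])
# print(example_corr, example_pval)  # corr is negative for this toy sample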
backup/26.py | accordinglyto/dferte | 0 | 760 | from numpy import genfromtxt
import matplotlib.pyplot as plt
import mpl_finance
import numpy as np
import uuid
import matplotlib
# Input your csv file here with historical data
ad = genfromtxt(f"../financial_data/SM.csv", delimiter=",", dtype=str)
def convolve_sma(array, period):
return np.convolve(array, np.ones((period,)) / period, mode="valid")
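# Example: a 3-period simple moving average of [1, 2, 3, 4, 5]
# convolve_sma(np.array([1, 2, 3, 4, 5]), 3)  ->  array([2., 3., 4.])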
def graphwerk(start, finish):
open = []
high = []
low = []
close = []
volume = []
# decision = []
date = []
c_open = []
c_high = []
c_low = []
c_close = []
c_volume = []
c_date = []
c_start = start + 12
for x in range(finish - start):
c_open.append(float(pd[c_start][1]))
c_high.append(float(pd[c_start][2]))
c_low.append(float(pd[c_start][3]))
c_close.append(float(pd[c_start][4]))
c_volume.append(float(pd[c_start][5]))
c_date.append(pd[c_start][0])
c_start = c_start + 1
for x in range(finish - start):
# The column order below is valid for the eurusd.csv file. Other financial data files
# use different column orders, so check which columns hold the open, high, low and close values.
open.append(float(pd[start][1]))
high.append(float(pd[start][2]))
low.append(float(pd[start][3]))
close.append(float(pd[start][4]))
volume.append(float(pd[start][5]))
# decision.append(str(pd[start][6]))
date.append(pd[start][0])
start = start + 1
decision = "sell"
min_forecast = min(c_low)
max_forecast = max(c_high)
if close[-1] * 1.03 < max_forecast:
decision = "buy"
# for z in all_prices:
# if close[-1] * 1.03 < z:
# decision = "buy"
sma = convolve_sma(close, 5)
smb = list(sma)
diff = sma[-1] - sma[-2]
for x in range(len(close) - len(smb)):
smb.append(smb[-1] + diff)
fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor="w", edgecolor="k")
dx = fig.add_subplot(111)
# mpl_finance.volume_overlay(ax, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=1)
mpl_finance.candlestick2_ochl(
dx, open, close, high, low, width=1.5, colorup="g", colordown="r", alpha=0.5
)
plt.autoscale()
# plt.plot(smb, color="blue", linewidth=10, alpha=0.5)
plt.axis("off")
if decision == "sell":
print("last value: " + str(close[-1]))
print(
"range of values in next 13 bars: "
+ str(min_forecast)
+ "-"
+ str(max_forecast)
)
print("sell")
plt.savefig(sell_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
else:
print("last value: " + str(close[-1]))
print(
"range of values in next 13 bars: "
+ str(min_forecast)
+ "-"
+ str(max_forecast)
)
print("buy")
plt.savefig(buy_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
# if close[-1] >= close_next:
# print('previous value is bigger')
# print('last value: ' + str(close[-1]))
# print('next value: ' + str(close_next))
# print('sell')
# plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight')
# else:
# print('previous value is smaller')
# print('last value: '+ str(close[-1]))
# print('next value: ' + str(close_next))
# print('buy')
# plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight')
# plt.show()
open.clear()
close.clear()
volume.clear()
high.clear()
low.clear()
plt.cla()
plt.clf()
# output = []
# with open("STOCKbluechip.csv") as f:
# output = [str(s) for line in f.readlines() for s in line[:-1].split(",")]
# for stock in output:
pd = ad
buy_dir = "../data/train/buy/"
sell_dir = "../data/train/sell/"
iter = 0
for x in range(len(pd)):
graphwerk(iter, iter + 12)
iter = iter + 2
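# Editorial note: each graphwerk(iter, iter + 12) call reads 12 plotted bars plus the next
# 12 forecast bars (indices up to iter + 23), so looping len(pd) times will eventually index
# past the end of the data. A tighter bound such as
#     while iter + 24 <= len(pd): graphwerk(iter, iter + 12); iter += 2
# is one possible guard (an assumption, not part of the original script).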
streams/readers/arff_reader.py | JanSurft/tornado | 103 | 761 | """
The Tornado Framework
By <NAME>
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import re
from data_structures.attribute import Attribute
from dictionary.tornado_dictionary import TornadoDic
class ARFFReader:
"""This class is used to read a .arff file."""
@staticmethod
def read(file_path):
labels = []
attributes = []
attributes_min_max = []
records = []
data_flag = False
reader = open(file_path, "r")
for line in reader:
if line.strip() == '':
continue
if line.startswith("@attribute") or line.startswith("@ATTRIBUTE"):
line = line.strip('\n\r\t')
line = line.split(' ')
attribute_name = line[1]
attribute_value_range = line[2]
attribute = Attribute()
attribute.set_name(attribute_name)
if attribute_value_range.lower() in ['numeric', 'real', 'integer']:
attribute_type = TornadoDic.NUMERIC_ATTRIBUTE
attribute_value_range = []
attributes_min_max.append([0, 0])
else:
attribute_type = TornadoDic.NOMINAL_ATTRIBUTE
attribute_value_range = attribute_value_range.strip('{}').replace("'", "")
attribute_value_range = attribute_value_range.split(',')
attributes_min_max.append([None, None])
attribute.set_type(attribute_type)
attribute.set_possible_values(attribute_value_range)
attributes.append(attribute)
elif line.startswith("@data") or line.startswith("@DATA"):
data_flag = True
labels = attributes[len(attributes) - 1].POSSIBLE_VALUES
attributes.pop(len(attributes) - 1)
continue
elif data_flag is True:
line = re.sub(r'\s+', '', line)
elements = line.split(',')
for i in range(0, len(elements) - 1):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
elements[i] = float(elements[i])
min_value = attributes_min_max[i][0]
max_value = attributes_min_max[i][1]
if elements[i] < min_value:
min_value = elements[i]
elif elements[i] > max_value:
max_value = elements[i]
attributes_min_max[i] = [min_value, max_value]
records.append(elements)
for i in range(0, len(attributes)):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
attributes[i].set_bounds_values(attributes_min_max[i][0], attributes_min_max[i][1])
return labels, attributes, records
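# Minimal usage sketch (assumes an ARFF file such as 'stream.arff' exists on disk):
# labels, attributes, records = ARFFReader.read('stream.arff')
# 'labels' holds the class values of the last @attribute declaration, 'attributes' the
# remaining Attribute objects (with min/max bounds for numeric ones), and 'records' the
# parsed rows with numeric fields cast to float.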
| """
The Tornado Framework
By <NAME>
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import re
from data_structures.attribute import Attribute
from dictionary.tornado_dictionary import TornadoDic
class ARFFReader:
"""This class is used to read a .arff file."""
@staticmethod
def read(file_path):
labels = []
attributes = []
attributes_min_max = []
records = []
data_flag = False
reader = open(file_path, "r")
for line in reader:
if line.strip() == '':
continue
if line.startswith("@attribute") or line.startswith("@ATTRIBUTE"):
line = line.strip('\n\r\t')
line = line.split(' ')
attribute_name = line[1]
attribute_value_range = line[2]
attribute = Attribute()
attribute.set_name(attribute_name)
if attribute_value_range.lower() in ['numeric', 'real', 'integer']:
attribute_type = TornadoDic.NUMERIC_ATTRIBUTE
attribute_value_range = []
attributes_min_max.append([0, 0])
else:
attribute_type = TornadoDic.NOMINAL_ATTRIBUTE
attribute_value_range = attribute_value_range.strip('{}').replace("'", "")
attribute_value_range = attribute_value_range.split(',')
attributes_min_max.append([None, None])
attribute.set_type(attribute_type)
attribute.set_possible_values(attribute_value_range)
attributes.append(attribute)
elif line.startswith("@data") or line.startswith("@DATA"):
data_flag = True
labels = attributes[len(attributes) - 1].POSSIBLE_VALUES
attributes.pop(len(attributes) - 1)
continue
elif data_flag is True:
line = re.sub('\s+', '', line)
elements = line.split(',')
for i in range(0, len(elements) - 1):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
elements[i] = float(elements[i])
min_value = attributes_min_max[i][0]
max_value = attributes_min_max[i][1]
if elements[i] < min_value:
min_value = elements[i]
elif elements[i] > max_value:
max_value = elements[i]
attributes_min_max[i] = [min_value, max_value]
records.append(elements)
for i in range(0, len(attributes)):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
attributes[i].set_bounds_values(attributes_min_max[i][0], attributes_min_max[i][1])
return labels, attributes, records
| en | 0.576752 | The Tornado Framework
By <NAME>
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com This class is used to read a .arff file. | 3.130854 | 3 |
Experimente/Experiment ID 8/run-cifar10-v7.py | MichaelSchwabe/conv-ebnas-abgabe | 6 | 762 | from __future__ import print_function
from keras.datasets import mnist
from keras.datasets import cifar10
from keras.utils.np_utils import to_categorical
import numpy as np
from keras import backend as K
from evolution import Evolution
from genome_handler import GenomeHandler
import tensorflow as tf
#import mlflow.keras
#import mlflow
#import mlflow.tensorflow
#mlflow.tensorflow.autolog()
#mlflow.keras.autolog()
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
K.set_image_data_format("channels_last")
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],x_train.shape[3]).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3]).astype('float32') / 255
# nCLasses
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
#y_train.shape
dataset = ((x_train, y_train), (x_test, y_test))
genome_handler = GenomeHandler(max_conv_layers=4,
max_dense_layers=2, # includes final dense layer
max_filters=512,
max_dense_nodes=1024,
input_shape=x_train.shape[1:],
n_classes=10)
evo = Evolution(genome_handler, data_path="log/evo_cifar10_gen40_pop10_e20.csv")
model = evo.run(dataset=dataset,
num_generations=40,
pop_size=10,
epochs=20,metric='acc')
#epochs=10,metric='loss')
print(model.summary())
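# Hypothetical follow-up (not part of the original script): persist the best evolved model
# with the standard Keras API and reload it later for inference.
# model.save('best_evo_cifar10.h5')
# from keras.models import load_model
# best_model = load_model('best_evo_cifar10.h5')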
CarModel.py | JaredFG/Multiagentes-Unity | 0 | 763 | '''
Authors: <NAME> A01749381
<NAME> A01751192
<NAME> A01379868
<NAME> A01749375
'''
from random import random
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from mesa.batchrunner import BatchRunner
from mesa.datacollection import DataCollector
from mesa.space import MultiGrid
from mesa import Agent , Model
from mesa.time import RandomActivation
# Class that creates the car agents
class CarAgent(Agent):
def __init__(self, unique_id, model):
super().__init__(unique_id, model)
self.next_cell = None
self.direction = None
self.agent_type = 0
# Function that checks whether a position is valid; returns True if it is,
# otherwise returns False.
def is_valid(self, position):
if position[0] < self.model.width and position[1] < self.model.height and position[0] >= 0 and position[1] >= 0:
if not self.model.grid.is_cell_empty(position):
return True
return False
# Function that finds the possible cells to move to; returns the position of a street cell.
def get_poss_cell(self):
neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, include_center=False)
for cell in neighborhood:
for agent in self.model.grid.get_cell_list_contents(cell):
if agent.agent_type == 2:
next_dir = (self.pos[0] - agent.pos[0], self.pos[1] - agent.pos[1])
if next_dir[0] * -1 != self.direction[0] and next_dir[1] * -1 != self.direction[1]:
return agent.pos
# Function that advances forward; returns the move variable, a coordinate tuple.
def get_nextcell(self):
move = (self.pos[0] + self.direction[0], self.pos[1] + self.direction[1])
return move
# Function that obtains the direction the car should move in; returns the street's direction.
def get_nextdirect(self, position):
for agent in self.model.grid.get_cell_list_contents(position):
if agent.agent_type == 2:
return agent.direction
# Function that handles turning; returns the street's direction.
def turn(self):
for cell in self.model.grid.get_neighborhood(self.pos, moore=False, include_center=False):
for agent in self.model.grid.get_cell_list_contents(cell):
if agent.agent_type == 2:
if agent.direction != self.direction:
return agent.direction
return None
# Function that checks for a traffic light in the next cell; returns its light
# state if present, otherwise returns True.
def check_light(self):
for agent in self.model.grid.get_cell_list_contents(self.next_cell):
if agent.agent_type == 1:
return agent.light
return True
# Function that checks whether another car is in the next cell; returns a boolean.
def check_car(self):
for agent in self.model.grid.get_cell_list_contents(self.next_cell):
if agent.agent_type == 0:
return False
return True
def step(self):
# Variable that stores the result of get_nextcell().
next_cell = self.get_nextcell()
# If the next cell is valid, store it in the car and update its direction.
if self.is_valid(next_cell):
self.next_cell = next_cell
self.direction = self.get_nextdirect(self.next_cell)
# Otherwise, store the result of turn().
else:
direction = self.turn()
# If direction is truthy, update the car's direction.
if direction:
self.direction = direction
# Otherwise, take the result of get_poss_cell() and use it
# as the car's next cell.
else:
poss = self.get_poss_cell()
self.next_cell = poss
if self.check_car():
if self.check_light():
self.model.grid.move_agent(self, self.next_cell)
# Class that creates the traffic light agents.
class TrafficLightAgent(Agent):
def __init__(self, unique_id, model):
super().__init__(unique_id, model)
self.agent_type = 1
self.light = False
# Function that toggles the traffic light.
def change(self):
self.light = not self.light
# Function that counts the number of cars around a traffic light;
# returns the counter.
def count_cars(self):
counter = 0
neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, include_center=True)
for cell in neighborhood:
for agent in self.model.grid.get_cell_list_contents(cell):
if agent.agent_type == 0:
counter += 1
return counter
# Class that creates the street agents.
class StreetAgent(Agent):
def __init__(self, unique_id, model):
super().__init__(unique_id, model)
self.direction = None
self.agent_type = 2
# Class that creates the model.
class CarModel(Model):
def __init__(self, N: int, width: int, height: int):
self.num_agents = N
self.running = True
self.grid = MultiGrid(width, height, False)
self.schedule = RandomActivation(self)
self.uids = 0
self.lights_ids = 0
self.width = width
self.height = height
street_pos = []
self.lights = 4
# Loop that creates the inner part of the streets, where the intersection is.
for row in range(height):
for col in range(width):
agent = StreetAgent(self.uids, self)
self.uids += 1
flag = True
if col > width // 2 - 2 and col < width // 2 + 1 and col > 1 and col < height - 1:
if row >= height // 2:
agent.direction = (0, 1)
else:
agent.direction = (0, -1)
elif row > height // 2 - 2 and row < height // 2 + 1 and row > 1 and row < width - 1:
if col > width // 2:
agent.direction = (-1, 0)
else:
agent.direction = (1, 0)
else:
flag = False
if flag:
self.grid.place_agent(agent, (col, row))
street_pos.append((col, row))
# Loop that creates the outer part of the streets, away from the intersection.
for row in range(height):
for col in range(width):
agent = StreetAgent(self.uids, self)
self.uids += 1
flag = True
if row < 2:
if col < width - 2:
agent.direction = (1, 0)
else:
agent.direction = (0, 1)
elif row >= 2 and row < height - 2:
if col < 2:
agent.direction = (0, -1)
elif col >= width - 2 and col < width:
agent.direction = (0, 1)
else:
flag = False
elif row >= height -2 and row < height:
if col < width - 2:
agent.direction = (-1, 0)
else:
agent.direction = (0, 1)
else:
flag = False
if flag:
self.grid.place_agent(agent, (col, row))
street_pos.append((col, row))
# Loop that places the cars at random street positions.
for i in range(self.num_agents):
a = CarAgent(self.uids, self)
self.uids += 1
pos_index = self.random.randint(0, len(street_pos) - 1)
pos = street_pos.pop(pos_index)
a.direction = self.grid.get_cell_list_contents(pos)[0].direction
self.grid.place_agent(a, pos)
self.schedule.add(a)
# Create the traffic lights
for i in range(self.lights):
alight = TrafficLightAgent(self.lights_ids, self)
self.lights_ids += 1
self.schedule.add(alight)
x = 8
y = 9
if i == 0:
alight.light = True
self.grid.place_agent(alight, (x, y))
elif i == 1:
x = 8
y = 10
alight.light = True
self.grid.place_agent(alight, (x, y))
elif i == 2:
x = 11
y = 9
alight.light = False
self.grid.place_agent(alight, (x, y))
else:
x = 11
y = 10
alight.light = False
self.grid.place_agent(alight, (x, y))
def step(self):
# Counters to determine which traffic lights have more cars waiting.
count_left = 0
count_right = 0
# Loop that adds the number of cars on each side to the counters.
for agent in self.schedule.agents:
if agent.agent_type == 1:
if agent.unique_id == 0:
count_left += agent.count_cars()
elif agent.unique_id == 1:
count_left += agent.count_cars()
elif agent.unique_id == 2:
count_right += agent.count_cars()
elif agent.unique_id == 3:
count_right += agent.count_cars()
# If the left side has more cars, the left-side lights turn green
# and the right-side lights turn red.
if count_left >= count_right:
for agent in self.schedule.agents:
if agent.agent_type == 1:
if agent.unique_id == 0:
agent.light = True
elif agent.unique_id == 1:
agent.light = True
elif agent.unique_id == 2:
agent.light = False
else:
agent.light = False
# Otherwise, the right-side lights turn green and the left-side
# lights turn red.
else:
for agent in self.schedule.agents:
if agent.agent_type == 1:
if agent.unique_id == 0:
agent.light = False
elif agent.unique_id == 1:
agent.light = False
elif agent.unique_id == 2:
agent.light = True
else:
agent.light = True
self.schedule.step() | '''
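# Minimal usage sketch (assumed grid size; the fixed traffic-light coordinates above suggest
# roughly a 20 x 20 grid): instantiate the model and advance it step by step.
# model = CarModel(N=10, width=20, height=20)
# for _ in range(100):
#     model.step()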
Autores:<NAME> A01749381
<NAME> A01751192
<NAME> A01379868
<NAME> A01749375
'''
from random import random
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from mesa.batchrunner import BatchRunner
from mesa.datacollection import DataCollector
from mesa.space import MultiGrid
from mesa import Agent , Model
from mesa.time import RandomActivation
#Clase para crear a los agentes automóviles
class CarAgent(Agent):
def __init__(self, unique_id, model):
super().__init__(unique_id, model)
self.next_cell = None
self.direction = None
self.agent_type = 0
#Función para validar si la posición es válida, en caso de que sea válida regresa True, en caso contrario
#regresa False.
def is_valid(self, position):
if position[0] < self.model.width and position[1] < self.model.height and position[0] >= 0 and position[1] >= 0:
if not self.model.grid.is_cell_empty(position):
return True
return False
#Función para recibir las posibles celdas a dónde moverse, regresa la posición de la calle.
def get_poss_cell(self):
neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, include_center=False)
for cell in neighborhood:
for agent in self.model.grid.get_cell_list_contents(cell):
if agent.agent_type == 2:
next_dir = (self.pos[0] - agent.pos[0], self.pos[1] - agent.pos[1])
if next_dir[0] * -1 != self.direction[0] and next_dir[1] * -1 != self.direction[1]:
return agent.pos
#Función para avanzar hacia el frente, regresa el valor de la variable move que son coordenadas.
def get_nextcell(self):
move = (self.pos[0] + self.direction[0], self.pos[1] + self.direction[1])
return move
#Función para obtener la dirección hacia donde debe moverse el automóvil, regresa la dirección
# de la calle.
def get_nextdirect(self, position):
for agent in self.model.grid.get_cell_list_contents(position):
if agent.agent_type == 2:
return agent.direction
#Función para dar vuelta, regresa la dirección de la calle.
def turn(self):
for cell in self.model.grid.get_neighborhood(self.pos, moore=False, include_center=False):
for agent in self.model.grid.get_cell_list_contents(cell):
if agent.agent_type == 2:
if agent.direction != self.direction:
return agent.direction
return None
#Función para revisar la luz de los semáforos, regresa la luz del semáforo en caso
# de que el automóvil tenga uno de vecino. En caso contrario regresa True.
def check_light(self):
for agent in self.model.grid.get_cell_list_contents(self.next_cell):
if agent.agent_type == 1:
return agent.light
return True
#Función para checar si hay otro automovil enfrente, regresa un valor booleano.
def check_car(self):
for agent in self.model.grid.get_cell_list_contents(self.next_cell):
if agent.agent_type == 0:
return False
return True
def step(self):
#Variable para guardar el resultado de la función get_nextcell().
next_cell = self.get_nextcell()
#Condición, si la siguiente celda es válida, se guarda en el automóvil y se cambia su dirección.
if self.is_valid(next_cell):
self.next_cell = next_cell
self.direction = self.get_nextdirect(self.next_cell)
#En caso contrario una varible guarda el resultado de la función turn().
else:
direction = self.turn()
#Condición, si la variable direction es verdadera se cambia la dirección del automóvil.
if direction:
self.direction = direction
#En caso contrario una variable guarda el resultado de la función get_poss_cell().
#La siguiente celda del automóvil cambia al valor de la variable.
else:
poss = self.get_poss_cell()
self.next_cell = poss
if self.check_car():
if self.check_light():
self.model.grid.move_agent(self, self.next_cell)
#Clase para crear a los agentes semáforos.
class TrafficLightAgent(Agent):
def __init__(self, unique_id, model):
super().__init__(unique_id, model)
self.agent_type = 1
self.light = False
#Función para cambiar la luz de los semáforos.
def change(self):
self.light = not self.light
#Función para contar el número de automóviles que hay en un semáforo,
# regresa el contador con el número de automóviles.
def count_cars(self):
counter = 0
neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, include_center=True)
for cell in neighborhood:
for agent in self.model.grid.get_cell_list_contents(cell):
if agent.agent_type == 0:
counter += 1
return counter
#Clase para crear a los agentes calle.
class StreetAgent(Agent):
def __init__(self, unique_id, model):
super().__init__(unique_id, model)
self.direction = None
self.agent_type = 2
#Clase para crear el modelo.
class CarModel(Model):
def __init__(self, N: int, width: int, height: int):
self.num_agents = N
self.running = True
self.grid = MultiGrid(width, height, False)
self.schedule = RandomActivation(self)
self.uids = 0
self.lights_ids = 0
self.width = width
self.height = height
street_pos = []
self.lights = 4
#Loop para crear la parte interior de las calles, donde está el cruce.
for row in range(height):
for col in range(width):
agent = StreetAgent(self.uids, self)
self.uids += 1
flag = True
if col > width // 2 - 2 and col < width // 2 + 1 and col > 1 and col < height - 1:
if row >= height // 2:
agent.direction = (0, 1)
else:
agent.direction = (0, -1)
elif row > height // 2 - 2 and row < height // 2 + 1 and row > 1 and row < width - 1:
if col > width // 2:
agent.direction = (-1, 0)
else:
agent.direction = (1, 0)
else:
flag = False
if flag:
self.grid.place_agent(agent, (col, row))
street_pos.append((col, row))
#Loop para crear la parte exterior de las calles, donde NO está el cruce.
for row in range(height):
for col in range(width):
agent = StreetAgent(self.uids, self)
self.uids += 1
flag = True
if row < 2:
if col < width - 2:
agent.direction = (1, 0)
else:
agent.direction = (0, 1)
elif row >= 2 and row < height - 2:
if col < 2:
agent.direction = (0, -1)
elif col >= width - 2 and col < width:
agent.direction = (0, 1)
else:
flag = False
elif row >= height -2 and row < height:
if col < width - 2:
agent.direction = (-1, 0)
else:
agent.direction = (0, 1)
else:
flag = False
if flag:
self.grid.place_agent(agent, (col, row))
street_pos.append((col, row))
#Loop para crear los automóviles en posiciones random donde hay calle.
for i in range(self.num_agents):
a = CarAgent(self.uids, self)
self.uids += 1
pos_index = self.random.randint(0, len(street_pos) - 1)
pos = street_pos.pop(pos_index)
a.direction = self.grid.get_cell_list_contents(pos)[0].direction
self.grid.place_agent(a, pos)
self.schedule.add(a)
#Crear los semáforos
for i in range(self.lights):
alight = TrafficLightAgent(self.lights_ids, self)
self.lights_ids += 1
self.schedule.add(alight)
x = 8
y = 9
if i == 0:
alight.light = True
self.grid.place_agent(alight, (x, y))
elif i == 1:
x = 8
y = 10
alight.light = True
self.grid.place_agent(alight, (x, y))
elif i == 2:
x = 11
y = 9
alight.light = False
self.grid.place_agent(alight, (x, y))
else:
x = 11
y = 10
alight.light = False
self.grid.place_agent(alight, (x, y))
def step(self):
#Contadores para saber cuáles semáforos tienen más automóviles.
count_left = 0
count_right = 0
#Loop para añadir a los contadores la cantidad de automóviles que hay en cada lado.
for agent in self.schedule.agents:
if agent.agent_type == 1:
if agent.unique_id == 0:
count_left += agent.count_cars()
elif agent.unique_id == 1:
count_left += agent.count_cars()
elif agent.unique_id == 2:
count_right += agent.count_cars()
elif agent.unique_id == 3:
count_right += agent.count_cars()
#Condición, si el lado izquierdo tiene más automóviles, los semáforos del lado izquierdo
#dan luz verde y los semáforos del lado derecho dan luz roja.
if count_left >= count_right:
for agent in self.schedule.agents:
if agent.agent_type == 1:
if agent.unique_id == 0:
agent.light = True
elif agent.unique_id == 1:
agent.light = True
elif agent.unique_id == 2:
agent.light = False
else:
agent.light = False
#En caso contrario los semáforos del lado derecho dan luz verde y los semáforos del lado
#izquierdo dan luz roja.
else:
for agent in self.schedule.agents:
if agent.agent_type == 1:
if agent.unique_id == 0:
agent.light = False
elif agent.unique_id == 1:
agent.light = False
elif agent.unique_id == 2:
agent.light = True
else:
agent.light = True
self.schedule.step() | es | 0.975984 | Autores:<NAME> A01749381 <NAME> A01751192 <NAME> A01379868 <NAME> A01749375 #Clase para crear a los agentes automóviles #Función para validar si la posición es válida, en caso de que sea válida regresa True, en caso contrario #regresa False. #Función para recibir las posibles celdas a dónde moverse, regresa la posición de la calle. #Función para avanzar hacia el frente, regresa el valor de la variable move que son coordenadas. #Función para obtener la dirección hacia donde debe moverse el automóvil, regresa la dirección # de la calle. #Función para dar vuelta, regresa la dirección de la calle. #Función para revisar la luz de los semáforos, regresa la luz del semáforo en caso # de que el automóvil tenga uno de vecino. En caso contrario regresa True. #Función para checar si hay otro automovil enfrente, regresa un valor booleano. #Variable para guardar el resultado de la función get_nextcell(). #Condición, si la siguiente celda es válida, se guarda en el automóvil y se cambia su dirección. #En caso contrario una varible guarda el resultado de la función turn(). #Condición, si la variable direction es verdadera se cambia la dirección del automóvil. #En caso contrario una variable guarda el resultado de la función get_poss_cell(). #La siguiente celda del automóvil cambia al valor de la variable. #Clase para crear a los agentes semáforos. #Función para cambiar la luz de los semáforos. #Función para contar el número de automóviles que hay en un semáforo, # regresa el contador con el número de automóviles. #Clase para crear a los agentes calle. #Clase para crear el modelo. #Loop para crear la parte interior de las calles, donde está el cruce. #Loop para crear la parte exterior de las calles, donde NO está el cruce. #Loop para crear los automóviles en posiciones random donde hay calle. #Crear los semáforos #Contadores para saber cuáles semáforos tienen más automóviles. #Loop para añadir a los contadores la cantidad de automóviles que hay en cada lado. #Condición, si el lado izquierdo tiene más automóviles, los semáforos del lado izquierdo #dan luz verde y los semáforos del lado derecho dan luz roja. #En caso contrario los semáforos del lado derecho dan luz verde y los semáforos del lado #izquierdo dan luz roja. | 2.27484 | 2 |
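A minimal driver for the traffic model above, assuming it is appended to the same script; the 20x20 grid matches the hard-coded traffic-light coordinates (columns 8-11, rows 9-10), while the car count and number of steps are illustrative.
# Hypothetical driver, not part of the original submission: the grid size is
# chosen to match the hard-coded light positions, the other values are arbitrary.
if __name__ == "__main__":
    model = CarModel(N=5, width=20, height=20)
    for _ in range(10):
        model.step()
    for agent in model.schedule.agents:
        if agent.agent_type == 1:  # traffic light
            print(f"Light {agent.unique_id}:", "green" if agent.light else "red")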
qcodes/widgets/display.py | nulinspiratie/Qcodes | 2 | 764 | <gh_stars>1-10
"""Helper for adding content stored in a file to a jupyter notebook."""
import os
from pkg_resources import resource_string
from IPython.display import display, Javascript, HTML
# Originally I implemented this using regular open() and read(), so it
# could use relative paths from the importing file.
#
# But for distributable packages, pkg_resources.resource_string is the
# best way to load data files, because it works even if the package is
# in an egg or zip file. See:
# http://pythonhosted.org/setuptools/setuptools.html#accessing-data-files-at-runtime
def display_auto(qcodes_path, file_type=None):
"""
Display some javascript, css, or html content in a jupyter notebook.
Content comes from a package-relative file path. Will use the file
extension to determine file type unless overridden by file_type
Args:
qcodes_path (str): the path to the target file within the qcodes
package, like 'widgets/widgets.js'
file_type (Optional[str]): Override the file extension to determine
what type of file this is. Case insensitive, supported values
are 'js', 'css', and 'html'
"""
contents = resource_string('qcodes', qcodes_path).decode('utf-8')
if file_type is None:
ext = os.path.splitext(qcodes_path)[1].lower()
elif 'js' in file_type.lower():
ext = '.js'
elif 'css' in file_type.lower():
ext = '.css'
else:
ext = '.html'
if ext == '.js':
display(Javascript(contents))
elif ext == '.css':
display(HTML('<style>' + contents + '</style>'))
else:
# default to html. Anything else?
display(HTML(contents))
| """Helper for adding content stored in a file to a jupyter notebook."""
import os
from pkg_resources import resource_string
from IPython.display import display, Javascript, HTML
# Originally I implemented this using regular open() and read(), so it
# could use relative paths from the importing file.
#
# But for distributable packages, pkg_resources.resource_string is the
# best way to load data files, because it works even if the package is
# in an egg or zip file. See:
# http://pythonhosted.org/setuptools/setuptools.html#accessing-data-files-at-runtime
def display_auto(qcodes_path, file_type=None):
"""
Display some javascript, css, or html content in a jupyter notebook.
Content comes from a package-relative file path. Will use the file
extension to determine file type unless overridden by file_type
Args:
qcodes_path (str): the path to the target file within the qcodes
package, like 'widgets/widgets.js'
file_type (Optional[str]): Override the file extension to determine
what type of file this is. Case insensitive, supported values
are 'js', 'css', and 'html'
"""
contents = resource_string('qcodes', qcodes_path).decode('utf-8')
if file_type is None:
ext = os.path.splitext(qcodes_path)[1].lower()
elif 'js' in file_type.lower():
ext = '.js'
elif 'css' in file_type.lower():
ext = '.css'
else:
ext = '.html'
if ext == '.js':
display(Javascript(contents))
elif ext == '.css':
display(HTML('<style>' + contents + '</style>'))
else:
# default to html. Anything else?
display(HTML(contents)) | en | 0.761061 | Helper for adding content stored in a file to a jupyter notebook. # Originally I implemented this using regular open() and read(), so it # could use relative paths from the importing file. # # But for distributable packages, pkg_resources.resource_string is the # best way to load data files, because it works even if the package is # in an egg or zip file. See: # http://pythonhosted.org/setuptools/setuptools.html#accessing-data-files-at-runtime Display some javascript, css, or html content in a jupyter notebook. Content comes from a package-relative file path. Will use the file extension to determine file type unless overridden by file_type Args: qcodes_path (str): the path to the target file within the qcodes package, like 'widgets/widgets.js' file_type (Optional[str]): Override the file extension to determine what type of file this is. Case insensitive, supported values are 'js', 'css', and 'html' # default to html. Anything else? | 3.163329 | 3 |
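A short usage sketch for display_auto; 'widgets/widgets.js' is the path the docstring itself gives as an example, while the other two paths are invented for illustration.
# Hypothetical notebook cell; only 'widgets/widgets.js' is taken from the
# docstring above, the .css and .html paths are illustrative placeholders.
from qcodes.widgets.display import display_auto

display_auto('widgets/widgets.js')                        # type inferred from extension
display_auto('widgets/widgets.css')                       # assumed asset path
display_auto('widgets/summary.html', file_type='html')    # explicit type override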
hubcontrol.py | smr99/lego-hub-tk | 16 | 765 | <reponame>smr99/lego-hub-tk<filename>hubcontrol.py
#! /usr/bin/python3
import base64
from data.ProgramHubLogger import ProgramHubLogger
from datetime import datetime
import logging
import os
import sys
from ui.MotionSensor import MotionSensorWidget
from ui.PositionStatus import PositionStatusWidget
from ui.DevicePortWidget import DevicePortWidget
from ui.ConnectionWidget import ConnectionWidget
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtWidgets import QApplication, QPushButton, QWidget
from comm.HubClient import ConnectionState, HubClient
from data.HubMonitor import HubMonitor
from data.HubStatus import HubStatus
from ui.DeviceStatusWidget import DeviceStatusWidget
from utils.setup import setup_logging
logger = logging.getLogger("App")
log_filename = os.path.dirname(__file__) + "/logs/hubcontrol.log"
setup_logging(log_filename)
def list_programs(info):
storage = info['storage']
slots = info['slots']
print("%4s %-40s %6s %-20s %-12s %-10s" % ("Slot", "Decoded Name", "Size", "Last Modified", "Project_id", "Type"))
for i in range(20):
if str(i) in slots:
sl = slots[str(i)]
modified = datetime.utcfromtimestamp(sl['modified']/1000).strftime('%Y-%m-%d %H:%M:%S')
try:
decoded_name = base64.b64decode(sl['name']).decode('utf-8')
except:
decoded_name = sl['name']
try:
project = sl['project_id']
except:
project = " "
try:
type = sl['type']
except:
type = " "
print("%4s %-40s %5db %-20s %-12s %-10s" % (i, decoded_name, sl['size'], modified, project, type))
print(("Storage free %s%s of total %s%s" % (storage['free'], storage['unit'], storage['total'], storage['unit'])))
class ConsoleWidget(QTextEdit):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setReadOnly(True)
self.setLineWrapMode(QTextEdit.NoWrap)
def append(self, text):
self.moveCursor(QTextCursor.End)
self.insertPlainText(text)
sb = self.verticalScrollBar()
sb.setValue(sb.maximum())
    def append_line(self, text):
        self.append(text + '\n')
class ProgramWidget(QWidget):
def __init__(self, hub_client : HubClient, hub_monitor : HubMonitor, *args, **kwargs):
super().__init__(*args, **kwargs)
self._client = hub_client
self._monitor = hub_monitor
self._executing_program_label = QLabel()
self._slot_spinbox = QSpinBox()
self._run_button = QPushButton('Run')
self._run_button.clicked.connect(self.run_program)
self._stop_button = QPushButton('Stop')
self._stop_button.clicked.connect(self.stop_program)
runstop_widget = QWidget()
layout = QHBoxLayout(runstop_widget)
layout.addWidget(QLabel('Slot:'))
layout.addWidget(self._slot_spinbox)
layout.addWidget(self._run_button)
layout.addWidget(self._stop_button)
box = QGroupBox('Program Execution')
layout = QFormLayout(box)
layout.addRow('Executing Program ID:', self._executing_program_label)
layout.addRow(runstop_widget)
layout = QVBoxLayout()
layout.addWidget(box)
self.setLayout(layout)
def refresh(self):
is_connected = self._client.state == ConnectionState.TELEMETRY
self._executing_program_label.setText(self._monitor.execution_status[0])
self._run_button.setEnabled(is_connected)
self._stop_button.setEnabled(is_connected)
def run_program(self):
slot = self._slot_spinbox.value()
r = self._client.program_execute(slot)
logger.debug('Program execute returns: %s', r)
def stop_program(self):
r = self._client.program_terminate()
logger.debug('Program terminate returns: %s', r)
class MainWindow(QMainWindow):
def __init__(self, hub_client, hub_monitor, *args, **kwargs):
super().__init__(*args, **kwargs)
status = hub_monitor.status
self._client = hub_client
self._hub_monitor = hub_monitor
self.position_widget = PositionStatusWidget(status)
self.motion_widget = MotionSensorWidget(status)
self.program_widget = ProgramWidget(hub_client, hub_monitor)
self.port_widget = DevicePortWidget(status)
self.console = ConsoleWidget()
self.list_button = QPushButton('List')
self.list_button.clicked.connect(self.list_programs)
# Top row (status)
top_box = QWidget()
layout = QHBoxLayout(top_box)
layout.addWidget(ConnectionWidget(hub_client))
layout.addWidget(self.position_widget)
layout.addWidget(self.motion_widget)
# Button bar
buttons = QWidget()
layout = QHBoxLayout(buttons)
layout.addWidget(self.list_button)
mw = QWidget()
layout = QVBoxLayout(mw)
layout.addWidget(top_box)
layout.addWidget(buttons)
layout.addWidget(self.program_widget)
layout.addWidget(self.port_widget)
layout.addWidget(self.console)
self.setCentralWidget(mw)
hub_monitor.events.console_print += self.console.append
# Timer refresh trick from https://github.com/Taar2/pyqt5-modelview-tutorial/blob/master/modelview_3.py
# this trick is used to work around the issue of updating UI from background threads -- i.e. events
# raised by HubClient.
timer = QtCore.QTimer(self)
timer.setInterval(200)
timer.timeout.connect(self.refresh)
timer.start()
def refresh(self):
is_connected = self._client.state == ConnectionState.TELEMETRY
is_connected_usb = is_connected and self._hub_monitor.status.is_usb_connected
self.list_button.setEnabled(is_connected_usb)
self.position_widget.refresh()
self.motion_widget.refresh()
self.port_widget.refresh()
self.program_widget.refresh()
def list_programs(self):
storage_status = self._client.get_storage_status()
if storage_status is not None:
list_programs(storage_status)
def run_program(self):
slot = 4
r = self._client.program_execute(slot)
print('Program execute returns: ', r)
logger.info("LEGO status app starting up")
hc = HubClient()
monitor = HubMonitor(hc)
monitor.logger = ProgramHubLogger('logs/program')
app = QApplication(sys.argv)
window = MainWindow(hc, monitor)
window.setWindowTitle('LEGO Hub Status')
window.show()
hc.start()
sys.exit(app.exec_())
| #! /usr/bin/python3
import base64
from data.ProgramHubLogger import ProgramHubLogger
from datetime import datetime
import logging
import os
import sys
from ui.MotionSensor import MotionSensorWidget
from ui.PositionStatus import PositionStatusWidget
from ui.DevicePortWidget import DevicePortWidget
from ui.ConnectionWidget import ConnectionWidget
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtWidgets import QApplication, QPushButton, QWidget
from comm.HubClient import ConnectionState, HubClient
from data.HubMonitor import HubMonitor
from data.HubStatus import HubStatus
from ui.DeviceStatusWidget import DeviceStatusWidget
from utils.setup import setup_logging
logger = logging.getLogger("App")
log_filename = os.path.dirname(__file__) + "/logs/hubcontrol.log"
setup_logging(log_filename)
def list_programs(info):
storage = info['storage']
slots = info['slots']
print("%4s %-40s %6s %-20s %-12s %-10s" % ("Slot", "Decoded Name", "Size", "Last Modified", "Project_id", "Type"))
for i in range(20):
if str(i) in slots:
sl = slots[str(i)]
modified = datetime.utcfromtimestamp(sl['modified']/1000).strftime('%Y-%m-%d %H:%M:%S')
try:
decoded_name = base64.b64decode(sl['name']).decode('utf-8')
except:
decoded_name = sl['name']
try:
project = sl['project_id']
except:
project = " "
try:
type = sl['type']
except:
type = " "
print("%4s %-40s %5db %-20s %-12s %-10s" % (i, decoded_name, sl['size'], modified, project, type))
print(("Storage free %s%s of total %s%s" % (storage['free'], storage['unit'], storage['total'], storage['unit'])))
class ConsoleWidget(QTextEdit):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setReadOnly(True)
self.setLineWrapMode(QTextEdit.NoWrap)
def append(self, text):
self.moveCursor(QTextCursor.End)
self.insertPlainText(text)
sb = self.verticalScrollBar()
sb.setValue(sb.maximum())
def append_line(self, text): self.append(text + '\n')
class ProgramWidget(QWidget):
def __init__(self, hub_client : HubClient, hub_monitor : HubMonitor, *args, **kwargs):
super().__init__(*args, **kwargs)
self._client = hub_client
self._monitor = hub_monitor
self._executing_program_label = QLabel()
self._slot_spinbox = QSpinBox()
self._run_button = QPushButton('Run')
self._run_button.clicked.connect(self.run_program)
self._stop_button = QPushButton('Stop')
self._stop_button.clicked.connect(self.stop_program)
runstop_widget = QWidget()
layout = QHBoxLayout(runstop_widget)
layout.addWidget(QLabel('Slot:'))
layout.addWidget(self._slot_spinbox)
layout.addWidget(self._run_button)
layout.addWidget(self._stop_button)
box = QGroupBox('Program Execution')
layout = QFormLayout(box)
layout.addRow('Executing Program ID:', self._executing_program_label)
layout.addRow(runstop_widget)
layout = QVBoxLayout()
layout.addWidget(box)
self.setLayout(layout)
def refresh(self):
is_connected = self._client.state == ConnectionState.TELEMETRY
self._executing_program_label.setText(self._monitor.execution_status[0])
self._run_button.setEnabled(is_connected)
self._stop_button.setEnabled(is_connected)
def run_program(self):
slot = self._slot_spinbox.value()
r = self._client.program_execute(slot)
logger.debug('Program execute returns: %s', r)
def stop_program(self):
r = self._client.program_terminate()
logger.debug('Program terminate returns: %s', r)
class MainWindow(QMainWindow):
def __init__(self, hub_client, hub_monitor, *args, **kwargs):
super().__init__(*args, **kwargs)
status = hub_monitor.status
self._client = hub_client
self._hub_monitor = hub_monitor
self.position_widget = PositionStatusWidget(status)
self.motion_widget = MotionSensorWidget(status)
self.program_widget = ProgramWidget(hub_client, hub_monitor)
self.port_widget = DevicePortWidget(status)
self.console = ConsoleWidget()
self.list_button = QPushButton('List')
self.list_button.clicked.connect(self.list_programs)
# Top row (status)
top_box = QWidget()
layout = QHBoxLayout(top_box)
layout.addWidget(ConnectionWidget(hub_client))
layout.addWidget(self.position_widget)
layout.addWidget(self.motion_widget)
# Button bar
buttons = QWidget()
layout = QHBoxLayout(buttons)
layout.addWidget(self.list_button)
mw = QWidget()
layout = QVBoxLayout(mw)
layout.addWidget(top_box)
layout.addWidget(buttons)
layout.addWidget(self.program_widget)
layout.addWidget(self.port_widget)
layout.addWidget(self.console)
self.setCentralWidget(mw)
hub_monitor.events.console_print += self.console.append
# Timer refresh trick from https://github.com/Taar2/pyqt5-modelview-tutorial/blob/master/modelview_3.py
# this trick is used to work around the issue of updating UI from background threads -- i.e. events
# raised by HubClient.
timer = QtCore.QTimer(self)
timer.setInterval(200)
timer.timeout.connect(self.refresh)
timer.start()
def refresh(self):
is_connected = self._client.state == ConnectionState.TELEMETRY
is_connected_usb = is_connected and self._hub_monitor.status.is_usb_connected
self.list_button.setEnabled(is_connected_usb)
self.position_widget.refresh()
self.motion_widget.refresh()
self.port_widget.refresh()
self.program_widget.refresh()
def list_programs(self):
storage_status = self._client.get_storage_status()
if storage_status is not None:
list_programs(storage_status)
def run_program(self):
slot = 4
r = self._client.program_execute(slot)
print('Program execute returns: ', r)
logger.info("LEGO status app starting up")
hc = HubClient()
monitor = HubMonitor(hc)
monitor.logger = ProgramHubLogger('logs/program')
app = QApplication(sys.argv)
window = MainWindow(hc, monitor)
window.setWindowTitle('LEGO Hub Status')
window.show()
hc.start()
sys.exit(app.exec_()) | en | 0.751339 | #! /usr/bin/python3 # Top row (status) # Button bar # Timer refresh trick from https://github.com/Taar2/pyqt5-modelview-tutorial/blob/master/modelview_3.py # this trick is used to work around the issue of updating UI from background threads -- i.e. events # raised by HubClient. | 2.106045 | 2 |
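The QTimer polling trick credited in MainWindow is plain PyQt5 and works outside this project too; below is a self-contained sketch of the same pattern, with all names invented for illustration.
# Stand-alone sketch of the timer-driven refresh pattern used by MainWindow:
# a background thread only mutates shared state, and the GUI thread polls it.
import sys, threading, time
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QLabel

app = QApplication(sys.argv)
state = {'ticks': 0}                      # written by the worker thread

def _worker():
    while True:
        state['ticks'] += 1
        time.sleep(0.1)

threading.Thread(target=_worker, daemon=True).start()

label = QLabel('starting...')
label.show()

timer = QTimer()
timer.setInterval(200)                    # same 200 ms cadence as hubcontrol
timer.timeout.connect(lambda: label.setText(f"ticks: {state['ticks']}"))
timer.start()
sys.exit(app.exec_())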
To-D0-App-main/base/views.py | shagun-agrawal/To-Do-App | 1 | 766 | from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.contrib.auth.views import LoginView
from .models import Task
# Create your views here.
class CustomLoginView(LoginView):
template_name='base/login.html'
    fields='__all__'
    redirect_authenticated_user = True
def get_success_url(self):
return reverse_lazy('tasks')
class TaskList(ListView):
model = Task
context_object_name = 'tasks'
class TaskDetail(DetailView):
model = Task
context_object_name = 'task'
class TaskCreate(CreateView):
model = Task
fields = '__all__'
success_url = reverse_lazy('tasks')
class TaskUpdate(UpdateView):
model = Task
fields = '__all__'
success_url = reverse_lazy('tasks')
class TaskDelete(DeleteView):
model = Task
context_object_name='Task'
success_url = reverse_lazy('tasks')
| from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.contrib.auth.views import LoginView
from .models import Task
# Create your views here.
class CustomLoginView(LoginView):
template_name='base/login.html'
fiels='__all__'
redirect_auhenticated_user = True
def get_success_url(self):
return reverse_lazy('tasks')
class TaskList(ListView):
model = Task
context_object_name = 'tasks'
class TaskDetail(DetailView):
model = Task
context_object_name = 'task'
class TaskCreate(CreateView):
model = Task
fields = '__all__'
success_url = reverse_lazy('tasks')
class TaskUpdate(UpdateView):
model = Task
fields = '__all__'
success_url = reverse_lazy('tasks')
class TaskDelete(DeleteView):
model = Task
context_object_name='Task'
success_url = reverse_lazy('tasks')
| en | 0.968116 | # Create your views here. | 2.162498 | 2 |
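The class-based views above still need routes; a plausible base/urls.py is sketched below. Only the 'tasks' URL name is implied by the views (their reverse_lazy calls), every other path and name is an assumption.
# base/urls.py -- hypothetical wiring; only the 'tasks' name is dictated by the
# views' reverse_lazy('tasks') calls, the remaining paths/names are illustrative.
from django.urls import path
from .views import (CustomLoginView, TaskList, TaskDetail,
                    TaskCreate, TaskUpdate, TaskDelete)

urlpatterns = [
    path('login/', CustomLoginView.as_view(), name='login'),
    path('', TaskList.as_view(), name='tasks'),
    path('task/<int:pk>/', TaskDetail.as_view(), name='task'),
    path('task-create/', TaskCreate.as_view(), name='task-create'),
    path('task-update/<int:pk>/', TaskUpdate.as_view(), name='task-update'),
    path('task-delete/<int:pk>/', TaskDelete.as_view(), name='task-delete'),
]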
tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py | samysweb/dnnv | 5 | 767 | <filename>tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py
import numpy as np
from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *
TOL = 1e-6
def test_Dropout_consts():
x = np.array([3, 4]).astype(np.float32)
op = Dropout(x)
tf_op = TensorflowConverter().visit(op)
result_ = tf_op()
assert isinstance(result_, tuple)
assert len(result_) == 2
result, none = result_
assert none is None
y = np.array([3, 4]).astype(np.float32)
assert np.all(result >= (y - TOL))
assert np.all(result <= (y + TOL))
def test_Dropout_x_is_op():
x = np.array([3, 4]).astype(np.float32)
input_op = Input((2,), np.dtype(np.float32))
op = Dropout(input_op)
tf_op = TensorflowConverter().visit(op)
result_ = tf_op(x)
assert isinstance(result_, tuple)
assert len(result_) == 2
result, none = result_
assert none is None
y = np.array([3, 4]).astype(np.float32)
assert np.all(result >= (y - TOL))
assert np.all(result <= (y + TOL))
| <filename>tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py
import numpy as np
from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *
TOL = 1e-6
def test_Dropout_consts():
x = np.array([3, 4]).astype(np.float32)
op = Dropout(x)
tf_op = TensorflowConverter().visit(op)
result_ = tf_op()
assert isinstance(result_, tuple)
assert len(result_) == 2
result, none = result_
assert none is None
y = np.array([3, 4]).astype(np.float32)
assert np.all(result >= (y - TOL))
assert np.all(result <= (y + TOL))
def test_Dropout_x_is_op():
x = np.array([3, 4]).astype(np.float32)
input_op = Input((2,), np.dtype(np.float32))
op = Dropout(input_op)
tf_op = TensorflowConverter().visit(op)
result_ = tf_op(x)
assert isinstance(result_, tuple)
assert len(result_) == 2
result, none = result_
assert none is None
y = np.array([3, 4]).astype(np.float32)
assert np.all(result >= (y - TOL))
assert np.all(result <= (y + TOL))
| none | 1 | 2.452384 | 2 |
|
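The paired bound checks in the Dropout tests above are the elementwise condition |result - y| <= TOL; a possible helper (not part of the dnnv test suite) that states the same thing with np.allclose:
# Equivalent tolerance check, offered only as a sketch: rtol=0 and atol=TOL
# reproduce the result >= y - TOL and result <= y + TOL bounds used above.
import numpy as np

TOL = 1e-6

def assert_close(result, expected, tol=TOL):
    assert np.allclose(result, expected, rtol=0.0, atol=tol), (
        f"max abs diff {np.max(np.abs(result - expected))} exceeds {tol}"
    )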
smarts/zoo/worker.py | idsc-frazzoli/SMARTS | 554 | 768 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Run an agent in its own (independent) process.
What Agent code does is out of our direct control, so we want to avoid any interactions with global state that might be present in the SMARTS process.
To protect and isolate Agents from any pollution of global state in the main SMARTS process, we spawn Agents in their own fresh and independent Python process.
This script is called from within SMARTS to instantiate a remote agent.
The protocol is as follows:
1. SMARTS calls: worker.py --port 5467 # sets a unique port per agent
2. worker.py will begin listening on port 5467.
3. SMARTS connects to (ip, 5467) as a client.
4. SMARTS calls `build()` rpc with `AgentSpec` as input.
5. worker.py receives the `AgentSpec` instance and builds the Agent.
6. SMARTS calls `act()` rpc with observation as input and receives the actions as response from worker.py.
"""
import argparse
import importlib
import logging
import os
import signal
import sys
from concurrent import futures
import grpc
from smarts.zoo import worker_pb2_grpc, worker_servicer
# Front-load some expensive imports so as not to block the simulation
modules = [
"smarts.core.utils.pybullet",
"smarts.core.utils.sumo",
"smarts.core.sumo_road_network",
"numpy",
"sklearn",
"shapely",
"scipy",
"trimesh",
"panda3d",
"gym",
"ray",
]
for mod in modules:
try:
importlib.import_module(mod)
except ImportError:
if mod == "ray":
print(
"You need to install the ray dependency using pip install -e .[train] first"
)
if mod == "panda3d":
print(
"You need to install the panda3d dependency using pip install -e .[camera-obs] first"
)
pass
# End front-loaded imports
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(f"worker.py - pid({os.getpid()})")
def serve(port):
ip = "[::]"
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
worker_pb2_grpc.add_WorkerServicer_to_server(
worker_servicer.WorkerServicer(), server
)
server.add_insecure_port(f"{ip}:{port}")
server.start()
log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Started serving.")
def stop_server(unused_signum, unused_frame):
server.stop(0)
log.debug(
f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Received interrupt signal."
)
# Catch keyboard interrupt and terminate signal
signal.signal(signal.SIGINT, stop_server)
signal.signal(signal.SIGTERM, stop_server)
# Wait to receive server termination signal
server.wait_for_termination()
log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Server exited")
if __name__ == "__main__":
parser = argparse.ArgumentParser("Run an agent in an independent process.")
parser.add_argument(
"--port",
type=int,
required=True,
help="Port to listen for remote client connections.",
)
args = parser.parse_args()
serve(args.port)
| # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Run an agent in it's own (independent) process.
What Agent code does is out of our direct control, we want to avoid any interactions with global state that might be present in the SMARTS process.
To protect and isolate Agents from any pollution of global state in the main SMARTS process, we spawn Agents in their fresh and independent python process.
This script is called from within SMARTS to instantiate a remote agent.
The protocal is as follows:
1. SMARTS calls: worker.py --port 5467 # sets a unique port per agent
2. worker.py will begin listening on port 5467.
3. SMARTS connects to (ip, 5467) as a client.
4. SMARTS calls `build()` rpc with `AgentSpec` as input.
5. worker.py recieves the `AgentSpec` instances and builds the Agent.
6. SMARTS calls `act()` rpc with observation as input and receives the actions as response from worker.py.
"""
import argparse
import importlib
import logging
import os
import signal
import sys
from concurrent import futures
import grpc
from smarts.zoo import worker_pb2_grpc, worker_servicer
# Front-load some expensive imports as to not block the simulation
modules = [
"smarts.core.utils.pybullet",
"smarts.core.utils.sumo",
"smarts.core.sumo_road_network",
"numpy",
"sklearn",
"shapely",
"scipy",
"trimesh",
"panda3d",
"gym",
"ray",
]
for mod in modules:
try:
importlib.import_module(mod)
except ImportError:
if mod == "ray":
print(
"You need to install the ray dependency using pip install -e .[train] first"
)
if mod == "panda3d":
print(
"You need to install the panda3d dependency using pip install -e .[camera-obs] first"
)
pass
# End front-loaded imports
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(f"worker.py - pid({os.getpid()})")
def serve(port):
ip = "[::]"
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
worker_pb2_grpc.add_WorkerServicer_to_server(
worker_servicer.WorkerServicer(), server
)
server.add_insecure_port(f"{ip}:{port}")
server.start()
log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Started serving.")
def stop_server(unused_signum, unused_frame):
server.stop(0)
log.debug(
f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Received interrupt signal."
)
# Catch keyboard interrupt and terminate signal
signal.signal(signal.SIGINT, stop_server)
signal.signal(signal.SIGTERM, stop_server)
# Wait to receive server termination signal
server.wait_for_termination()
log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Server exited")
if __name__ == "__main__":
parser = argparse.ArgumentParser("Run an agent in an independent process.")
parser.add_argument(
"--port",
type=int,
required=True,
help="Port to listen for remote client connections.",
)
args = parser.parse_args()
serve(args.port)
| en | 0.857728 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. Run an agent in it's own (independent) process. What Agent code does is out of our direct control, we want to avoid any interactions with global state that might be present in the SMARTS process. To protect and isolate Agents from any pollution of global state in the main SMARTS process, we spawn Agents in their fresh and independent python process. This script is called from within SMARTS to instantiate a remote agent. The protocal is as follows: 1. SMARTS calls: worker.py --port 5467 # sets a unique port per agent 2. worker.py will begin listening on port 5467. 3. SMARTS connects to (ip, 5467) as a client. 4. SMARTS calls `build()` rpc with `AgentSpec` as input. 5. worker.py recieves the `AgentSpec` instances and builds the Agent. 6. SMARTS calls `act()` rpc with observation as input and receives the actions as response from worker.py. # Front-load some expensive imports as to not block the simulation # End front-loaded imports # Catch keyboard interrupt and terminate signal # Wait to receive server termination signal | 1.7323 | 2 |
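From the SMARTS side, talking to a spawned worker is ordinary gRPC; the sketch below assumes the generated stub is called WorkerStub (the usual grpcio-tools name for the Worker service registered above) and omits the request messages, which are defined in worker_pb2.
# Hypothetical client-side sketch, not taken from the SMARTS code base. The stub
# name follows standard grpc codegen conventions and is therefore an assumption.
import grpc
from smarts.zoo import worker_pb2_grpc

channel = grpc.insecure_channel("localhost:5467")   # the port given to worker.py --port
stub = worker_pb2_grpc.WorkerStub(channel)
# stub.build(...) and stub.act(...) would be called here with worker_pb2 messages.
channel.close()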
week2/problems/problem2.py | Nburkhal/mit-cs250 | 0 | 769 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Now write a program that calculates the minimum fixed monthly payment needed
in order to pay off a credit card balance within 12 months.
By a fixed monthly payment, we mean a single number which does not change each month,
but instead is a constant amount that will be paid each month.
In this problem, we will not be dealing with a minimum monthly payment rate.
The following variables contain values as described below:
balance - the outstanding balance on the credit card
annualInterestRate - annual interest rate as a decimal
The program should print out one line: the lowest monthly payment
that will pay off all debt in under 1 year, for example:
Lowest Payment: 180
Assume that the interest is compounded monthly according to
the balance at the end of the month (after the payment for that month is made).
The monthly payment must be a multiple of $10 and is the same for all months.
Notice that it is possible for the balance to become negative
using this payment scheme, which is okay. A summary of the required math is found below:
Monthly interest rate = (Annual interest rate) / 12.0
Monthly unpaid balance = (Previous balance) - (Minimum fixed monthly payment)
Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance)
Test Case 1:
balance = 3329
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 310
Test Case 2:
balance = 4773
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 440
Test Case 3:
balance = 3926
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 360
"""
# Establish variables that we know / needed for the evaluation.
# Counter optional
balance = 3329
annualInterestRate = 0.2
monthlyInterestRate = annualInterestRate / 12
monthlyPayment = 0
updatedBalance = balance
counter = 0
# Will loop through everything until we find a rate that will reduce updatedBalance to 0.
while updatedBalance > 0:
# Was stated that payments needed to happen in increments of $10
monthlyPayment += 10
    # Reset the balance to the original amount before testing this candidate payment.
updatedBalance = balance
month = 1
# For 12 months and while balance is not 0...
while month <= 12 and updatedBalance > 0:
# Subtract the ($10*n) amount
updatedBalance -= monthlyPayment
# Compound the interest AFTER making monthly payment
interest = monthlyInterestRate * updatedBalance
updatedBalance += interest
# Increase month counter
month += 1
counter += 1
print("Lowest Payment: ", monthlyPayment)
print("Number of iterations: ", counter)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Now write a program that calculates the minimum fixed monthly payment needed
in order pay off a credit card balance within 12 months.
By a fixed monthly payment, we mean a single number which does not change each month,
but instead is a constant amount that will be paid each month.
In this problem, we will not be dealing with a minimum monthly payment rate.
The following variables contain values as described below:
balance - the outstanding balance on the credit card
annualInterestRate - annual interest rate as a decimal
The program should print out one line: the lowest monthly payment
that will pay off all debt in under 1 year, for example:
Lowest Payment: 180
Assume that the interest is compounded monthly according to
the balance at the end of the month (after the payment for that month is made).
The monthly payment must be a multiple of $10 and is the same for all months.
Notice that it is possible for the balance to become negative
using this payment scheme, which is okay. A summary of the required math is found below:
Monthly interest rate = (Annual interest rate) / 12.0
Monthly unpaid balance = (Previous balance) - (Minimum fixed monthly payment)
Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance)
Test Case 1:
balance = 3329
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 310
Test Case 2:
balance = 4773
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 440
Test Case 3:
balance = 3926
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 360
"""
# Establish variables that we know / needed for the evaluation.
# Counter optional
balance = 3329
annualInterestRate = 0.2
monthlyInterestRate = annualInterestRate / 12
monthlyPayment = 0
updatedBalance = balance
counter = 0
# Will loop through everything until we find a rate that will reduce updatedBalance to 0.
while updatedBalance > 0:
# Was stated that payments needed to happen in increments of $10
monthlyPayment += 10
# To reset balance back to actual balance when loop inevitably fails.
updatedBalance = balance
month = 1
# For 12 months and while balance is not 0...
while month <= 12 and updatedBalance > 0:
# Subtract the ($10*n) amount
updatedBalance -= monthlyPayment
# Compound the interest AFTER making monthly payment
interest = monthlyInterestRate * updatedBalance
updatedBalance += interest
# Increase month counter
month += 1
counter += 1
print("Lowest Payment: ", monthlyPayment)
print("Number of iterations: ", counter)
| en | 0.885734 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Now write a program that calculates the minimum fixed monthly payment needed in order pay off a credit card balance within 12 months. By a fixed monthly payment, we mean a single number which does not change each month, but instead is a constant amount that will be paid each month. In this problem, we will not be dealing with a minimum monthly payment rate. The following variables contain values as described below: balance - the outstanding balance on the credit card annualInterestRate - annual interest rate as a decimal The program should print out one line: the lowest monthly payment that will pay off all debt in under 1 year, for example: Lowest Payment: 180 Assume that the interest is compounded monthly according to the balance at the end of the month (after the payment for that month is made). The monthly payment must be a multiple of $10 and is the same for all months. Notice that it is possible for the balance to become negative using this payment scheme, which is okay. A summary of the required math is found below: Monthly interest rate = (Annual interest rate) / 12.0 Monthly unpaid balance = (Previous balance) - (Minimum fixed monthly payment) Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance) Test Case 1: balance = 3329 annualInterestRate = 0.2 Result Your Code Should Generate: ------------------- Lowest Payment: 310 Test Case 2: balance = 4773 annualInterestRate = 0.2 Result Your Code Should Generate: ------------------- Lowest Payment: 440 Test Case 3: balance = 3926 annualInterestRate = 0.2 Result Your Code Should Generate: ------------------- Lowest Payment: 360 # Establish variables that we know / needed for the evaluation. # Counter optional # Will loop through everything until we find a rate that will reduce updatedBalance to 0. # Was stated that payments needed to happen in increments of $10 # To reset balance back to actual balance when loop inevitably fails. # For 12 months and while balance is not 0... # Subtract the ($10*n) amount # Compound the interest AFTER making monthly payment # Increase month counter | 4.644026 | 5 |
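The month-by-month arithmetic spelled out in the docstring can be factored into a helper; the sketch below is an optional restatement (not part of the assignment solution) and reproduces Test Case 1.
# Optional restatement of the update rule from the problem text; running it with
# the Test Case 1 inputs (3329, 0.2) prints 310, matching the expected answer.
def balance_after_year(balance, annual_rate, payment):
    monthly_rate = annual_rate / 12.0
    for _ in range(12):
        unpaid = balance - payment                  # monthly unpaid balance
        balance = unpaid + monthly_rate * unpaid    # add the month's interest
    return balance

payment = 0
while balance_after_year(3329, 0.2, payment) > 0:
    payment += 10
print("Lowest Payment:", payment)                   # 310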
leetcode_python/Sort/sort-characters-by-frequency.py | yennanliu/CS_basics | 18 | 770 | # V0
import collections
class Solution(object):
def frequencySort(self, s):
count = collections.Counter(s)
count_dict = dict(count)
count_tuple_sorted = sorted(count_dict.items(), key=lambda kv : -kv[1])
res = ''
for item in count_tuple_sorted:
res += item[0] * item[1]
return res
# V0'
# IDEA : collections.Counter(s).most_common
class Solution(object):
def frequencySort(self, s):
return ''.join(c * t for c, t in collections.Counter(s).most_common())
# V1
# IDEA : SORT
# https://blog.csdn.net/fuxuemingzhu/article/details/79437548
import collections
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
count = collections.Counter(s).most_common()
res = ''
for c, v in count:
res += c * v
return res
### Test case:
s=Solution()
assert s.frequencySort(['a','b','c','c']) == 'ccab'
assert s.frequencySort(['a']) == 'a'
assert s.frequencySort(['a','A','c','c']) == 'ccaA'
assert s.frequencySort(['c','c','c']) == 'ccc'
assert s.frequencySort([]) == ''
assert s.frequencySort(['','','']) == ''
# V1'
# http://bookshadow.com/weblog/2016/11/02/leetcode-sort-characters-by-frequency/
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
return ''.join(c * t for c, t in collections.Counter(s).most_common())
# V2
import collections
class Solution(object):
def frequencySort(self, s):
# sort Counter by value
# https://stackoverflow.com/questions/20950650/how-to-sort-counter-by-value-python
s_freq_dict = collections.Counter(s).most_common()
output = ''
for i in range(len(s_freq_dict)):
output = output + (s_freq_dict[i][0]*s_freq_dict[i][1])
return output
# V2'
# Time: O(n)
# Space: O(n)
import collections
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
freq = collections.defaultdict(int)
for c in s:
freq[c] += 1
counts = [""] * (len(s)+1)
for c in freq:
counts[freq[c]] += c
result = ""
for count in reversed(range(len(counts)-1)):
for c in counts[count]:
result += c * count
return result
| # V0
import collections
class Solution(object):
def frequencySort(self, s):
count = collections.Counter(s)
count_dict = dict(count)
count_tuple_sorted = sorted(count_dict.items(), key=lambda kv : -kv[1])
res = ''
for item in count_tuple_sorted:
res += item[0] * item[1]
return res
# V0'
# IDEA : collections.Counter(s).most_common
class Solution(object):
def frequencySort(self, s):
return ''.join(c * t for c, t in collections.Counter(s).most_common())
# V1
# IDEA : SORT
# https://blog.csdn.net/fuxuemingzhu/article/details/79437548
import collections
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
count = collections.Counter(s).most_common()
res = ''
for c, v in count:
res += c * v
return res
### Test case:
s=Solution()
assert s.frequencySort(['a','b','c','c']) == 'ccab'
assert s.frequencySort(['a']) == 'a'
assert s.frequencySort(['a','A','c','c']) == 'ccaA'
assert s.frequencySort(['c','c','c']) == 'ccc'
assert s.frequencySort([]) == ''
assert s.frequencySort(['','','']) == ''
# V1'
# http://bookshadow.com/weblog/2016/11/02/leetcode-sort-characters-by-frequency/
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
return ''.join(c * t for c, t in collections.Counter(s).most_common())
# V2
import collections
class Solution(object):
def frequencySort(self, s):
# sort Counter by value
# https://stackoverflow.com/questions/20950650/how-to-sort-counter-by-value-python
s_freq_dict = collections.Counter(s).most_common()
output = ''
for i in range(len(s_freq_dict)):
output = output + (s_freq_dict[i][0]*s_freq_dict[i][1])
return output
# V2'
# Time: O(n)
# Space: O(n)
import collections
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
freq = collections.defaultdict(int)
for c in s:
freq[c] += 1
counts = [""] * (len(s)+1)
for c in freq:
counts[freq[c]] += c
result = ""
for count in reversed(range(len(counts)-1)):
for c in counts[count]:
result += c * count
return result
| en | 0.551683 | # V0 # V0' # IDEA : collections.Counter(s).most_common # V1 # IDEA : SORT # https://blog.csdn.net/fuxuemingzhu/article/details/79437548 :type s: str :rtype: str ### Test case: # V1' # http://bookshadow.com/weblog/2016/11/02/leetcode-sort-characters-by-frequency/ :type s: str :rtype: str # V2 # sort Counter by value # https://stackoverflow.com/questions/20950650/how-to-sort-counter-by-value-python # V2' # Time: O(n) # Space: O(n) :type s: str :rtype: str | 3.606774 | 4 |
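A quick check against the classic LeetCode 451 example, assuming it is appended below the solutions above (so Solution resolves to the last definition); characters with equal counts may come out in any order.
# Illustrative usage only; ties can legally be ordered either way, so both
# "eetr" and "eert" are accepted answers for "tree".
if __name__ == "__main__":
    print(Solution().frequencySort("tree"))   # e.g. "eetr"
    print(Solution().frequencySort("Aabb"))   # e.g. "bbAa" (counting is case-sensitive)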
eval/scripts/human/html_gen.py | chateval/chatevalv2 | 5 | 771 | <reponame>chateval/chatevalv2
"""Stores all the helper functions that generate html"""
import random
def generate_2choice_html(example):
    '''Makes the two-choice comparison html form for the given example.
    Returns the HTML for a table of radio buttons used to pick the better response,
    as well as the number of required selections (0 if the two responses are duplicates).
'''
# Check for duplicates.
if example.target_lines[0] == example.target_lines[1]:
return "", 0
# Find all the non-duplicate target indices.
target_indices = [0, 1]
# Randomize the order targets are shown in.
random.shuffle(target_indices)
num_targets = len(target_indices)
source_html = ''
speaker = 'A'
for utterance in example.source_line_utterances():
source_html += '<h4>Speaker %s: %s</h4>' % (speaker, utterance)
speaker = 'A' if speaker == 'B' else 'B'
html = """
<br/>
<div class="panel panel-default btn-group">
%s
<br/>
<table>
""" % (source_html)
html += """
<tr>
<td>Speaker %s: %s</td>""" % (speaker, example.target_lines[target_indices[0]])
html += """
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/>
</label>
</td>
</tr>""" % (example.key, example.key, target_indices[0])
html += """
<tr>
<td>Speaker %s: %s</td>""" % (speaker, example.target_lines[target_indices[1]])
html += """
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/>
</label>
</td>
</tr>""" % (example.key, example.key, target_indices[1])
html += """
<tr>
<td>It's a tie.</td>
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-tie" data-col="1" value="1"/>
</label>
</td>
</tr>""" % (example.key, example.key)
html += """
</table>
</div>
"""
return html, 1
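# Illustrative sketch, not in the original module: generate_2choice_html only needs
# an object exposing `key`, `target_lines`, and `source_line_utterances()`, so a
# hypothetical stand-in like the one below is enough to preview the generated HTML.
class _DemoExample:
    key = 'demo-0'
    source_line = 'can i get you something from the cafe?'
    target_lines = ['coffee would be great', "I don't know what to say."]

    def source_line_utterances(self):
        return [self.source_line]

if __name__ == '__main__':
    demo_html, num_required = generate_2choice_html(_DemoExample())
    print(num_required)  # 1 required selection contributed by this pair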
def generate_ranking_tables_html(example):
'''Makes html for ranking form for the specified row index.
Returns the HTML for a table of radio buttons used for ranking,
as well as a count of the total number of radio buttons.
'''
# Find all the non-duplicate target indices.
target_indices = []
for idx in range(len(example.target_lines)):
current = example.target_lines[idx]
if current not in example.target_lines[0:idx] or idx == 0:
target_indices.append(idx)
# Randomize the order targets are shown in.
random.shuffle(target_indices)
num_targets = len(target_indices)
html = """
<br/>
<div class="panel panel-default btn-group">
<h4>Speaker A: %s</h4>
<table>
<tr>
<th></th>
""" % example.source_line
for idx in range(num_targets):
if idx == 0:
tag = 'best'
elif idx == num_targets - 1:
tag = 'worst'
else:
tag = ''
html += '<th align="center">%s<br>%s</th>' % (tag, idx+1)
html += "</tr>"
for idx in target_indices:
html += """
<tr>
<td>Speaker B: %s</td>""" % (example.target_lines[idx])
# Add a row of radio buttons whose length is the number of options.
for jdx in range(num_targets):
html += """
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-%s" data-col="%s" value="%s"/>
</label>
</td>""" % (example.key, example.key, idx, jdx, jdx)
html += "</tr>"
html += """
</table>
</div>
"""
return html, num_targets
def generate_2choice_instructions():
return """
<p>Consider the following exchange between two speakers.</p>
<p>Your task is to decide which response sounds better given the previous things said.</p>
    <p>If both responses are equally good, click "It's a tie."</p>
<p><b>Example:</b><br/>Speaker A: can i get you something from the cafe?</p>
<table>
<tr><td>Speaker B: coffee would be great</td></tr>
<tr><td>Speaker B: I don't know what to say.</td></tr>
</table>
<br/>
<p>In this case, the first response is better as it directly answers Speaker A's question, so you should click the bubble next to it.</p>
<h3>You must click the Submit button when you are finished. You must complete every question before you can click Submit.</h3>
"""
def generate_multuchoice_instructions():
return """
<p>Consider the following Twitter exchanges between Speakers A and B.</p>
<p>Your task is to rank the possible responses by Speaker B from best to worst, where the best response should get the lowest ranking.</p>
<br/>
<p><b>Example:</b><br/>Speaker A: can i get you something from the cafe?</p>
<table>
<tr><td>Speaker B: coffee would be great</td></tr>
<tr><td>Speaker B: can you believe he missed the shot?</td></tr>
<tr><td>Speaker B: I don't know what to say.</td></tr>
</table>
<br/>
<p>In this case, the first response should be given rank 1, the second rank 2, and the third rank 3.</p>
<h3>You must click the Submit button when you are finished. You must complete every question before you can click Submit.</h3>
"""
def generate_HIT_html(num_required, tables_html, instructions):
question_html_value = """
<HTMLQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd">
<HTMLContent><![CDATA[
<!DOCTYPE html>
<html>
<head>
<link crossorigin="anonymous" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.0.3/css/bootstrap.min.css" integrity="sha384-IS73LIqjtYesmURkDE9MXKbXqYA8rvKEp/ghicjem7Vc3mGRdQRptJSz60tvrB6+" rel="stylesheet" /><!-- The following snippet enables the 'responsive' behavior on smaller screens -->
<style>
table {
border-collapse: collapse;
display: block;
}
td, th {
border: 1px solid #ccc;
}
th:empty {
border: 0;
}
#collapseTrigger{
color:#fff;
display: block;
text-decoration: none;
}
* {
margin: 0; padding: 0;
}
tr td:nth-child(1) {
padding-left: 10px;
padding-right: 10px;
}
.panel {
padding: 10px
}
</style>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8'/>
<script src='https://s3.amazonaws.com/mturk-public/externalHIT_v1.js' type='text/javascript'></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script>
$(function() {
var col, el;
$("input[type=radio]").click(function() {
// Make sure only one radio button is enabled per column.
el = $(this);
col = el.data("col");
cl = el.attr("class");
//if cl.includes("ex-") {
$("input." + cl + "[data-col=" + col + "]").prop("checked", false);
//}
el.prop("checked", true);
console.log("Here!")
// Only enable submit if enough radio buttons are checked.
if ($('input:radio:checked').length >= """ + str(num_required) + """ ) {
$("input[type=submit]").removeAttr("disabled");
} else {
$("input[type=submit]").attr("disabled", "disabled");
}
});
});
$(document).ready(function() {
// Instructions expand/collapse
var content = $('#instructionBody');
var trigger = $('#collapseTrigger');
content.show();
$('.collapse-text').text('(Click to collapse)');
trigger.click(function(){
content.toggle();
var isVisible = content.is(':visible');
if(isVisible){
$('.collapse-text').text('(Click to collapse)');
}else{
$('.collapse-text').text('(Click to expand)');
}
});
// end expand/collapse
});
</script>
<title>Chatbot Evaluation Task</title>
</head>
<body>
<div class="col-xs-12 col-md-12"><!-- Instructions -->
<div class="panel panel-primary">
<!-- WARNING: the ids "collapseTrigger" and "instructionBody" are being used to enable expand/collapse feature -->
<a class="panel-heading" href="javascript:void(0);" id="collapseTrigger"><strong>Rate the Chatbot's Responses</strong> <span class="collapse-text">(Click to expand)</span> </a>
<div class="panel-body" id="instructionBody">
""" + instructions + """
</div>
</div>
</div>
<!-- HTML to handle creating the HIT form -->
<form name='mturk_form' method='post' id='mturk_form' action='https://workersandbox.mturk.com/mturk/externalSubmit'>
<input type='hidden' value='' name='assignmentId' id='assignmentId'/>
<!-- This is where you define your question(s) -->
""" + tables_html + """
<!-- HTML to handle submitting the HIT -->
<p><input type='submit' id='submitButton' value='Submit' /></p></form>
<h4>You must fill out rankings for every question before you can submit.</h4>
<script language='Javascript'>turkSetAssignmentID();</script>
</body>
</html>
]]>
</HTMLContent>
<FrameHeight>600</FrameHeight>
</HTMLQuestion>
"""
return question_html_value | """Stores all the helper functions that generate html"""
import random
def generate_2choice_html(example):
'''Makes html for ranking form for the specified row index.
Returns the HTML for a table of radio buttons used for ranking,
as well as a count of the total number of radio buttons.
'''
# Check for duplicates.
if example.target_lines[0] == example.target_lines[1]:
return "", 0
# Find all the non-duplicate target indices.
target_indices = [0, 1]
# Randomize the order targets are shown in.
random.shuffle(target_indices)
num_targets = len(target_indices)
source_html = ''
speaker = 'A'
for utterance in example.source_line_utterances():
source_html += '<h4>Speaker %s: %s</h4>' % (speaker, utterance)
speaker = 'A' if speaker == 'B' else 'B'
html = """
<br/>
<div class="panel panel-default btn-group">
%s
<br/>
<table>
""" % (source_html)
html += """
<tr>
<td>Speaker %s: %s</td>""" % (speaker, example.target_lines[target_indices[0]])
html += """
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/>
</label>
</td>
</tr>""" % (example.key, example.key, target_indices[0])
html += """
<tr>
<td>Speaker %s: %s</td>""" % (speaker, example.target_lines[target_indices[1]])
html += """
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/>
</label>
</td>
</tr>""" % (example.key, example.key, target_indices[1])
html += """
<tr>
<td>It's a tie.</td>
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-tie" data-col="1" value="1"/>
</label>
</td>
</tr>""" % (example.key, example.key)
html += """
</table>
</div>
"""
return html, 1
def generate_ranking_tables_html(example):
'''Makes html for ranking form for the specified row index.
Returns the HTML for a table of radio buttons used for ranking,
as well as a count of the total number of radio buttons.
'''
# Find all the non-duplicate target indices.
target_indices = []
for idx in range(len(example.target_lines)):
current = example.target_lines[idx]
if current not in example.target_lines[0:idx] or idx == 0:
target_indices.append(idx)
# Randomize the order targets are shown in.
random.shuffle(target_indices)
num_targets = len(target_indices)
html = """
<br/>
<div class="panel panel-default btn-group">
<h4>Speaker A: %s</h4>
<table>
<tr>
<th></th>
""" % example.source_line
for idx in range(num_targets):
if idx == 0:
tag = 'best'
elif idx == num_targets - 1:
tag = 'worst'
else:
tag = ''
html += '<th align="center">%s<br>%s</th>' % (tag, idx+1)
html += "</tr>"
for idx in target_indices:
html += """
<tr>
<td>Speaker B: %s</td>""" % (example.target_lines[idx])
# Add a row of radio buttons whose length is the number of options.
for jdx in range(num_targets):
html += """
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-%s" data-col="%s" value="%s"/>
</label>
</td>""" % (example.key, example.key, idx, jdx, jdx)
html += "</tr>"
html += """
</table>
</div>
"""
return html, num_targets
def generate_2choice_instructions():
return """
<p>Consider the following exchange between two speakers.</p>
<p>Your task is to decide which response sounds better given the previous things said.</p>
    <p>If both responses are equally good, click "It's a tie."</p>
<p><b>Example:</b><br/>Speaker A: can i get you something from the cafe?</p>
<table>
<tr><td>Speaker B: coffee would be great</td></tr>
<tr><td>Speaker B: I don't know what to say.</td></tr>
</table>
<br/>
<p>In this case, the first response is better as it directly answers Speaker A's question, so you should click the bubble next to it.</p>
<h3>You must click the Submit button when you are finished. You must complete every question before you can click Submit.</h3>
"""
def generate_multuchoice_instructions():
return """
<p>Consider the following Twitter exchanges between Speakers A and B.</p>
<p>Your task is to rank the possible responses by Speaker B from best to worst, where the best response should get the lowest ranking.</p>
<br/>
<p><b>Example:</b><br/>Speaker A: can i get you something from the cafe?</p>
<table>
<tr><td>Speaker B: coffee would be great</td></tr>
<tr><td>Speaker B: can you believe he missed the shot?</td></tr>
<tr><td>Speaker B: I don't know what to say.</td></tr>
</table>
<br/>
<p>In this case, the first response should be given rank 1, the second rank 2, and the third rank 3.</p>
<h3>You must click the Submit button when you are finished. You must complete every question before you can click Submit.</h3>
"""
def generate_HIT_html(num_required, tables_html, instructions):
question_html_value = """
<HTMLQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd">
<HTMLContent><![CDATA[
<!DOCTYPE html>
<html>
<head>
<link crossorigin="anonymous" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.0.3/css/bootstrap.min.css" integrity="sha384-IS73LIqjtYesmURkDE9MXKbXqYA8rvKEp/ghicjem7Vc3mGRdQRptJSz60tvrB6+" rel="stylesheet" /><!-- The following snippet enables the 'responsive' behavior on smaller screens -->
<style>
table {
border-collapse: collapse;
display: block;
}
td, th {
border: 1px solid #ccc;
}
th:empty {
border: 0;
}
#collapseTrigger{
color:#fff;
display: block;
text-decoration: none;
}
* {
margin: 0; padding: 0;
}
tr td:nth-child(1) {
padding-left: 10px;
padding-right: 10px;
}
.panel {
padding: 10px
}
</style>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8'/>
<script src='https://s3.amazonaws.com/mturk-public/externalHIT_v1.js' type='text/javascript'></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script>
$(function() {
var col, el;
$("input[type=radio]").click(function() {
// Make sure only one radio button is enabled per column.
el = $(this);
col = el.data("col");
cl = el.attr("class");
//if cl.includes("ex-") {
$("input." + cl + "[data-col=" + col + "]").prop("checked", false);
//}
el.prop("checked", true);
console.log("Here!")
// Only enable submit if enough radio buttons are checked.
if ($('input:radio:checked').length >= """ + str(num_required) + """ ) {
$("input[type=submit]").removeAttr("disabled");
} else {
$("input[type=submit]").attr("disabled", "disabled");
}
});
});
$(document).ready(function() {
// Instructions expand/collapse
var content = $('#instructionBody');
var trigger = $('#collapseTrigger');
content.show();
$('.collapse-text').text('(Click to collapse)');
trigger.click(function(){
content.toggle();
var isVisible = content.is(':visible');
if(isVisible){
$('.collapse-text').text('(Click to collapse)');
}else{
$('.collapse-text').text('(Click to expand)');
}
});
// end expand/collapse
});
</script>
<title>Chatbot Evaluation Task</title>
</head>
<body>
<div class="col-xs-12 col-md-12"><!-- Instructions -->
<div class="panel panel-primary">
<!-- WARNING: the ids "collapseTrigger" and "instructionBody" are being used to enable expand/collapse feature -->
<a class="panel-heading" href="javascript:void(0);" id="collapseTrigger"><strong>Rate the Chatbot's Responses</strong> <span class="collapse-text">(Click to expand)</span> </a>
<div class="panel-body" id="instructionBody">
""" + instructions + """
</div>
</div>
</div>
<!-- HTML to handle creating the HIT form -->
<form name='mturk_form' method='post' id='mturk_form' action='https://workersandbox.mturk.com/mturk/externalSubmit'>
<input type='hidden' value='' name='assignmentId' id='assignmentId'/>
<!-- This is where you define your question(s) -->
""" + tables_html + """
<!-- HTML to handle submitting the HIT -->
<p><input type='submit' id='submitButton' value='Submit' /></p></form>
<h4>You must fill out rankings for every question before you can submit.</h4>
<script language='Javascript'>turkSetAssignmentID();</script>
</body>
</html>
]]>
</HTMLContent>
<FrameHeight>600</FrameHeight>
</HTMLQuestion>
"""
return question_html_value | en | 0.489012 | Stores all the helper functions that generate html Makes html for ranking form for the specified row index. Returns the HTML for a table of radio buttons used for ranking, as well as a count of the total number of radio buttons. # Check for duplicates. # Find all the non-duplicate target indices. # Randomize the order targets are shown in. <br/> <div class="panel panel-default btn-group"> %s <br/> <table> <tr> <td>Speaker %s: %s</td> <td> <label class="btn"> <input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/> </label> </td> </tr> <tr> <td>Speaker %s: %s</td> <td> <label class="btn"> <input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/> </label> </td> </tr> <tr> <td>It's a tie.</td> <td> <label class="btn"> <input type="radio" class="%s" name="%s-target-tie" data-col="1" value="1"/> </label> </td> </tr> </table> </div> Makes html for ranking form for the specified row index. Returns the HTML for a table of radio buttons used for ranking, as well as a count of the total number of radio buttons. # Find all the non-duplicate target indices. # Randomize the order targets are shown in. <br/> <div class="panel panel-default btn-group"> <h4>Speaker A: %s</h4> <table> <tr> <th></th> <tr> <td>Speaker B: %s</td> # Add a row of radio buttons whose length is the number of options. <td> <label class="btn"> <input type="radio" class="%s" name="%s-target-%s" data-col="%s" value="%s"/> </label> </td> </table> </div> <p>Consider the following exchange between two speakers.</p> <p>Your task is to decide which response sounds better given the previous things said.</p> <p>If both responses are equally good, click "It's a tie."<p> <p><b>Example:</b><br/>Speaker A: can i get you something from the cafe?</p> <table> <tr><td>Speaker B: coffee would be great</td></tr> <tr><td>Speaker B: I don't know what to say.</td></tr> </table> <br/> <p>In this case, the first response is better as it directly answers Speaker A's question, so you should click the bubble next to it.</p> <h3>You must click the Submit button when you are finished. You must complete every question before you can click Submit.</h3> <p>Consider the following Twitter exchanges between Speakers A and B.</p> <p>Your task is to rank the possible responses by Speaker B from best to worst, where the best response should get the lowest ranking.</p> <br/> <p><b>Example:</b><br/>Speaker A: can i get you something from the cafe?</p> <table> <tr><td>Speaker B: coffee would be great</td></tr> <tr><td>Speaker B: can you believe he missed the shot?</td></tr> <tr><td>Speaker B: I don't know what to say.</td></tr> </table> <br/> <p>In this case, the first response should be given rank 1, the second rank 2, and the third rank 3.</p> <h3>You must click the Submit button when you are finished. 
You must complete every question before you can click Submit.</h3> <HTMLQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd"> <HTMLContent><![CDATA[ <!DOCTYPE html> <html> <head> <link crossorigin="anonymous" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.0.3/css/bootstrap.min.css" integrity="sha384-IS73LIqjtYesmURkDE9MXKbXqYA8rvKEp/ghicjem7Vc3mGRdQRptJSz60tvrB6+" rel="stylesheet" /><!-- The following snippet enables the 'responsive' behavior on smaller screens --> <style> table { border-collapse: collapse; display: block; } td, th { border: 1px solid #ccc; } th:empty { border: 0; } #collapseTrigger{ color:#fff; display: block; text-decoration: none; } * { margin: 0; padding: 0; } tr td:nth-child(1) { padding-left: 10px; padding-right: 10px; } .panel { padding: 10px } </style> <meta http-equiv='Content-Type' content='text/html; charset=UTF-8'/> <script src='https://s3.amazonaws.com/mturk-public/externalHIT_v1.js' type='text/javascript'></script> <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script> <script> $(function() { var col, el; $("input[type=radio]").click(function() { // Make sure only one radio button is enabled per column. el = $(this); col = el.data("col"); cl = el.attr("class"); //if cl.includes("ex-") { $("input." + cl + "[data-col=" + col + "]").prop("checked", false); //} el.prop("checked", true); console.log("Here!") // Only enable submit if enough radio buttons are checked. if ($('input:radio:checked').length >= ) { $("input[type=submit]").removeAttr("disabled"); } else { $("input[type=submit]").attr("disabled", "disabled"); } }); }); $(document).ready(function() { // Instructions expand/collapse var content = $('#instructionBody'); var trigger = $('#collapseTrigger'); content.show(); $('.collapse-text').text('(Click to collapse)'); trigger.click(function(){ content.toggle(); var isVisible = content.is(':visible'); if(isVisible){ $('.collapse-text').text('(Click to collapse)'); }else{ $('.collapse-text').text('(Click to expand)'); } }); // end expand/collapse }); </script> <title>Chatbot Evaluation Task</title> </head> <body> <div class="col-xs-12 col-md-12"><!-- Instructions --> <div class="panel panel-primary"> <!-- WARNING: the ids "collapseTrigger" and "instructionBody" are being used to enable expand/collapse feature --> <a class="panel-heading" href="javascript:void(0);" id="collapseTrigger"><strong>Rate the Chatbot's Responses</strong> <span class="collapse-text">(Click to expand)</span> </a> <div class="panel-body" id="instructionBody"> </div> </div> </div> <!-- HTML to handle creating the HIT form --> <form name='mturk_form' method='post' id='mturk_form' action='https://workersandbox.mturk.com/mturk/externalSubmit'> <input type='hidden' value='' name='assignmentId' id='assignmentId'/> <!-- This is where you define your question(s) --> <!-- HTML to handle submitting the HIT --> <p><input type='submit' id='submitButton' value='Submit' /></p></form> <h4>You must fill out rankings for every question before you can submit.</h4> <script language='Javascript'>turkSetAssignmentID();</script> </body> </html> ]]> </HTMLContent> <FrameHeight>600</FrameHeight> </HTMLQuestion> | 3.551254 | 4 |
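A minimal driver sketch for the helpers above, assuming a list called `examples` whose items expose the `key`, `source_line` and `target_lines` attributes the functions already rely on; the loop and variable names are illustrative, not part of the original module.

# Hypothetical: collect one ranking table per example, then wrap them in a HIT.
tables_html = ''
num_required = 0
for example in examples:
    table, count = generate_ranking_tables_html(example)
    tables_html += table
    num_required += count   # one required radio selection per response row
question_xml = generate_HIT_html(num_required, tables_html,
                                 generate_multuchoice_instructions())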
python3_module_template/subproject/myexample.py | sdpython/python_project_template | 0 | 772 | <filename>python3_module_template/subproject/myexample.py
# -*- coding: utf-8 -*-
"""
@file
@brief This is the documentation of this module (myexampleb).
"""
class myclass:
"""
This is the documentation for this class.
**example with a sphinx directives**
It works everywhere in the documentation.
.. exref::
:title: an example of use
Just for documentation purpose.
::
m = myclass(0)
The old way:
@example(an old example of use)
This only works from the code,
not inserted in a RST file. The source
documentation is parsed and every such example is
collected and placed in a page ``all_examples.rst``
(look at the source).
@code
m = myclass(0)
@endcode
@endexample
**FAQ**
.. faqref::
:title: How to add a question ?
    Just look at this section.
Look also :ref:`l-FAQ2`.
.. faqref::
:title: Add a label
:lid: label1
Look also :ref:`l-FAQ2`.
**BLOC**
.. blocref::
:title: How to add a bloc
:tag: aaaa
    Just look at this bloc.
Look also :ref:`l-FAQ2`.
An accent, é, to check it is working.
A link to github source: :githublink:`source|py`.
"""
def __init__(self, pa):
"""
documentation for the constructor
@param pa first parameter
"""
self.pa = pa
def get_value(self, mul):
"""
returns the parameter multiplied by a value
@param mul a float
@return a float
"""
return self.pa * mul
| <filename>python3_module_template/subproject/myexample.py
# -*- coding: utf-8 -*-
"""
@file
@brief This is the documentation of this module (myexampleb).
"""
class myclass:
"""
This is the documentation for this class.
**example with a sphinx directives**
It works everywhere in the documentation.
.. exref::
:title: an example of use
Just for documentation purpose.
::
m = myclass(0)
The old way:
@example(an old example of use)
This only works from the code,
not inserted in a RST file. The source
documentation is parsed and every such example is
collected and placed in a page ``all_examples.rst``
(look at the source).
@code
m = myclass(0)
@endcode
@endexample
**FAQ**
.. faqref::
:title: How to add a question ?
    Just look at this section.
Look also :ref:`l-FAQ2`.
.. faqref::
:title: Add a label
:lid: label1
Look also :ref:`l-FAQ2`.
**BLOC**
.. blocref::
:title: How to add a bloc
:tag: aaaa
    Just look at this bloc.
Look also :ref:`l-FAQ2`.
An accent, é, to check it is working.
A link to github source: :githublink:`source|py`.
"""
def __init__(self, pa):
"""
documentation for the constructor
@param pa first parameter
"""
self.pa = pa
def get_value(self, mul):
"""
returns the parameter multiplied by a value
@param mul a float
@return a float
"""
return self.pa * mul
| en | 0.605477 | # -*- coding: utf-8 -*- @file @brief This the documentation of this module (myexampleb). This is the documentation for this class. **example with a sphinx directives** It works everywhere in the documentation. .. exref:: :title: an example of use Just for documentation purpose. :: m = myclass(0) The old way: @example(an old example of use) This only works from the code, not inserted in a RST file. The source documentation is parsed and every such example is collected and placed in a page ``all_examples.rst`` (look at the source). @code m = myclass(0) @endcode @endexample **FAQ** .. faqref:: :title: How to add a question ? Just look a this section. Look also :ref:`l-FAQ2`. .. faqref:: :title: Add a label :lid: label1 Look also :ref:`l-FAQ2`. **BLOC** .. blocref:: :title: How to add a bloc :tag: aaaa Just look a this bloc. Look also :ref:`l-FAQ2`. An accent, é, to check it is working. A link to github source: :githublink:`source|py`. documentation for the constructor @param pa first parameter returns the parameter multiplied by a value @param mul a float @return a float | 2.568743 | 3 |
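A short usage sketch for the class above (the argument values are arbitrary):

m = myclass(2)
print(m.get_value(1.5))  # 2 * 1.5 -> 3.0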
simulation/dataset_G_1q_X_Z_N1.py | eperrier/QDataSet | 42 | 773 | <reponame>eperrier/QDataSet
##############################################
"""
This module generates a dataset
"""
##############################################
# preamble
import numpy as np
from utilites import Pauli_operators, simulate, CheckNoise
################################################
# meta parameters
name = "G_1q_X_Z_N1"
################################################
# quantum parameters
dim = 2 # dimension of the system
Omega = 12 # qubit energy gap
static_operators = [0.5*Pauli_operators[3]*Omega] # drift Hamiltonian
dynamic_operators = [0.5*Pauli_operators[1]] # control Hamiltonian
noise_operators = [0.5*Pauli_operators[3]] # noise Hamiltonian
initial_states = [
np.array([[0.5,0.5],[0.5,0.5]]), np.array([[0.5,-0.5],[-0.5,0.5]]),
np.array([[0.5,-0.5j],[0.5j,0.5]]),np.array([[0.5,0.5j],[-0.5j,0.5]]),
np.array([[1,0],[0,0]]), np.array([[0,0],[0,1]])
                  ] # initial state of qubit
measurement_operators = Pauli_operators[1:] # measurement operators
##################################################
# simulation parameters
T = 1 # Evolution time
M = 1024 # Number of time steps
num_ex = 10000 # Number of examples
batch_size = 50 # batch size for TF
##################################################
# noise parameters
K = 2000 # Number of realizations
noise_profile = [1] # Noise type
###################################################
# control parameters
pulse_shape = "Gaussian" # Control pulse shape
num_pulses = 5 # Number of pulses per sequence
####################################################
# Generate the dataset
sim_parameters = dict( [(k,eval(k)) for k in ["name", "dim", "Omega", "static_operators", "dynamic_operators", "noise_operators", "measurement_operators", "initial_states", "T", "M", "num_ex", "batch_size", "K", "noise_profile", "pulse_shape", "num_pulses"] ])
CheckNoise(sim_parameters)
simulate(sim_parameters)
#################################################### | ##############################################
"""
This module generates a dataset
"""
##############################################
# preamble
import numpy as np
from utilites import Pauli_operators, simulate, CheckNoise
################################################
# meta parameters
name = "G_1q_X_Z_N1"
################################################
# quantum parameters
dim = 2 # dimension of the system
Omega = 12 # qubit energy gap
static_operators = [0.5*Pauli_operators[3]*Omega] # drift Hamiltonian
dynamic_operators = [0.5*Pauli_operators[1]] # control Hamiltonian
noise_operators = [0.5*Pauli_operators[3]] # noise Hamiltonian
initial_states = [
np.array([[0.5,0.5],[0.5,0.5]]), np.array([[0.5,-0.5],[-0.5,0.5]]),
np.array([[0.5,-0.5j],[0.5j,0.5]]),np.array([[0.5,0.5j],[-0.5j,0.5]]),
np.array([[1,0],[0,0]]), np.array([[0,0],[0,1]])
                  ] # initial state of qubit
measurement_operators = Pauli_operators[1:] # measurement operators
##################################################
# simulation parameters
T = 1 # Evolution time
M = 1024 # Number of time steps
num_ex = 10000 # Number of examples
batch_size = 50 # batch size for TF
##################################################
# noise parameters
K = 2000 # Number of realizations
noise_profile = [1] # Noise type
###################################################
# control parameters
pulse_shape = "Gaussian" # Control pulse shape
num_pulses = 5 # Number of pulses per sequence
####################################################
# Generate the dataset
sim_parameters = dict( [(k,eval(k)) for k in ["name", "dim", "Omega", "static_operators", "dynamic_operators", "noise_operators", "measurement_operators", "initial_states", "T", "M", "num_ex", "batch_size", "K", "noise_profile", "pulse_shape", "num_pulses"] ])
CheckNoise(sim_parameters)
simulate(sim_parameters)
#################################################### | de | 0.553108 | ############################################## This module generate a dataset ############################################## # preample ################################################ # meta parameters ################################################ # quantum parameters # dimension of the system # qubit energy gap # drift Hamiltonian # control Hamiltonian # noise Hamiltonian # intial state of qubit # measurement operators ################################################## # simulation parameters # Evolution time # Number of time steps # Number of examples # batch size for TF ################################################## # noise parameters # Number of realzations # Noise type ################################################### # control parameters # Control pulse shape # Number of pulses per sequence #################################################### # Generate the dataset #################################################### | 2.091626 | 2 |
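The `sim_parameters` line above collects the module-level variables by evaluating their names with `eval`. A rough equivalent without `eval`, shown only as an illustration (the key list is copied from the script):

keys = ["name", "dim", "Omega", "static_operators", "dynamic_operators",
        "noise_operators", "measurement_operators", "initial_states",
        "T", "M", "num_ex", "batch_size", "K", "noise_profile",
        "pulse_shape", "num_pulses"]
# globals() already maps each of these names to its value in the module,
# so no string evaluation is needed.
sim_parameters = {k: globals()[k] for k in keys}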
configs/mmdet/detection/detection_tensorrt_static-300x300.py | zhiqwang/mmdeploy | 746 | 774 | <reponame>zhiqwang/mmdeploy
_base_ = ['../_base_/base_tensorrt_static-300x300.py']
| _base_ = ['../_base_/base_tensorrt_static-300x300.py'] | none | 1 | 1.046886 | 1 |
|
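This one-line config only points at a base file; in the OpenMMLab config system the `_base_` entry is merged into the final configuration when the file is loaded. A hedged sketch of how such a file is usually read (the loader call is standard mmcv usage, not taken from the record):

from mmcv import Config

# Loading resolves the relative `_base_` path and merges the inherited keys.
cfg = Config.fromfile(
    'configs/mmdet/detection/detection_tensorrt_static-300x300.py')
print(cfg.pretty_text)  # the merged configuration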
user_service/user_service/api.py | Ziang-Lu/Flask-Blog | 0 | 775 | <filename>user_service/user_service/api.py
# -*- coding: utf-8 -*-
"""
API definition module.
"""
from flask import Blueprint
from flask_restful import Api
from .resources.user import UserAuth, UserItem, UserList, UserFollow
# Create an API-related blueprint
api_bp = Blueprint(name='api', import_name=__name__)
api = Api(api_bp)
api.add_resource(UserList, '/users')
api.add_resource(UserItem, '/users/<int:id>')
api.add_resource(UserAuth, '/user-auth')
api.add_resource(
UserFollow, '/user-follow/<int:follower_id>/<followed_username>'
)
| <filename>user_service/user_service/api.py
# -*- coding: utf-8 -*-
"""
API definition module.
"""
from flask import Blueprint
from flask_restful import Api
from .resources.user import UserAuth, UserItem, UserList, UserFollow
# Create an API-related blueprint
api_bp = Blueprint(name='api', import_name=__name__)
api = Api(api_bp)
api.add_resource(UserList, '/users')
api.add_resource(UserItem, '/users/<int:id>')
api.add_resource(UserAuth, '/user-auth')
api.add_resource(
UserFollow, '/user-follow/<int:follower_id>/<followed_username>'
)
| en | 0.741321 | # -*- coding: utf-8 -*- API definition module. # Create an API-related blueprint | 2.392179 | 2 |
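The blueprint defined above still has to be attached to an application; a minimal sketch, assuming an application module elsewhere in the package (the import path and the `/api` prefix are assumptions, not taken from the record):

from flask import Flask

from user_service.api import api_bp  # hypothetical import path

app = Flask(__name__)
app.register_blueprint(api_bp, url_prefix='/api')  # e.g. GET /api/users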
advanced/itertools_funcs.py | ariannasg/python3-essential-training | 1 | 776 | <reponame>ariannasg/python3-essential-training
#!usr/bin/env python3
import itertools
# itertools is a module that's not technically a set of built-in functions but
# it is part of the standard library that comes with python.
# it's useful for creating and using iterators.
def main():
print('some infinite iterators')
# cycle iterator can be used to cycle over a collection over and over
seq1 = ["Joe", "John", "Mike"]
cycle1 = itertools.cycle(seq1)
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
# use count to create a simple counter
count1 = itertools.count(100, 3)
print(next(count1))
print(next(count1))
print(next(count1))
print('some non-infinite iterators')
values = [10, 5, 20, 30, 40, 50, 40, 30]
# accumulate creates an iterator that accumulates/aggregates values
print(list(itertools.accumulate(values))) # this defaults to addition
print(list(itertools.accumulate(values, max)))
print(list(itertools.accumulate(values, min)))
# use chain to connect sequences together
x = itertools.chain('ABCD', '1234')
print(list(x))
# dropwhile and takewhile will return values until
# a certain condition is met that stops them. they are similar to the
# filter built-in function.
# dropwhile will drop the values from the sequence as long as the
# condition of the function is true and then returns the rest of values
print(list(itertools.dropwhile(is_less_than_forty, values)))
# takewhile will keep the values from the sequence as long as the
# condition of the function is true and then stops giving data
print(list(itertools.takewhile(is_less_than_forty, values)))
def is_less_than_forty(x):
return x < 40
if __name__ == "__main__":
main()
# CONSOLE OUTPUT:
# some infinite iterators
# Joe
# John
# Mike
# Joe
# John
# 100
# 103
# 106
# some non-infinite iterators
# [10, 15, 35, 65, 105, 155, 195, 225]
# [10, 10, 20, 30, 40, 50, 50, 50]
# [10, 5, 5, 5, 5, 5, 5, 5]
# ['A', 'B', 'C', 'D', '1', '2', '3', '4']
# [40, 50, 40, 30]
# [10, 5, 20, 30]
| #!usr/bin/env python3
import itertools
# itertools is a module that's not technically a set of built-in functions but
# it is part of the standard library that comes with python.
# it's useful for creating and using iterators.
def main():
print('some infinite iterators')
# cycle iterator can be used to cycle over a collection over and over
seq1 = ["Joe", "John", "Mike"]
cycle1 = itertools.cycle(seq1)
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
# use count to create a simple counter
count1 = itertools.count(100, 3)
print(next(count1))
print(next(count1))
print(next(count1))
print('some non-infinite iterators')
values = [10, 5, 20, 30, 40, 50, 40, 30]
# accumulate creates an iterator that accumulates/aggregates values
print(list(itertools.accumulate(values))) # this defaults to addition
print(list(itertools.accumulate(values, max)))
print(list(itertools.accumulate(values, min)))
# use chain to connect sequences together
x = itertools.chain('ABCD', '1234')
print(list(x))
# dropwhile and takewhile will return values until
# a certain condition is met that stops them. they are similar to the
# filter built-in function.
# dropwhile will drop the values from the sequence as long as the
# condition of the function is true and then returns the rest of values
print(list(itertools.dropwhile(is_less_than_forty, values)))
# takewhile will keep the values from the sequence as long as the
# condition of the function is true and then stops giving data
print(list(itertools.takewhile(is_less_than_forty, values)))
def is_less_than_forty(x):
return x < 40
if __name__ == "__main__":
main()
# CONSOLE OUTPUT:
# some infinite iterators
# Joe
# John
# Mike
# Joe
# John
# 100
# 103
# 106
# some non-infinite iterators
# [10, 15, 35, 65, 105, 155, 195, 225]
# [10, 10, 20, 30, 40, 50, 50, 50]
# [10, 5, 5, 5, 5, 5, 5, 5]
# ['A', 'B', 'C', 'D', '1', '2', '3', '4']
# [40, 50, 40, 30]
# [10, 5, 20, 30] | en | 0.839545 | #!usr/bin/env python3 # itertools is a module that's not technically a set of built-in functions but # it is part of the standard library that comes with python. # it's useful for for creating and using iterators. # cycle iterator can be used to cycle over a collection over and over # use count to create a simple counter # accumulate creates an iterator that accumulates/aggregates values # this defaults to addition # use chain to connect sequences together # dropwhile and takewhile will return values until # a certain condition is met that stops them. they are similar to the # filter built-in function. # dropwhile will drop the values from the sequence as long as the # condition of the function is true and then returns the rest of values # takewhile will keep the values from the sequence as long as the # condition of the function is true and then stops giving data # CONSOLE OUTPUT: # some infinite iterators # Joe # John # Mike # Joe # John # 100 # 103 # 106 # some non-infinite iterators # [10, 15, 35, 65, 105, 155, 195, 225] # [10, 10, 20, 30, 40, 50, 50, 50] # [10, 5, 5, 5, 5, 5, 5, 5] # ['A', 'B', 'C', 'D', '1', '2', '3', '4'] # [40, 50, 40, 30] # [10, 5, 20, 30] | 4.607186 | 5 |
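One companion trick not used in the script above: `itertools.islice` takes a bounded slice from the infinite `cycle` and `count` iterators, which avoids the repeated `next` calls. A small sketch:

import itertools

cycle1 = itertools.cycle(["Joe", "John", "Mike"])
count1 = itertools.count(100, 3)

# islice stops after the requested number of items, so infinite iterators
# can be consumed safely.
print(list(itertools.islice(cycle1, 5)))  # ['Joe', 'John', 'Mike', 'Joe', 'John']
print(list(itertools.islice(count1, 3)))  # [100, 103, 106]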
aula 05/model/Pessoa.py | Azenha/AlgProg2 | 0 | 777 | <gh_stars>0
class Pessoa:
def __init__(self, codigo, nome, endereco, telefone):
self.__codigo = int(codigo)
self.nome = str(nome)
self._endereco = str(endereco)
self.__telefone = str(telefone)
def imprimeNome(self):
print(f"Você pode chamar essa pessoa de {self.nome}.")
def __imprimeTelefone(self):
print(f"Você pode ligar para esta pessoa no número {self.__telefone}.") | class Pessoa:
def __init__(self, codigo, nome, endereco, telefone):
self.__codigo = int(codigo)
self.nome = str(nome)
self._endereco = str(endereco)
self.__telefone = str(telefone)
def imprimeNome(self):
print(f"Você pode chamar essa pessoa de {self.nome}.")
def __imprimeTelefone(self):
print(f"Você pode ligar para esta pessoa no número {self.__telefone}.") | none | 1 | 3.21991 | 3 |
|
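A short usage sketch for the `Pessoa` class above (the argument values are made up). The printed messages are Portuguese for roughly "You can call this person {nome}" and "You can call this person at the number {telefone}"; because `__imprimeTelefone` has a double-underscore name, from outside the class it is only reachable through Python's name mangling:

p = Pessoa(1, "Maria", "Rua A, 10", "5555-0101")
p.imprimeNome()               # public method
p._Pessoa__imprimeTelefone()  # name-mangled form of the "private" method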
examples/plain_text_response.py | lukefx/stardust | 2 | 778 | <filename>examples/plain_text_response.py
from starlette.responses import PlainTextResponse
async def serve(req):
return PlainTextResponse("Hello World!")
| <filename>examples/plain_text_response.py
from starlette.responses import PlainTextResponse
async def serve(req):
return PlainTextResponse("Hello World!")
| none | 1 | 1.825127 | 2 |
|
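Because the handler above never reads its request argument, it can be exercised on its own with `asyncio` (assuming `serve` from the module above is importable); how the stardust framework itself mounts `serve` is not shown in the record, so this is only a standalone check:

import asyncio

from starlette.responses import PlainTextResponse

response = asyncio.run(serve(None))  # `req` is ignored by the handler
assert isinstance(response, PlainTextResponse)
print(response.body)  # b'Hello World!'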
pypyrus_logbook/logger.py | t3eHawk/pypyrus_logbook | 0 | 779 | <filename>pypyrus_logbook/logger.py<gh_stars>0
import atexit
import datetime as dt
import os
import platform
import pypyrus_logbook as logbook
import sys
import time
import traceback
from .conf import all_loggers
from .formatter import Formatter
from .header import Header
from .output import Root
from .record import Record
from .sysinfo import Sysinfo
class Logger():
"""This class represents a single logger.
    Logger by itself is a complex set of methods, items and commands that
    together gives functionality for advanced logging in different outputs:
    console, file, email, database table, HTML document - and using information
    from different inputs: user messages, traceback, frames, user parameters,
execution arguments and systems descriptors.
    Each logger must have a unique name which will help to identify it.
Main application logger will have the same name as a python script file.
It can be accessed by native logbook methods or by calling `getlogger()`
method with no name.
Parameters
----------
name : str, optional
        The argument is used to define the `name` attribute.
app : str, optional
The argument is used to set the `app` attribute.
desc : str, optional
The argument is used to set the `desc` attribute.
version : str, optional
The argument is used to set the `version` attribute.
status : bool, optional
The argument is used to open or close output `root`.
console : bool, optional
The argument is used to open or close output `console`.
file : bool, optional
The argument is used to open or close output `file`.
email : bool, optional
The argument is used to open or close output `email`.
html : bool, optional
The argument is used to open or close output `html`.
table : bool, optional
The argument is used to open or close output `table`.
directory : str, optional
The argument is used to set logging file folder.
filename : str, optional
The argument is used to set logging file name.
extension : str, optional
The argument is used to set logging file extension.
smtp : dict, optional
The argument is used to configure SMTP connection.
db : dict, optional
The argument is used to configure DB connection.
format : str, optional
The argument is used to set record template.
info : bool, optional
The argument is used to filter info records. The default is True.
debug : bool, optional
The argument is used to filter debug records. The default is False.
warning : bool, optional
The argument is used to filter warning records. The default is True.
error : bool, optional
The argument is used to filter error records. The default is True.
critical : bool, optional
The argument is used to filter critical records. The default is True.
alarming : bool, optional
The argument is used to enable or disable alarming mechanism. The
default is True.
control : bool, optional
The argument is used to enable or disable execution break in case
        of error. The default is True.
maxsize : int or bool, optional
The argument is used to define maximum size of output file. Must be
presented as number of bytes. The default is 10 Mb.
maxdays : int or bool, optional
The argument is used to define maximum number of days that will be
logged to same file. The default is 1 which means that new output file
will be opened at each 00:00:00.
maxlevel : int or bool, optional
The argument is used to define the break error level (WARNING = 0,
        ERROR = 1, CRITICAL = 2). Anything at or above the break level will
interrupt application execution. The default is 1.
maxerrors : int or bool, optional
        The argument is used to define the maximum number of errors. The default
is False which means it is disabled.
Attributes
----------
name : str
Name of the logger.
app : str
Name of the application that we are logging.
desc : str
Description of the application that we are logging.
version : str
Version of the application that we are logging.
start_date : datetime.datetime
Date when logging was started.
rectypes : dict
All available record types. Keys are used in `Logger` write methods as
`rectype` argument. Values are used in formatting. So if you wish to
        modify the `rectype` form then edit the appropriate one here. If you wish to
        use your own record types then just add them to that dictionary. By default we
provide the next few record types:
+---------+---------+
| Key | Value |
+=========+=========+
|none |NONE |
+---------+---------+
|info |INFO |
+---------+---------+
|debug |DEBUG |
+---------+---------+
|warning |WARNING |
+---------+---------+
|error |ERROR |
+---------+---------+
|critical |CRITICAL |
+---------+---------+
messages : dict
Messages that are printed with some `Logger` methods like `ok()`,
        `success()`, `fail()`. If you wish to modify the text of these messages
        just edit the value of the appropriate item.
with_errors : int
        The flag shows that the logger caught errors in the application during its
execution.
count_errors : int
        Number of errors that the logger caught in the application during its
execution.
filters : dict
Record types filters. To filter record type just set corresponding
item value to False.
root : pypyrus_logbook.output.Root
The output `Root` object.
console : pypyrus_logbook.output.Console
The output `Console` object. Shortcut for `Logger.root.console`.
file : pypyrus_logbook.output.File
The output file. Shortcut for `Logger.output.file`.
email : pypyrus_logbook.output.Email
The output email. Shortcut for `Logger.output.email`.
html: pypyrus_logbook.output.HTML
The output HTML document. Shortcut for `Logger.output.html`.
table: pypyrus_logbook.output.Table
The output table. Shortcut for `Logger.output.table`.
formatter : pypyrus_logbook.formatter.Formatter
Logger formatter which sets all formatting configuration like
record template, error message template, line length etc.
sysinfo : pypyrus_logbook.sysinfo.Sysinfo
        Special input object which parses different inputs including system
specifications, flag arguments, execution parameters, user parameters
and environment variables and transforms all of that to `Dataset`
object. Through the `Dataset` object data can be easily accessed by
get item operation or by point like `sysinfo.desc['hostname']` or
`sysinfo.desc.hostname`.
header : pypyrus_logbook.header.Header
The header that can be printed to the writable output.
"""
def __init__(self, name=None, app=None, desc=None, version=None,
status=True, console=True, file=True, email=False, html=False,
table=False, directory=None, filename=None, extension=None,
smtp=None, db=None, format=None, info=True, debug=False,
warning=True, error=True, critical=True, alarming=True,
control=True, maxsize=(1024*1024*10), maxdays=1, maxlevel=2,
maxerrors=False):
# Unique name of the logger.
self._name = name
# Attributes describing the application.
self.app = None
self.desc = None
self.version = None
# Some logger important attributes
self._start_date = dt.datetime.now()
self.rectypes = {'none': 'NONE', 'info': 'INFO', 'debug': 'DEBUG',
'warning': 'WARNING', 'error': 'ERROR',
'critical': 'CRITICAL'}
self.messages = {'ok': 'OK', 'success': 'SUCCESS', 'fail': 'FAIL'}
self._with_error = False
self._count_errors = 0
# Complete the initial configuration.
self.configure(app=app, desc=desc, version=version, status=status,
console=console, file=file, email=email, html=html,
table=table, directory=directory, filename=filename,
extension=extension, smtp=smtp, db=db, format=format,
info=info, debug=debug, warning=warning, error=error,
critical=critical, alarming=alarming, control=control,
maxsize=maxsize, maxdays=maxdays, maxlevel=maxlevel,
maxerrors=maxerrors)
# Output shortcuts.
self.console = self.root.console
self.file = self.root.file
self.email = self.root.email
self.html = self.root.html
self.table = self.root.table
# Set exit function.
atexit.register(self._exit)
# Add creating logger to special all_loggers dictinary.
all_loggers[self._name] = self
pass
def __str__(self):
return f'<Logger object "{self._name}">'
__repr__ = __str__
@property
def name(self):
"""Unique logger name."""
return self._name
@property
def start_date(self):
"""Logging start date."""
return self._start_date
@property
def with_error(self):
"""Flag that shows was an error or not."""
return self._with_error
@property
def count_errors(self):
"""The number of occured errors."""
return self._count_errors
def configure(self, app=None, desc=None, version=None, status=None,
console=None, file=None, email=None, html=None, table=None,
directory=None, filename=None, extension=None, smtp=None,
db=None, format=None, info=None, debug=None, warning=None,
error=None, critical=None, alarming=None, control=None,
maxsize=None, maxdays=None, maxlevel=None, maxerrors=None):
"""Main method to configure the logger and all its attributes.
This is an only one right way to customize logger. Parameters are the
same as for creatrion.
Parameters
----------
app : str, optional
The argument is used to set the `app` attribute.
desc : str, optional
The argument is used to set the `desc` attribute.
version : str, optional
The argument is used to set the `version` attribute.
status : bool, optional
The argument is used to open or close output `root`.
console : bool, optional
The argument is used to open or close output `console`.
file : bool, optional
The argument is used to open or close output `file`.
email : bool, optional
The argument is used to open or close output `email`.
html : bool, optional
The argument is used to open or close output `html`.
table : bool, optional
The argument is used to open or close output `table`.
directory : str, optional
The argument is used to set logging file folder.
filename : str, optional
The argument is used to set logging file name.
extension : str, optional
The argument is used to set logging file extension.
smtp : dict, optional
The argument is used to configure SMTP connection.
db : dict, optional
The argument is used to configure DB connection.
format : str, optional
The argument is used to set record template.
info : bool, optional
The argument is used to filter info records.
debug : bool, optional
The argument is used to filter debug records.
warning : bool, optional
The argument is used to filter warning records.
error : bool, optional
The argument is used to filter error records.
critical : bool, optional
The argument is used to filter critical records.
alarming : bool, optional
The argument is used to enable or disable alarming mechanism.
control : bool, optional
The argument is used to enable or disable execution break in case
            of error.
maxsize : int or bool, optional
The argument is used to define maximum size of output file.
maxdays : int or bool, optional
The argument is used to define maximum number of days that will be
logged to same file.
maxlevel : int or bool, optional
The argument is used to define the break error level.
maxerrors : int or bool, optional
            The argument is used to define the maximum number of errors.
"""
if isinstance(app, str) is True: self.app = app
if isinstance(desc, str) is True: self.desc = desc
if isinstance(version, (str, int, float)) is True:
self.version = version
        # Build the output root if it does not exist. Otherwise modify the
# existing output if it is requested.
if hasattr(self, 'root') is False:
self.root = Root(self, console=console, file=file, email=email,
html=html, table=table, status=status,
directory=directory, filename=filename,
extension=extension, smtp=smtp, db=db)
else:
for key, value in {'console': console, 'file': file,
'email': email, 'html': html,
'table': table}.items():
if value is True:
getattr(self.root, key).open()
if key == 'file':
getattr(self.root, key).new()
elif value is False:
getattr(self.root, key).close()
# Customize output file path.
path = {}
if directory is not None: path['dir'] = directory
if filename is not None: path['name'] = filename
if extension is not None: path['ext'] = extension
if len(path) > 0:
self.root.file.configure(**path)
# Customize SMTP server.
if isinstance(smtp, dict) is True:
self.root.email.configure(**smtp)
# Customize database connection.
if isinstance(db, dict) is True:
self.root.table.configure(**db)
        # Create formatter in case it does not exist yet or just customize it.
# Parameter format can be either string or dictionary.
# When it is string then it must describe records format.
        # When it is a dictionary it can contain any parameter of the formatter
# that must be customized.
if isinstance(format, str) is True:
format = {'record': format}
if hasattr(self, 'formatter') is False:
format = {} if isinstance(format, dict) is False else format
self.formatter = Formatter(**format)
elif isinstance(format, dict) is True:
self.formatter.configure(**format)
# Create or customize record type filters.
if hasattr(self, 'filters') is False:
self.filters = {}
for key, value in {'info': info, 'debug': debug, 'error': error,
'warning': warning, 'critical': critical}.items():
if isinstance(value, bool) is True:
self.filters[key] = value
# Customize limits and parameters of execution behaviour.
if isinstance(maxsize, (int, float, bool)) is True:
self._maxsize = maxsize
if isinstance(maxdays, (int, float, bool)) is True:
self._maxdays = maxdays
self.__calculate_restart_date()
if isinstance(maxlevel, (int, float, bool)) is True:
self._maxlevel = maxlevel
if isinstance(maxerrors, (int, float, bool)) is True:
self._maxerrors = maxerrors
if isinstance(alarming, bool) is True:
self._alarming = alarming
if isinstance(control, bool) is True:
self._control = control
# Initialize sysinfo instance when not exists.
if hasattr(self, 'sysinfo') is False:
self.sysinfo = Sysinfo(self)
# Initialize header instance when not exists.
if hasattr(self, 'header') is False:
self.header = Header(self)
pass
def write(self, record):
"""Direct write to the output.
Parameters
----------
record : Record
The argument is used to send it to the output `root`.
"""
self.__check_file_stats()
self.root.write(record)
pass
def record(self, rectype, message, error=False, **kwargs):
"""Basic method to write records.
Parameters
----------
rectype : str
By default method creates the record with the type NONE.
That can be changed but depends on available record types.
All registered record types are stored in the instance attribute
            rectypes. If you wish to use your own record type or change the
            presentation of an existing one then edit this dictionary.
message : str
The message that must be written.
error : bool, optional
If record is error then set that parameter to `True`.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
if self.filters.get(rectype, True) is True:
record = Record(self, rectype, message, error=error, **kwargs)
self.write(record)
pass
def info(self, message, **kwargs):
"""Send INFO record to output."""
rectype = 'info'
self.record(rectype, message, **kwargs)
pass
def debug(self, message, **kwargs):
"""Send DEBUG record to the output."""
rectype = 'debug'
self.record(rectype, message, **kwargs)
pass
def error(self, message=None, rectype='error', format=None, alarming=False,
level=1, **kwargs):
"""Send ERROR record to the output.
        If an exception exists in the current traceback then the method will
        format the exception according to the `formatter.error` string
        presentation. If `formatter.error` is set to `False` the exception will
        just be printed in the original Python style.
        The method will also send an alarm if the alarming attribute is `True`,
        email output is enabled and the SMTP server is configured.
        If one of the limit triggers fires then the application will be aborted.
Parameters
----------
message : str, optional
The message that must be written instead of exception.
rectype : str, optional
The type of error according to `rectypes` dictionary.
format : str, optional
The format of the error message.
alarming : bool
The argument is used to enable or disable the alarming mechanism
for this certain call.
level : int
The argument is used to describe the error level.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
self._with_error = True
self._count_errors += 1
format = self.formatter.error if format is None else format
# Parse the error.
err_type, err_value, err_tb = sys.exc_info()
if message is None and err_type is not None:
if isinstance(format, str) is True:
err_name = err_type.__name__
err_value = err_value
for tb in traceback.walk_tb(err_tb):
f_code = tb[0].f_code
err_file = os.path.abspath(f_code.co_filename)
err_line = tb[1]
err_obj = f_code.co_name
self.record(rectype, message, error=True,
err_name=err_name, err_value=err_value,
err_file=err_file, err_line=err_line,
err_obj=err_obj, **kwargs)
elif format is False:
exception = traceback.format_exception(err_type, err_value,
err_tb)
message = '\n'
message += ''.join(exception)
self.record(rectype, message, **kwargs)
else:
message = message or ''
self.record(rectype, message, **kwargs)
# Break execution in case of critical error if permitted.
# The alarm will be generated at exit if it is configured.
if self._control is True:
if level >= self._maxlevel:
sys.exit()
if self._maxerrors is not False:
if self._count_errors > self._maxerrors:
sys.exit()
# Send alarm if execution was not aborted but alarm is needed.
if alarming is True:
self.root.email.alarm()
pass
def warning(self, message=None, **kwargs):
"""Send WARNING error record to the output."""
self.error(message, rectype='warning', level=0, **kwargs)
pass
def critical(self, message=None, **kwargs):
"""Send CRITICAL error record to the output."""
self.error(message, rectype='critical', level=2, **kwargs)
pass
def head(self):
"""Send header to the output."""
string = self.header.create()
self.write(string)
pass
def subhead(self, string):
"""Send subheader as upper-case text between two border lines to the
output.
Parameters
----------
string : str
The text that will be presented as subheader.
"""
bound = f'{self.formatter.div*self.formatter.length}\n'
string = f'{bound}\t{string}\n{bound}'.upper()
self.write(string)
pass
def line(self, message):
"""Send raw text with the new line to the output.
Parameters
----------
message : str
The message that must be written.
"""
self.write(f'{message}\n')
pass
def bound(self, div=None, length=None):
"""Write horizontal border in the output. Useful when need to separate
different blocks of information.
Parameters
----------
div : str, optional
            Symbol that is used to build the bound.
length : int, optional
            Length of the bound.
"""
border = self.formatter.div * self.formatter.length
self.write(border + '\n')
pass
def blank(self, number=1):
"""Write blank lines in the output.
Parameters
----------
number : int, optional
The number of the blank lines that must be written.
"""
string = '\n'*number
self.write(string)
pass
def ok(self, **kwargs):
"""Print INFO message with OK."""
rectype = 'info'
message = self.messages['ok']
self.record(rectype, message, **kwargs)
pass
def success(self, **kwargs):
"""Print INFO message with SUCCESS."""
rectype = 'info'
message = self.messages['success']
self.record(rectype, message, **kwargs)
pass
def fail(self, **kwargs):
"""Print INFO message with FAIL."""
rectype = 'info'
message = self.messages['fail']
self.record(rectype, message, **kwargs)
pass
def restart(self):
"""Restart logging. Will open new file."""
self._start_date = dt.datetime.now()
self.__calculate_restart_date()
if self.root.file.status is True:
self.root.file.new()
if self.header.used is True:
self.head()
pass
def send(self, *args, **kwargs):
"""Send email message. Note that SMTP server connection must be
configured.
"""
self.root.email.send(*args, **kwargs)
pass
def set(self, **kwargs):
"""Update values in table. Note that DB connection must be
configured.
"""
self.root.table.write(**kwargs)
pass
def _exit(self):
# Inform about the error.
if self._alarming is True and self._with_error is True:
self.root.email.alarm()
pass
def __calculate_restart_date(self):
"""Calculate the date when logger must be restarted according to
maxdays parameter.
"""
self.__restart_date = (self._start_date
+ dt.timedelta(days=self._maxdays))
pass
def __check_file_stats(self):
"""Check the output file statistics to catch when current file must be
closed and new one must be opened.
"""
if self.root.file.status is True:
if self._maxsize is not False:
if self.root.file.size is not None:
if self.root.file.size > self._maxsize:
self.restart()
return
if self._maxdays is not False:
if self.__restart_date.day == dt.datetime.now().day:
self.restart()
return
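# A hedged usage sketch (not part of the original module): the class is normally
# obtained through logbook's own helpers such as getlogger(), but it can also be
# instantiated directly. The names and arguments below are illustrative only.
#
#   logger = Logger(name='demo', app='demo app', file=False)
#   logger.head()
#   logger.info('processing started')
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       logger.error()  # formats the active exception via formatter.error
#   logger.ok()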
| <filename>pypyrus_logbook/logger.py<gh_stars>0
import atexit
import datetime as dt
import os
import platform
import pypyrus_logbook as logbook
import sys
import time
import traceback
from .conf import all_loggers
from .formatter import Formatter
from .header import Header
from .output import Root
from .record import Record
from .sysinfo import Sysinfo
class Logger():
"""This class represents a single logger.
    Logger by itself is a complex set of methods, items and commands that
    together gives functionality for advanced logging in different outputs:
    console, file, email, database table, HTML document - and using information
    from different inputs: user messages, traceback, frames, user parameters,
execution arguments and systems descriptors.
    Each logger must have a unique name which will help to identify it.
Main application logger will have the same name as a python script file.
It can be accessed by native logbook methods or by calling `getlogger()`
method with no name.
Parameters
----------
name : str, optional
        The argument is used to define the `name` attribute.
app : str, optional
The argument is used to set the `app` attribute.
desc : str, optional
The argument is used to set the `desc` attribute.
version : str, optional
The argument is used to set the `version` attribute.
status : bool, optional
The argument is used to open or close output `root`.
console : bool, optional
The argument is used to open or close output `console`.
file : bool, optional
The argument is used to open or close output `file`.
email : bool, optional
The argument is used to open or close output `email`.
html : bool, optional
The argument is used to open or close output `html`.
table : bool, optional
The argument is used to open or close output `table`.
directory : str, optional
The argument is used to set logging file folder.
filename : str, optional
The argument is used to set logging file name.
extension : str, optional
The argument is used to set logging file extension.
smtp : dict, optional
The argument is used to configure SMTP connection.
db : dict, optional
The argument is used to configure DB connection.
format : str, optional
The argument is used to set record template.
info : bool, optional
The argument is used to filter info records. The default is True.
debug : bool, optional
The argument is used to filter debug records. The default is False.
warning : bool, optional
The argument is used to filter warning records. The default is True.
error : bool, optional
The argument is used to filter error records. The default is True.
critical : bool, optional
The argument is used to filter critical records. The default is True.
alarming : bool, optional
The argument is used to enable or disable alarming mechanism. The
default is True.
control : bool, optional
The argument is used to enable or disable execution break in case
        of error. The default is True.
maxsize : int or bool, optional
The argument is used to define the maximum size of the output file. Must
be given as a number of bytes. The default is 10 MB.
maxdays : int or bool, optional
The argument is used to define the maximum number of days that will be
logged to the same file. The default is 1, which means that a new output
file will be opened at 00:00:00 each day.
maxlevel : int or bool, optional
The argument is used to define the break error level (WARNING = 0,
ERROR = 1, CRITICAL = 2). Any error at or above the break level will
interrupt application execution. The default is 2.
maxerrors : int or bool, optional
The argument is used to define the maximum number of errors. The default
is False, which means the limit is disabled.
Attributes
----------
name : str
Name of the logger.
app : str
Name of the application that we are logging.
desc : str
Description of the application that we are logging.
version : str
Version of the application that we are logging.
start_date : datetime.datetime
Date when logging was started.
rectypes : dict
All available record types. Keys are used in `Logger` write methods as
the `rectype` argument. Values are used in formatting. So if you wish to
modify the form of a `rectype` then edit the appropriate one here. If you
wish to use your own record types then just add them to this dictionary.
By default the following record types are provided:
+---------+---------+
| Key | Value |
+=========+=========+
|none |NONE |
+---------+---------+
|info |INFO |
+---------+---------+
|debug |DEBUG |
+---------+---------+
|warning |WARNING |
+---------+---------+
|error |ERROR |
+---------+---------+
|critical |CRITICAL |
+---------+---------+
messages : dict
Messages that are printed by some `Logger` methods like `ok()`,
`success()`, `fail()`. If you wish to modify the text of these messages
just edit the value of the appropriate item.
with_error : bool
The flag shows whether the logger caught errors in the application
during its execution.
count_errors : int
Number of errors that the logger caught in the application during its
execution.
filters : dict
Record types filters. To filter record type just set corresponding
item value to False.
root : pypyrus_logbook.output.Root
The output `Root` object.
console : pypyrus_logbook.output.Console
The output `Console` object. Shortcut for `Logger.root.console`.
file : pypyrus_logbook.output.File
The output file. Shortcut for `Logger.output.file`.
email : pypyrus_logbook.output.Email
The output email. Shortcut for `Logger.output.email`.
html: pypyrus_logbook.output.HTML
The output HTML document. Shortcut for `Logger.output.html`.
table: pypyrus_logbook.output.Table
The output table. Shortcut for `Logger.output.table`.
formatter : pypyrus_logbook.formatter.Formatter
Logger formatter which sets all formatting configuration like
record template, error message template, line length etc.
sysinfo : pypyrus_logbook.sysinfo.Sysinfo
Special input object which parses different inputs including system
specifications, flag arguments, execution parameters, user parameters
and environment variables and transforms all of that into a `Dataset`
object. Through the `Dataset` object data can be easily accessed by
item access or by attribute access like `sysinfo.desc['hostname']` or
`sysinfo.desc.hostname`.
header : pypyrus_logbook.header.Header
The header that can be printed to the writable output.
"""
def __init__(self, name=None, app=None, desc=None, version=None,
status=True, console=True, file=True, email=False, html=False,
table=False, directory=None, filename=None, extension=None,
smtp=None, db=None, format=None, info=True, debug=False,
warning=True, error=True, critical=True, alarming=True,
control=True, maxsize=(1024*1024*10), maxdays=1, maxlevel=2,
maxerrors=False):
# Unique name of the logger.
self._name = name
# Attributes describing the application.
self.app = None
self.desc = None
self.version = None
# Some important logger attributes
self._start_date = dt.datetime.now()
self.rectypes = {'none': 'NONE', 'info': 'INFO', 'debug': 'DEBUG',
'warning': 'WARNING', 'error': 'ERROR',
'critical': 'CRITICAL'}
self.messages = {'ok': 'OK', 'success': 'SUCCESS', 'fail': 'FAIL'}
self._with_error = False
self._count_errors = 0
# Complete the initial configuration.
self.configure(app=app, desc=desc, version=version, status=status,
console=console, file=file, email=email, html=html,
table=table, directory=directory, filename=filename,
extension=extension, smtp=smtp, db=db, format=format,
info=info, debug=debug, warning=warning, error=error,
critical=critical, alarming=alarming, control=control,
maxsize=maxsize, maxdays=maxdays, maxlevel=maxlevel,
maxerrors=maxerrors)
# Output shortcuts.
self.console = self.root.console
self.file = self.root.file
self.email = self.root.email
self.html = self.root.html
self.table = self.root.table
# Set exit function.
atexit.register(self._exit)
# Add the created logger to the special all_loggers dictionary.
all_loggers[self._name] = self
pass
def __str__(self):
return f'<Logger object "{self._name}">'
__repr__ = __str__
@property
def name(self):
"""Unique logger name."""
return self._name
@property
def start_date(self):
"""Logging start date."""
return self._start_date
@property
def with_error(self):
"""Flag that shows was an error or not."""
return self._with_error
@property
def count_errors(self):
"""The number of occured errors."""
return self._count_errors
def configure(self, app=None, desc=None, version=None, status=None,
console=None, file=None, email=None, html=None, table=None,
directory=None, filename=None, extension=None, smtp=None,
db=None, format=None, info=None, debug=None, warning=None,
error=None, critical=None, alarming=None, control=None,
maxsize=None, maxdays=None, maxlevel=None, maxerrors=None):
"""Main method to configure the logger and all its attributes.
This is the only right way to customize the logger. Parameters are the
same as for creation.
Parameters
----------
app : str, optional
The argument is used to set the `app` attribute.
desc : str, optional
The argument is used to set the `desc` attribute.
version : str, optional
The argument is used to set the `version` attribute.
status : bool, optional
The argument is used to open or close output `root`.
console : bool, optional
The argument is used to open or close output `console`.
file : bool, optional
The argument is used to open or close output `file`.
email : bool, optional
The argument is used to open or close output `email`.
html : bool, optional
The argument is used to open or close output `html`.
table : bool, optional
The argument is used to open or close output `table`.
directory : str, optional
The argument is used to set logging file folder.
filename : str, optional
The argument is used to set logging file name.
extension : str, optional
The argument is used to set logging file extension.
smtp : dict, optional
The argument is used to configure SMTP connection.
db : dict, optional
The argument is used to configure DB connection.
format : str, optional
The argument is used to set record template.
info : bool, optional
The argument is used to filter info records.
debug : bool, optional
The argument is used to filter debug records.
warning : bool, optional
The argument is used to filter warning records.
error : bool, optional
The argument is used to filter error records.
critical : bool, optional
The argument is used to filter critical records.
alarming : bool, optional
The argument is used to enable or disable alarming mechanism.
control : bool, optional
The argument is used to enable or disable execution break in case
of error.
maxsize : int or bool, optional
The argument is used to define the maximum size of the output file.
maxdays : int or bool, optional
The argument is used to define the maximum number of days that will be
logged to the same file.
maxlevel : int or bool, optional
The argument is used to define the break error level.
maxerrors : int or bool, optional
The argument is used to define the maximum number of errors.
"""
if isinstance(app, str) is True: self.app = app
if isinstance(desc, str) is True: self.desc = desc
if isinstance(version, (str, int, float)) is True:
self.version = version
# Build the output root if it does not exist. Otherwise modify the
# existing output if it is requested.
if hasattr(self, 'root') is False:
self.root = Root(self, console=console, file=file, email=email,
html=html, table=table, status=status,
directory=directory, filename=filename,
extension=extension, smtp=smtp, db=db)
else:
for key, value in {'console': console, 'file': file,
'email': email, 'html': html,
'table': table}.items():
if value is True:
getattr(self.root, key).open()
if key == 'file':
getattr(self.root, key).new()
elif value is False:
getattr(self.root, key).close()
# Customize output file path.
path = {}
if directory is not None: path['dir'] = directory
if filename is not None: path['name'] = filename
if extension is not None: path['ext'] = extension
if len(path) > 0:
self.root.file.configure(**path)
# Customize SMTP server.
if isinstance(smtp, dict) is True:
self.root.email.configure(**smtp)
# Customize database connection.
if isinstance(db, dict) is True:
self.root.table.configure(**db)
# Create the formatter in case it does not exist yet or just customize it.
# Parameter format can be either a string or a dictionary.
# When it is a string then it must describe the record format.
# When it is a dictionary it can contain any parameter of the formatter
# that must be customized.
if isinstance(format, str) is True:
format = {'record': format}
if hasattr(self, 'formatter') is False:
format = {} if isinstance(format, dict) is False else format
self.formatter = Formatter(**format)
elif isinstance(format, dict) is True:
self.formatter.configure(**format)
# Create or customize record type filters.
if hasattr(self, 'filters') is False:
self.filters = {}
for key, value in {'info': info, 'debug': debug, 'error': error,
'warning': warning, 'critical': critical}.items():
if isinstance(value, bool) is True:
self.filters[key] = value
# Customize limits and parameters of execution behaviour.
if isinstance(maxsize, (int, float, bool)) is True:
self._maxsize = maxsize
if isinstance(maxdays, (int, float, bool)) is True:
self._maxdays = maxdays
self.__calculate_restart_date()
if isinstance(maxlevel, (int, float, bool)) is True:
self._maxlevel = maxlevel
if isinstance(maxerrors, (int, float, bool)) is True:
self._maxerrors = maxerrors
if isinstance(alarming, bool) is True:
self._alarming = alarming
if isinstance(control, bool) is True:
self._control = control
# Initialize the sysinfo instance when it does not exist.
if hasattr(self, 'sysinfo') is False:
self.sysinfo = Sysinfo(self)
# Initialize the header instance when it does not exist.
if hasattr(self, 'header') is False:
self.header = Header(self)
pass
def write(self, record):
"""Direct write to the output.
Parameters
----------
record : Record
The argument is used to send it to the output `root`.
"""
self.__check_file_stats()
self.root.write(record)
pass
def record(self, rectype, message, error=False, **kwargs):
"""Basic method to write records.
Parameters
----------
rectype : str
By default the method creates the record with the type NONE.
That can be changed but depends on the available record types.
All registered record types are stored in the instance attribute
rectypes. If you wish to use your own record type or change the
presentation of an existing one then edit this dictionary.
message : str
The message that must be written.
error : bool, optional
If record is error then set that parameter to `True`.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
if self.filters.get(rectype, True) is True:
record = Record(self, rectype, message, error=error, **kwargs)
self.write(record)
pass
def info(self, message, **kwargs):
"""Send INFO record to output."""
rectype = 'info'
self.record(rectype, message, **kwargs)
pass
def debug(self, message, **kwargs):
"""Send DEBUG record to the output."""
rectype = 'debug'
self.record(rectype, message, **kwargs)
pass
def error(self, message=None, rectype='error', format=None, alarming=False,
level=1, **kwargs):
"""Send ERROR record to the output.
If an exception exists in the current traceback then the method will
format the exception according to the `formatter.error` string
presentation. If `formatter.error` is set to `False` the exception will
just be printed in the original Python style.
The method will also send an alarm if the alarming attribute is `True`,
email output is enabled and the SMTP server is configured.
If one of the limit triggers fires then the application will be aborted.
Parameters
----------
message : str, optional
The message that must be written instead of exception.
rectype : str, optional
The type of error according to `rectypes` dictionary.
format : str, optional
The format of the error message.
alarming : bool
The argument is used to enable or disable the alarming mechanism
for this certain call.
level : int
The argument is used to describe the error level.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
self._with_error = True
self._count_errors += 1
format = self.formatter.error if format is None else format
# Parse the error.
err_type, err_value, err_tb = sys.exc_info()
if message is None and err_type is not None:
if isinstance(format, str) is True:
err_name = err_type.__name__
err_value = err_value
for tb in traceback.walk_tb(err_tb):
f_code = tb[0].f_code
err_file = os.path.abspath(f_code.co_filename)
err_line = tb[1]
err_obj = f_code.co_name
self.record(rectype, message, error=True,
err_name=err_name, err_value=err_value,
err_file=err_file, err_line=err_line,
err_obj=err_obj, **kwargs)
elif format is False:
exception = traceback.format_exception(err_type, err_value,
err_tb)
message = '\n'
message += ''.join(exception)
self.record(rectype, message, **kwargs)
else:
message = message or ''
self.record(rectype, message, **kwargs)
# Break execution in case of critical error if permitted.
# The alarm will be generated at exit if it is configured.
if self._control is True:
if level >= self._maxlevel:
sys.exit()
if self._maxerrors is not False:
if self._count_errors > self._maxerrors:
sys.exit()
# Send alarm if execution was not aborted but alarm is needed.
if alarming is True:
self.root.email.alarm()
pass
def warning(self, message=None, **kwargs):
"""Send WARNING error record to the output."""
self.error(message, rectype='warning', level=0, **kwargs)
pass
def critical(self, message=None, **kwargs):
"""Send CRITICAL error record to the output."""
self.error(message, rectype='critical', level=2, **kwargs)
pass
def head(self):
"""Send header to the output."""
string = self.header.create()
self.write(string)
pass
def subhead(self, string):
"""Send subheader as upper-case text between two border lines to the
output.
Parameters
----------
string : str
The text that will be presented as subheader.
"""
bound = f'{self.formatter.div*self.formatter.length}\n'
string = f'{bound}\t{string}\n{bound}'.upper()
self.write(string)
pass
def line(self, message):
"""Send raw text with the new line to the output.
Parameters
----------
message : str
The message that must be written.
"""
self.write(f'{message}\n')
pass
def bound(self, div=None, length=None):
"""Write horizontal border in the output. Useful when need to separate
different blocks of information.
Parameters
----------
div : str, optional
Symbol that is used to bulid the bound.
length : int, optional
Lenght of the bound.
"""
border = self.formatter.div * self.formatter.length
self.write(border + '\n')
pass
def blank(self, number=1):
"""Write blank lines in the output.
Parameters
----------
number : int, optional
The number of the blank lines that must be written.
"""
string = '\n'*number
self.write(string)
pass
def ok(self, **kwargs):
"""Print INFO message with OK."""
rectype = 'info'
message = self.messages['ok']
self.record(rectype, message, **kwargs)
pass
def success(self, **kwargs):
"""Print INFO message with SUCCESS."""
rectype = 'info'
message = self.messages['success']
self.record(rectype, message, **kwargs)
pass
def fail(self, **kwargs):
"""Print INFO message with FAIL."""
rectype = 'info'
message = self.messages['fail']
self.record(rectype, message, **kwargs)
pass
def restart(self):
"""Restart logging. Will open new file."""
self._start_date = dt.datetime.now()
self.__calculate_restart_date()
if self.root.file.status is True:
self.root.file.new()
if self.header.used is True:
self.head()
pass
def send(self, *args, **kwargs):
"""Send email message. Note that SMTP server connection must be
configured.
"""
self.root.email.send(*args, **kwargs)
pass
def set(self, **kwargs):
"""Update values in table. Note that DB connection must be
configured.
"""
self.root.table.write(**kwargs)
pass
def _exit(self):
# Inform about the error.
if self._alarming is True and self._with_error is True:
self.root.email.alarm()
pass
def __calculate_restart_date(self):
"""Calculate the date when logger must be restarted according to
maxdays parameter.
"""
self.__restart_date = (self._start_date
+ dt.timedelta(days=self._maxdays))
pass
def __check_file_stats(self):
"""Check the output file statistics to catch when current file must be
closed and new one must be opened.
"""
if self.root.file.status is True:
if self._maxsize is not False:
if self.root.file.size is not None:
if self.root.file.size > self._maxsize:
self.restart()
return
if self._maxdays is not False:
if self.__restart_date.day == dt.datetime.now().day:
self.restart()
return
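# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how a Logger from this module might be used.
# The names Logger, head(), info(), error() and success() come from the code
# above; the log file location and the messages below are made up.
if __name__ == '__main__':
    demo = Logger('demo', app='demo_app', console=True, file=True,
                  directory='logs', filename='demo', extension='log')
    demo.head()                       # print the header block
    demo.info('Application started')
    try:
        1 / 0
    except ZeroDivisionError:
        # With the defaults (maxlevel=2, control=True) an error at level 1
        # is recorded and formatted from the traceback but does not abort.
        demo.error()
    demo.success()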
| en | 0.667552 | This class represents a single logger. Logger by it self is a complex set of methods, items and commands that together gives funcionality for advanced logging in different outputs: console, file, email, database table, HTML document - and using information from diffrent inputs: user messages, traceback, frames, user parameters, execution arguments and systems descriptors. Each logger must have an unique name which will help to identify it. Main application logger will have the same name as a python script file. It can be accessed by native logbook methods or by calling `getlogger()` method with no name. Parameters ---------- name : str, optional The argument is used te define `name` attribute app : str, optional The argument is used to set the `app` attribute. desc : str, optional The argument is used to set the `desc` attribute. version : str, optional The argument is used to set the `version` attribute. status : bool, optional The argument is used to open or close output `root`. console : bool, optional The argument is used to open or close output `console`. file : bool, optional The argument is used to open or close output `file`. email : bool, optional The argument is used to open or close output `email`. html : bool, optional The argument is used to open or close output `html`. table : bool, optional The argument is used to open or close output `table`. directory : str, optional The argument is used to set logging file folder. filename : str, optional The argument is used to set logging file name. extension : str, optional The argument is used to set logging file extension. smtp : dict, optional The argument is used to configure SMTP connection. db : dict, optional The argument is used to configure DB connection. format : str, optional The argument is used to set record template. info : bool, optional The argument is used to filter info records. The default is True. debug : bool, optional The argument is used to filter debug records. The default is False. warning : bool, optional The argument is used to filter warning records. The default is True. error : bool, optional The argument is used to filter error records. The default is True. critical : bool, optional The argument is used to filter critical records. The default is True. alarming : bool, optional The argument is used to enable or disable alarming mechanism. The default is True. control : bool, optional The argument is used to enable or disable execution break in case on error. The default is True. maxsize : int or bool, optional The argument is used to define maximum size of output file. Must be presented as number of bytes. The default is 10 Mb. maxdays : int or bool, optional The argument is used to define maximum number of days that will be logged to same file. The default is 1 which means that new output file will be opened at each 00:00:00. maxlevel : int or bool, optional The argument is used to define the break error level (WARNING = 0, ERRROR = 1, CRITICAL = 2). All that higher the break level will interrupt application execution. The default is 1. maxerrors : int or bool, optional The argument is used to define maximun number of errors. The default is False which means it is disabled. Attributes ---------- name : str Name of the logger. app : str Name of the application that we are logging. desc : str Description of the application that we are logging. version : str Version of the application that we are logging. start_date : datetime.datetime Date when logging was started. 
rectypes : dict All available record types. Keys are used in `Logger` write methods as `rectype` argument. Values are used in formatting. So if you wish to modify `rectype` form then edit appropriate one here. If you wish to use own record types then just add it to that dictinary. By default we provide the next few record types: +---------+---------+ | Key | Value | +=========+=========+ |none |NONE | +---------+---------+ |info |INFO | +---------+---------+ |debug |DEBUG | +---------+---------+ |warning |WARNING | +---------+---------+ |error |ERROR | +---------+---------+ |critical |CRITICAL | +---------+---------+ messages : dict Messages that are printed with some `Logger` methods like `ok()`, `success()`, `fail()`. If you wish to modify the text of this messages just edit the value of appropriate item. with_errors : int The flag shows that logger catched errors in the application during its execution. count_errors : int Number of errors that logger catched in the application during its execution. filters : dict Record types filters. To filter record type just set corresponding item value to False. root : pypyrus_logbook.output.Root The output `Root` object. console : pypyrus_logbook.output.Console The output `Console` object. Shortcut for `Logger.root.console`. file : pypyrus_logbook.output.File The output file. Shortcut for `Logger.output.file`. email : pypyrus_logbook.output.Email The output email. Shortcut for `Logger.output.email`. html: pypyrus_logbook.output.HTML The output HTML document. Shortcut for `Logger.output.html`. table: pypyrus_logbook.output.Table The output table. Shortcut for `Logger.output.table`. formatter : pypyrus_logbook.formatter.Formatter Logger formatter which sets all formatting configuration like record template, error message template, line length etc. sysinfo : pypyrus_logbook.sysinfo.Sysinfo Special input object which parse different inputs includeing system specifications, flag arguments, execution parameters, user parameters and environment variables and transforms all of that to `Dataset` object. Through the `Dataset` object data can be easily accessed by get item operation or by point like `sysinfo.desc['hostname']` or `sysinfo.desc.hostname`. header : pypyrus_logbook.header.Header The header that can be printed to the writable output. # Unique name of the logger. # Attributes describing the application. # Some logger important attributes # Complete the initial configuration. # Output shortcuts. # Set exit function. # Add creating logger to special all_loggers dictinary. Unique logger name. Logging start date. Flag that shows was an error or not. The number of occured errors. Main method to configure the logger and all its attributes. This is an only one right way to customize logger. Parameters are the same as for creatrion. Parameters ---------- app : str, optional The argument is used to set the `app` attribute. desc : str, optional The argument is used to set the `desc` attribute. version : str, optional The argument is used to set the `version` attribute. status : bool, optional The argument is used to open or close output `root`. console : bool, optional The argument is used to open or close output `console`. file : bool, optional The argument is used to open or close output `file`. email : bool, optional The argument is used to open or close output `email`. html : bool, optional The argument is used to open or close output `html`. table : bool, optional The argument is used to open or close output `table`. 
directory : str, optional The argument is used to set logging file folder. filename : str, optional The argument is used to set logging file name. extension : str, optional The argument is used to set logging file extension. smtp : dict, optional The argument is used to configure SMTP connection. db : dict, optional The argument is used to configure DB connection. format : str, optional The argument is used to set record template. info : bool, optional The argument is used to filter info records. debug : bool, optional The argument is used to filter debug records. warning : bool, optional The argument is used to filter warning records. error : bool, optional The argument is used to filter error records. critical : bool, optional The argument is used to filter critical records. alarming : bool, optional The argument is used to enable or disable alarming mechanism. control : bool, optional The argument is used to enable or disable execution break in case on error. maxsize : int or bool, optional The argument is used to define maximum size of output file. maxdays : int or bool, optional The argument is used to define maximum number of days that will be logged to same file. maxlevel : int or bool, optional The argument is used to define the break error level. maxerrors : int or bool, optional The argument is used to define maximun number of errors. # Build the output root if it is not exists. In other case modify # existing output if it is requested. # Customize output file path. # Customize SMTP server. # Customize database connection. # Create formatter in case it is not exists yet or just customize it. # Parameter format can be either string or dictionary. # When it is string then it must describe records format. # When it is dictionary it can contaion any parameter of formatter # that must be customized. # Create or customize record type filters. # Customize limits and parameters of execution behaviour. # Initialize sysinfo instance when not exists. # Initialize header instance when not exists. Direct write to the output. Parameters ---------- record : Record The argument is used to send it to the output `root`. Basic method to write records. Parameters ---------- rectype : str By default method creates the record with the type NONE. That can be changed but depends on available record types. All registered record types are stored in the instance attribute rectypes. If you wish to use own record type or change the presentaion of exeisting one then edit this dictinary. message : str The message that must be written. error : bool, optional If record is error then set that parameter to `True`. **kwargs The keyword arguments used for additional forms (variables) for record and message formatting. Send INFO record to output. Send DEBUG record to the output. Send ERROR record to the output. If exception in current traceback exists then method will format the exception according to `formatter.error` string presentation. If `formatter.error` is set to `False` the exception will be just printed in original Python style. Also method will send an alarm if alarming attribute is `True`, email output is enabled and SMTP server is configurated. If one of the limit triggers worked then application will be aborted. Parameters ---------- message : str, optional The message that must be written instead of exception. rectype : str, optional The type of error according to `rectypes` dictionary. format : str, optional The format of the error message. 
alarming : bool The argument is used to enable or disable the alarming mechanism for this certain call. level : int The argument is used to describe the error level. **kwargs The keyword arguments used for additional forms (variables) for record and message formatting. # Parse the error. # Break execution in case of critical error if permitted. # The alarm will be generated at exit if it is configured. # Send alarm if execution was not aborted but alarm is needed. Send WARNING error record to the output. Send CRITICAL error record to the output. Send header to the output. Send subheader as upper-case text between two border lines to the output. Parameters ---------- string : str The text that will be presented as subheader. Send raw text with the new line to the output. Parameters ---------- message : str The message that must be written. Write horizontal border in the output. Useful when need to separate different blocks of information. Parameters ---------- div : str, optional Symbol that is used to bulid the bound. length : int, optional Lenght of the bound. Write blank lines in the output. Parameters ---------- number : int, optional The number of the blank lines that must be written. Print INFO message with OK. Print INFO message with SUCCESS. Print INFO message with FAIL. Restart logging. Will open new file. Send email message. Note that SMTP server connection must be configured. Update values in table. Note that DB connection must be configured. # Inform about the error. Calculate the date when logger must be restarted according to maxdays parameter. Check the output file statistics to catch when current file must be closed and new one must be opened. | 2.861678 | 3 |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py | revnav/sandbox | 0 | 780 | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
MEBIBYTE = 1024 * 1024
STREAMING_DEFAULT_PART_SIZE = 10 * MEBIBYTE
DEFAULT_PART_SIZE = 128 * MEBIBYTE
OBJECT_USE_MULTIPART_SIZE = 128 * MEBIBYTE
| # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
MEBIBYTE = 1024 * 1024
STREAMING_DEFAULT_PART_SIZE = 10 * MEBIBYTE
DEFAULT_PART_SIZE = 128 * MEBIBYTE
OBJECT_USE_MULTIPART_SIZE = 128 * MEBIBYTE
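# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how these constants are commonly interpreted: objects at
# or above OBJECT_USE_MULTIPART_SIZE would be uploaded in DEFAULT_PART_SIZE
# chunks. The object size below is made up for illustration.
import math

_example_object_size = 1000 * MEBIBYTE  # hypothetical 1000 MiB object
if _example_object_size >= OBJECT_USE_MULTIPART_SIZE:
    # 1000 MiB split into 128 MiB parts -> 8 parts
    _example_part_count = math.ceil(_example_object_size / DEFAULT_PART_SIZE)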
| en | 0.875988 | # coding: utf-8 # Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. | 0.885054 | 1 |
scraper/news/spiders/millardayo.py | ZendaInnocent/news-api | 3 | 781 | <filename>scraper/news/spiders/millardayo.py
# Spider for MillardAyo.com
import scrapy
from bs4 import BeautifulSoup
class MillardAyoSpider(scrapy.Spider):
name = 'millardayo'
allowed_urls = ['www.millardayo.com']
start_urls = [
'https://millardayo.com',
]
def parse(self, response, **kwargs):
# extracting data - link, image, title, excerpt
soup = BeautifulSoup(response.body, 'lxml')
posts = soup.find_all('li', {'class': 'infinite-post'})
for post in posts:
try:
yield {
'image_url': post.find('img').get('src'),
'link': post.find('a').get('href'),
'title': post.find('a').get('title'),
'excerpt': post.find('p').get_text(),
'source': 'millardayo',
}
except AttributeError:
pass
next_page = soup.find('a', text='Next ›').get('href')
if next_page:
yield response.follow(next_page, callback=self.parse)
| <filename>scraper/news/spiders/millardayo.py
# Spider for MillardAyo.com
import scrapy
from bs4 import BeautifulSoup
class MillardAyoSpider(scrapy.Spider):
name = 'millardayo'
allowed_urls = ['www.millardayo.com']
start_urls = [
'https://millardayo.com',
]
def parse(self, response, **kwargs):
# extracting data - link, image, title, excerpt
soup = BeautifulSoup(response.body, 'lxml')
posts = soup.find_all('li', {'class': 'infinite-post'})
for post in posts:
try:
yield {
'image_url': post.find('img').get('src'),
'link': post.find('a').get('href'),
'title': post.find('a').get('title'),
'excerpt': post.find('p').get_text(),
'source': 'millardayo',
}
except AttributeError:
pass
next_page = soup.find('a', text='Next ›').get('href')
if next_page:
yield response.follow(next_page, callback=self.parse)
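# --- Illustrative usage sketch (not part of the original project) ---
# A hedged example of running this spider programmatically with Scrapy's
# CrawlerProcess; the feed path and user agent are illustrative values,
# not taken from the repository.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess(settings={
        'FEEDS': {'millardayo.json': {'format': 'json'}},
        'USER_AGENT': 'news-api-scraper (+https://example.com)',
    })
    process.crawl(MillardAyoSpider)
    process.start()  # blocks until the crawl finishes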
| en | 0.419692 | # Spider for MillardAyo.com # extracting data - link, image, title, excerpt | 2.996513 | 3 |
sdks/python/apache_beam/runners/portability/expansion_service_test.py | stephenoken/beam | 3 | 782 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import signal
import sys
import typing
import grpc
from past.builtins import unicode
import apache_beam as beam
import apache_beam.transforms.combiners as combine
from apache_beam.coders import StrUtf8Coder
from apache_beam.pipeline import PipelineOptions
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.portability.api.external_transforms_pb2 import ExternalConfigurationPayload
from apache_beam.runners.portability import expansion_service
from apache_beam.transforms import ptransform
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
# This script provides an expansion service and example ptransforms for running
# external transform test cases. See external_test.py for details.
_LOGGER = logging.getLogger(__name__)
TEST_PREFIX_URN = "beam:transforms:xlang:test:prefix"
TEST_MULTI_URN = "beam:transforms:xlang:test:multi"
TEST_GBK_URN = "beam:transforms:xlang:test:gbk"
TEST_CGBK_URN = "beam:transforms:xlang:test:cgbk"
TEST_COMGL_URN = "beam:transforms:xlang:test:comgl"
TEST_COMPK_URN = "beam:transforms:xlang:test:compk"
TEST_FLATTEN_URN = "beam:transforms:xlang:test:flatten"
TEST_PARTITION_URN = "beam:transforms:xlang:test:partition"
@ptransform.PTransform.register_urn('beam:transforms:xlang:count', None)
class CountPerElementTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll | combine.Count.PerElement()
def to_runner_api_parameter(self, unused_context):
return 'beam:transforms:xlang:count', None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return CountPerElementTransform()
@ptransform.PTransform.register_urn(
'beam:transforms:xlang:filter_less_than_eq', bytes)
class FilterLessThanTransform(ptransform.PTransform):
def __init__(self, payload):
self._payload = payload
def expand(self, pcoll):
return (
pcoll | beam.Filter(
lambda elem, target: elem <= target, int(ord(self._payload[0]))))
def to_runner_api_parameter(self, unused_context):
return (
'beam:transforms:xlang:filter_less_than', self._payload.encode('utf8'))
@staticmethod
def from_runner_api_parameter(unused_ptransform, payload, unused_context):
return FilterLessThanTransform(payload.decode('utf8'))
@ptransform.PTransform.register_urn(TEST_PREFIX_URN, None)
@beam.typehints.with_output_types(unicode)
class PrefixTransform(ptransform.PTransform):
def __init__(self, payload):
self._payload = payload
def expand(self, pcoll):
return pcoll | 'TestLabel' >> beam.Map(
lambda x: '{}{}'.format(self._payload, x))
def to_runner_api_parameter(self, unused_context):
return TEST_PREFIX_URN, ImplicitSchemaPayloadBuilder(
{'data': self._payload}).payload()
@staticmethod
def from_runner_api_parameter(unused_ptransform, payload, unused_context):
return PrefixTransform(parse_string_payload(payload)['data'])
@ptransform.PTransform.register_urn(TEST_MULTI_URN, None)
class MutltiTransform(ptransform.PTransform):
def expand(self, pcolls):
return {
'main': (pcolls['main1'], pcolls['main2'])
| beam.Flatten()
| beam.Map(lambda x, s: x + s, beam.pvalue.AsSingleton(
pcolls['side'])).with_output_types(unicode),
'side': pcolls['side']
| beam.Map(lambda x: x + x).with_output_types(unicode),
}
def to_runner_api_parameter(self, unused_context):
return TEST_MULTI_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return MutltiTransform()
@ptransform.PTransform.register_urn(TEST_GBK_URN, None)
class GBKTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll | 'TestLabel' >> beam.GroupByKey()
def to_runner_api_parameter(self, unused_context):
return TEST_GBK_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return GBKTransform()
@ptransform.PTransform.register_urn(TEST_CGBK_URN, None)
class CoGBKTransform(ptransform.PTransform):
class ConcatFn(beam.DoFn):
def process(self, element):
(k, v) = element
return [(k, v['col1'] + v['col2'])]
def expand(self, pcoll):
return pcoll \
| beam.CoGroupByKey() \
| beam.ParDo(self.ConcatFn()).with_output_types(
typing.Tuple[int, typing.Iterable[unicode]])
def to_runner_api_parameter(self, unused_context):
return TEST_CGBK_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return CoGBKTransform()
@ptransform.PTransform.register_urn(TEST_COMGL_URN, None)
class CombineGloballyTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll \
| beam.CombineGlobally(sum).with_output_types(int)
def to_runner_api_parameter(self, unused_context):
return TEST_COMGL_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return CombineGloballyTransform()
@ptransform.PTransform.register_urn(TEST_COMPK_URN, None)
class CombinePerKeyTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll \
| beam.CombinePerKey(sum).with_output_types(
typing.Tuple[unicode, int])
def to_runner_api_parameter(self, unused_context):
return TEST_COMPK_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return CombinePerKeyTransform()
@ptransform.PTransform.register_urn(TEST_FLATTEN_URN, None)
class FlattenTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll.values() | beam.Flatten().with_output_types(int)
def to_runner_api_parameter(self, unused_context):
return TEST_FLATTEN_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return FlattenTransform()
@ptransform.PTransform.register_urn(TEST_PARTITION_URN, None)
class PartitionTransform(ptransform.PTransform):
def expand(self, pcoll):
col1, col2 = pcoll | beam.Partition(
lambda elem, n: 0 if elem % 2 == 0 else 1, 2)
typed_col1 = col1 | beam.Map(lambda x: x).with_output_types(int)
typed_col2 = col2 | beam.Map(lambda x: x).with_output_types(int)
return {'0': typed_col1, '1': typed_col2}
def to_runner_api_parameter(self, unused_context):
return TEST_PARTITION_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return PartitionTransform()
@ptransform.PTransform.register_urn('payload', bytes)
class PayloadTransform(ptransform.PTransform):
def __init__(self, payload):
self._payload = payload
def expand(self, pcoll):
return pcoll | beam.Map(lambda x, s: x + s, self._payload)
def to_runner_api_parameter(self, unused_context):
return b'payload', self._payload.encode('ascii')
@staticmethod
def from_runner_api_parameter(unused_ptransform, payload, unused_context):
return PayloadTransform(payload.decode('ascii'))
@ptransform.PTransform.register_urn('fib', bytes)
class FibTransform(ptransform.PTransform):
def __init__(self, level):
self._level = level
def expand(self, p):
if self._level <= 2:
return p | beam.Create([1])
else:
a = p | 'A' >> beam.ExternalTransform(
'fib',
str(self._level - 1).encode('ascii'),
expansion_service.ExpansionServiceServicer())
b = p | 'B' >> beam.ExternalTransform(
'fib',
str(self._level - 2).encode('ascii'),
expansion_service.ExpansionServiceServicer())
return ((a, b)
| beam.Flatten()
| beam.CombineGlobally(sum).without_defaults())
def to_runner_api_parameter(self, unused_context):
return 'fib', str(self._level).encode('ascii')
@staticmethod
def from_runner_api_parameter(unused_ptransform, level, unused_context):
return FibTransform(int(level.decode('ascii')))
def parse_string_payload(input_byte):
payload = ExternalConfigurationPayload()
payload.ParseFromString(input_byte)
coder = StrUtf8Coder()
return {
k: coder.decode_nested(v.payload)
for k,
v in payload.configuration.items()
}
server = None
def cleanup(unused_signum, unused_frame):
_LOGGER.info('Shutting down expansion service.')
server.stop(None)
def main(unused_argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', type=int, help='port on which to serve the job api')
options = parser.parse_args()
global server
server = grpc.server(UnboundedThreadPoolExecutor())
beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server(
expansion_service.ExpansionServiceServicer(
PipelineOptions(
["--experiments", "beam_fn_api", "--sdk_location", "container"])),
server)
server.add_insecure_port('localhost:{}'.format(options.port))
server.start()
_LOGGER.info('Listening for expansion requests at %d', options.port)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGINT, cleanup)
# blocking main thread forever.
signal.pause()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv)
| #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import signal
import sys
import typing
import grpc
from past.builtins import unicode
import apache_beam as beam
import apache_beam.transforms.combiners as combine
from apache_beam.coders import StrUtf8Coder
from apache_beam.pipeline import PipelineOptions
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.portability.api.external_transforms_pb2 import ExternalConfigurationPayload
from apache_beam.runners.portability import expansion_service
from apache_beam.transforms import ptransform
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
# This script provides an expansion service and example ptransforms for running
# external transform test cases. See external_test.py for details.
_LOGGER = logging.getLogger(__name__)
TEST_PREFIX_URN = "beam:transforms:xlang:test:prefix"
TEST_MULTI_URN = "beam:transforms:xlang:test:multi"
TEST_GBK_URN = "beam:transforms:xlang:test:gbk"
TEST_CGBK_URN = "beam:transforms:xlang:test:cgbk"
TEST_COMGL_URN = "beam:transforms:xlang:test:comgl"
TEST_COMPK_URN = "beam:transforms:xlang:test:compk"
TEST_FLATTEN_URN = "beam:transforms:xlang:test:flatten"
TEST_PARTITION_URN = "beam:transforms:xlang:test:partition"
@ptransform.PTransform.register_urn('beam:transforms:xlang:count', None)
class CountPerElementTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll | combine.Count.PerElement()
def to_runner_api_parameter(self, unused_context):
return 'beam:transforms:xlang:count', None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return CountPerElementTransform()
@ptransform.PTransform.register_urn(
'beam:transforms:xlang:filter_less_than_eq', bytes)
class FilterLessThanTransform(ptransform.PTransform):
def __init__(self, payload):
self._payload = payload
def expand(self, pcoll):
return (
pcoll | beam.Filter(
lambda elem, target: elem <= target, int(ord(self._payload[0]))))
def to_runner_api_parameter(self, unused_context):
return (
'beam:transforms:xlang:filter_less_than', self._payload.encode('utf8'))
@staticmethod
def from_runner_api_parameter(unused_ptransform, payload, unused_context):
return FilterLessThanTransform(payload.decode('utf8'))
@ptransform.PTransform.register_urn(TEST_PREFIX_URN, None)
@beam.typehints.with_output_types(unicode)
class PrefixTransform(ptransform.PTransform):
def __init__(self, payload):
self._payload = payload
def expand(self, pcoll):
return pcoll | 'TestLabel' >> beam.Map(
lambda x: '{}{}'.format(self._payload, x))
def to_runner_api_parameter(self, unused_context):
return TEST_PREFIX_URN, ImplicitSchemaPayloadBuilder(
{'data': self._payload}).payload()
@staticmethod
def from_runner_api_parameter(unused_ptransform, payload, unused_context):
return PrefixTransform(parse_string_payload(payload)['data'])
@ptransform.PTransform.register_urn(TEST_MULTI_URN, None)
class MutltiTransform(ptransform.PTransform):
def expand(self, pcolls):
return {
'main': (pcolls['main1'], pcolls['main2'])
| beam.Flatten()
| beam.Map(lambda x, s: x + s, beam.pvalue.AsSingleton(
pcolls['side'])).with_output_types(unicode),
'side': pcolls['side']
| beam.Map(lambda x: x + x).with_output_types(unicode),
}
def to_runner_api_parameter(self, unused_context):
return TEST_MULTI_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return MutltiTransform()
@ptransform.PTransform.register_urn(TEST_GBK_URN, None)
class GBKTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll | 'TestLabel' >> beam.GroupByKey()
def to_runner_api_parameter(self, unused_context):
return TEST_GBK_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return GBKTransform()
@ptransform.PTransform.register_urn(TEST_CGBK_URN, None)
class CoGBKTransform(ptransform.PTransform):
class ConcatFn(beam.DoFn):
def process(self, element):
(k, v) = element
return [(k, v['col1'] + v['col2'])]
def expand(self, pcoll):
return pcoll \
| beam.CoGroupByKey() \
| beam.ParDo(self.ConcatFn()).with_output_types(
typing.Tuple[int, typing.Iterable[unicode]])
def to_runner_api_parameter(self, unused_context):
return TEST_CGBK_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return CoGBKTransform()
@ptransform.PTransform.register_urn(TEST_COMGL_URN, None)
class CombineGloballyTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll \
| beam.CombineGlobally(sum).with_output_types(int)
def to_runner_api_parameter(self, unused_context):
return TEST_COMGL_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return CombineGloballyTransform()
@ptransform.PTransform.register_urn(TEST_COMPK_URN, None)
class CombinePerKeyTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll \
| beam.CombinePerKey(sum).with_output_types(
typing.Tuple[unicode, int])
def to_runner_api_parameter(self, unused_context):
return TEST_COMPK_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return CombinePerKeyTransform()
@ptransform.PTransform.register_urn(TEST_FLATTEN_URN, None)
class FlattenTransform(ptransform.PTransform):
def expand(self, pcoll):
return pcoll.values() | beam.Flatten().with_output_types(int)
def to_runner_api_parameter(self, unused_context):
return TEST_FLATTEN_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return FlattenTransform()
@ptransform.PTransform.register_urn(TEST_PARTITION_URN, None)
class PartitionTransform(ptransform.PTransform):
def expand(self, pcoll):
col1, col2 = pcoll | beam.Partition(
lambda elem, n: 0 if elem % 2 == 0 else 1, 2)
typed_col1 = col1 | beam.Map(lambda x: x).with_output_types(int)
typed_col2 = col2 | beam.Map(lambda x: x).with_output_types(int)
return {'0': typed_col1, '1': typed_col2}
def to_runner_api_parameter(self, unused_context):
return TEST_PARTITION_URN, None
@staticmethod
def from_runner_api_parameter(
unused_ptransform, unused_parameter, unused_context):
return PartitionTransform()
@ptransform.PTransform.register_urn('payload', bytes)
class PayloadTransform(ptransform.PTransform):
def __init__(self, payload):
self._payload = payload
def expand(self, pcoll):
return pcoll | beam.Map(lambda x, s: x + s, self._payload)
def to_runner_api_parameter(self, unused_context):
return b'payload', self._payload.encode('ascii')
@staticmethod
def from_runner_api_parameter(unused_ptransform, payload, unused_context):
return PayloadTransform(payload.decode('ascii'))
@ptransform.PTransform.register_urn('fib', bytes)
class FibTransform(ptransform.PTransform):
def __init__(self, level):
self._level = level
def expand(self, p):
if self._level <= 2:
return p | beam.Create([1])
else:
a = p | 'A' >> beam.ExternalTransform(
'fib',
str(self._level - 1).encode('ascii'),
expansion_service.ExpansionServiceServicer())
b = p | 'B' >> beam.ExternalTransform(
'fib',
str(self._level - 2).encode('ascii'),
expansion_service.ExpansionServiceServicer())
return ((a, b)
| beam.Flatten()
| beam.CombineGlobally(sum).without_defaults())
def to_runner_api_parameter(self, unused_context):
return 'fib', str(self._level).encode('ascii')
@staticmethod
def from_runner_api_parameter(unused_ptransform, level, unused_context):
return FibTransform(int(level.decode('ascii')))
def parse_string_payload(input_byte):
payload = ExternalConfigurationPayload()
payload.ParseFromString(input_byte)
coder = StrUtf8Coder()
return {
k: coder.decode_nested(v.payload)
for k,
v in payload.configuration.items()
}
server = None
def cleanup(unused_signum, unused_frame):
_LOGGER.info('Shutting down expansion service.')
server.stop(None)
def main(unused_argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', type=int, help='port on which to serve the job api')
options = parser.parse_args()
global server
server = grpc.server(UnboundedThreadPoolExecutor())
beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server(
expansion_service.ExpansionServiceServicer(
PipelineOptions(
["--experiments", "beam_fn_api", "--sdk_location", "container"])),
server)
server.add_insecure_port('localhost:{}'.format(options.port))
server.start()
_LOGGER.info('Listening for expansion requests at %d', options.port)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGINT, cleanup)
# blocking main thread forever.
signal.pause()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv)
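# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of invoking one of the transforms registered above through
# an expansion service, modeled on how FibTransform expands itself. The input
# elements and the 'data' payload value are made up for illustration.
def _example_prefix_pipeline():
    with beam.Pipeline() as p:
        _ = (
            p
            | beam.Create(['a', 'b', 'c'])
            | beam.ExternalTransform(
                TEST_PREFIX_URN,
                ImplicitSchemaPayloadBuilder({'data': u'demo-'}),
                expansion_service.ExpansionServiceServicer()))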
| en | 0.830501 | # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pytype: skip-file # This script provides an expansion service and example ptransforms for running # external transform test cases. See external_test.py for details. # blocking main thread forever. | 1.678757 | 2 |
db.py | RunnerPro/RunnerProApi | 0 | 783 | <reponame>RunnerPro/RunnerProApi
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from settings import DB_URI
Session = sessionmaker(autocommit=False, autoflush=False, bind=create_engine(DB_URI))
session = scoped_session(Session)
| from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from settings import DB_URI
Session = sessionmaker(autocommit=False, autoflush=False, bind=create_engine(DB_URI))
session = scoped_session(Session) | none | 1 | 1.896408 | 2 |
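# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of using the scoped session above; the `Run` model and its
# table are hypothetical and exist only for this sketch.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Run(Base):
        __tablename__ = 'example_runs'  # hypothetical table
        id = Column(Integer, primary_key=True)
        name = Column(String)

    Base.metadata.create_all(bind=session.get_bind())
    session.add(Run(name='morning jog'))
    session.commit()
    print(session.query(Run).count())
    session.remove()  # release the scoped session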
|
python/tvm/contrib/nvcc.py | ntanhbk44/tvm | 0 | 784 | <filename>python/tvm/contrib/nvcc.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility to invoke nvcc compiler in the system"""
from __future__ import absolute_import as _abs
import subprocess
import os
import warnings
import tvm._ffi
from tvm.runtime import ndarray as nd
from . import utils
from .._ffi.base import py_str
def compile_cuda(code, target="ptx", arch=None, options=None, path_target=None):
"""Compile cuda code with NVCC from env.
Parameters
----------
code : str
The cuda code.
target : str
The target format
arch : str
The architecture
options : str or list of str
The additional options
path_target : str, optional
Output file.
Return
------
cubin : bytearray
The bytearray of the cubin
"""
temp = utils.tempdir()
if target not in ["cubin", "ptx", "fatbin"]:
raise ValueError("target must be in cubin, ptx, fatbin")
temp_code = temp.relpath("my_kernel.cu")
temp_target = temp.relpath("my_kernel.%s" % target)
with open(temp_code, "w") as out_file:
out_file.write(code)
if arch is None:
if nd.gpu(0).exist:
# auto detect the compute arch argument
arch = "sm_" + "".join(nd.gpu(0).compute_version.split("."))
else:
raise ValueError("arch(sm_xy) is not passed, and we cannot detect it from env")
file_target = path_target if path_target else temp_target
cmd = ["nvcc"]
cmd += ["--%s" % target, "-O3"]
if isinstance(arch, list):
cmd += arch
else:
cmd += ["-arch", arch]
if options:
if isinstance(options, str):
cmd += [options]
elif isinstance(options, list):
cmd += options
else:
raise ValueError("options must be str or list of str")
cmd += ["-o", file_target]
cmd += [temp_code]
# NOTE: ccbin option can be used to tell nvcc where to find the c++ compiler
# just in case it is not in the path. On Windows it is not in the path by default.
# However, we cannot use TVM_CXX_COMPILER_PATH because the runtime env.
# Because it is hard to do runtime compiler detection, we require nvcc is configured
# correctly by default.
# if cxx_compiler_path != "":
# cmd += ["-ccbin", cxx_compiler_path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = code
msg += "\nCompilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
data = bytearray(open(file_target, "rb").read())
if not data:
raise RuntimeError("Compilation error: empty result is generated")
return data
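# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of calling compile_cuda directly; it assumes nvcc is on the
# PATH and a compute architecture such as "sm_70". The kernel source is a
# made-up example, not taken from TVM.
def _example_compile_add_one():
    kernel = r"""
    extern "C" __global__ void add_one(float* x, int n) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) x[i] = x[i] + 1.0f;
    }
    """
    # Returns PTX bytes produced by nvcc for the requested architecture.
    return compile_cuda(kernel, target="ptx", arch="sm_70")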
def find_cuda_path():
"""Utility function to find cuda path
Returns
-------
path : str
Path to cuda root.
"""
if "CUDA_PATH" in os.environ:
return os.environ["CUDA_PATH"]
cmd = ["which", "nvcc"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
out = py_str(out)
if proc.returncode == 0:
return os.path.realpath(os.path.join(str(out).strip(), "../.."))
cuda_path = "/usr/local/cuda"
if os.path.exists(os.path.join(cuda_path, "bin/nvcc")):
return cuda_path
raise RuntimeError("Cannot find cuda path")
def get_cuda_version(cuda_path):
"""Utility function to get cuda version
Parameters
----------
cuda_path : str
Path to cuda root.
Returns
-------
version : float
The cuda version
"""
version_file_path = os.path.join(cuda_path, "version.txt")
if not os.path.exists(version_file_path):
# Debian/Ubuntu repackaged CUDA path
version_file_path = os.path.join(cuda_path, "lib", "cuda", "version.txt")
try:
with open(version_file_path) as f:
version_str = f.readline().replace("\n", "").replace("\r", "")
return float(version_str.split(" ")[2][:2])
except FileNotFoundError:
pass
cmd = [os.path.join(cuda_path, "bin", "nvcc"), "--version"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
out = py_str(out)
if proc.returncode == 0:
release_line = [l for l in out.split("\n") if "release" in l][0]
release_fields = [s.strip() for s in release_line.split(",")]
release_version = [f[1:] for f in release_fields if f.startswith("V")][0]
major_minor = ".".join(release_version.split(".")[:2])
return float(major_minor)
raise RuntimeError("Cannot read cuda version file")
@tvm._ffi.register_func("tvm_callback_libdevice_path")
def find_libdevice_path(arch):
"""Utility function to find libdevice
Parameters
----------
arch : int
The compute architecture in int
Returns
-------
path : str
Path to libdevice.
"""
cuda_path = find_cuda_path()
lib_path = os.path.join(cuda_path, "nvvm/libdevice")
if not os.path.exists(lib_path):
# Debian/Ubuntu repackaged CUDA path
lib_path = os.path.join(cuda_path, "lib/nvidia-cuda-toolkit/libdevice")
selected_ver = 0
selected_path = None
cuda_ver = get_cuda_version(cuda_path)
if cuda_ver in (9.0, 9.1, 10.0, 10.1, 10.2, 11.0, 11.1, 11.2):
path = os.path.join(lib_path, "libdevice.10.bc")
else:
for fn in os.listdir(lib_path):
if not fn.startswith("libdevice"):
continue
ver = int(fn.split(".")[-3].split("_")[-1])
if selected_ver < ver <= arch:
selected_ver = ver
selected_path = fn
if selected_path is None:
raise RuntimeError("Cannot find libdevice for arch {}".format(arch))
path = os.path.join(lib_path, selected_path)
return path
def callback_libdevice_path(arch):
try:
return find_libdevice_path(arch)
except RuntimeError:
warnings.warn("Cannot find libdevice path")
return ""
def get_target_compute_version(target=None):
"""Utility function to get compute capability of compilation target.
Looks for the arch in three different places, first in the target attributes, then the global
scope, and finally the GPU device (if it exists).
Parameters
----------
target : tvm.target.Target, optional
The compilation target
Returns
-------
compute_version : str
compute capability of a GPU (e.g. "8.0")
"""
# 1. Target
if target:
if "arch" in target.attrs:
compute_version = target.attrs["arch"]
major, minor = compute_version.split("_")[1]
return major + "." + minor
# 2. Global scope
from tvm.autotvm.env import AutotvmGlobalScope # pylint: disable=import-outside-toplevel
if AutotvmGlobalScope.current.cuda_target_arch:
major, minor = AutotvmGlobalScope.current.cuda_target_arch.split("_")[1]
return major + "." + minor
# 3. GPU
if tvm.gpu(0).exist:
return tvm.gpu(0).compute_version
warnings.warn(
"No CUDA architecture was specified or GPU detected."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
return None
def parse_compute_version(compute_version):
"""Parse compute capability string to divide major and minor version
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "6.0")
Returns
-------
major : int
major version number
minor : int
minor version number
"""
split_ver = compute_version.split(".")
try:
major = int(split_ver[0])
minor = int(split_ver[1])
return major, minor
except (IndexError, ValueError) as err:
# pylint: disable=raise-missing-from
raise RuntimeError("Compute version parsing error: " + str(err))
def have_fp16(compute_version):
"""Either fp16 support is provided in the compute capability or not
Parameters
----------
compute_version: str
compute capability of a GPU (e.g. "6.0")
"""
major, minor = parse_compute_version(compute_version)
# fp 16 support in reference to:
# https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions
if major == 5 and minor == 3:
return True
if major >= 6:
return True
return False
def have_int8(compute_version):
"""Either int8 support is provided in the compute capability or not
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "6.1")
"""
major, _ = parse_compute_version(compute_version)
if major >= 6:
return True
return False
def have_tensorcore(compute_version=None, target=None):
"""Either TensorCore support is provided in the compute capability or not
Parameters
----------
compute_version : str, optional
compute capability of a GPU (e.g. "7.0").
target : tvm.target.Target, optional
The compilation target, will be used to determine arch if compute_version
isn't specified.
"""
if compute_version is None:
if tvm.gpu(0).exist:
compute_version = tvm.gpu(0).compute_version
else:
if target is None or "arch" not in target.attrs:
warnings.warn(
"Tensorcore will be disabled due to no CUDA architecture specified."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
return False
compute_version = target.attrs["arch"]
# Compute version will be in the form "sm_{major}{minor}"
major, minor = compute_version.split("_")[1]
compute_version = major + "." + minor
major, _ = parse_compute_version(compute_version)
if major >= 7:
return True
return False
def have_cudagraph():
"""Either CUDA Graph support is provided"""
try:
cuda_path = find_cuda_path()
cuda_ver = get_cuda_version(cuda_path)
if cuda_ver < 10.0:
return False
return True
except RuntimeError:
return False
def have_bf16(compute_version):
"""Either bf16 support is provided in the compute capability or not
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "8.0")
"""
major, _ = parse_compute_version(compute_version)
if major >= 8:
return True
return False
| <filename>python/tvm/contrib/nvcc.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility to invoke nvcc compiler in the system"""
from __future__ import absolute_import as _abs
import subprocess
import os
import warnings
import tvm._ffi
from tvm.runtime import ndarray as nd
from . import utils
from .._ffi.base import py_str
def compile_cuda(code, target="ptx", arch=None, options=None, path_target=None):
"""Compile cuda code with NVCC from env.
Parameters
----------
code : str
The cuda code.
target : str
The target format
arch : str
The architecture
options : str or list of str
The additional options
path_target : str, optional
Output file.
Returns
-------
cubin : bytearray
The bytearray of the cubin
"""
temp = utils.tempdir()
if target not in ["cubin", "ptx", "fatbin"]:
raise ValueError("target must be in cubin, ptx, fatbin")
temp_code = temp.relpath("my_kernel.cu")
temp_target = temp.relpath("my_kernel.%s" % target)
with open(temp_code, "w") as out_file:
out_file.write(code)
if arch is None:
if nd.gpu(0).exist:
# auto detect the compute arch argument
arch = "sm_" + "".join(nd.gpu(0).compute_version.split("."))
else:
raise ValueError("arch(sm_xy) is not passed, and we cannot detect it from env")
file_target = path_target if path_target else temp_target
cmd = ["nvcc"]
cmd += ["--%s" % target, "-O3"]
if isinstance(arch, list):
cmd += arch
else:
cmd += ["-arch", arch]
if options:
if isinstance(options, str):
cmd += [options]
elif isinstance(options, list):
cmd += options
else:
raise ValueError("options must be str or list of str")
cmd += ["-o", file_target]
cmd += [temp_code]
# NOTE: ccbin option can be used to tell nvcc where to find the c++ compiler
# just in case it is not in the path. On Windows it is not in the path by default.
# However, we cannot use TVM_CXX_COMPILER_PATH because of the runtime env.
# Because it is hard to do runtime compiler detection, we require nvcc is configured
# correctly by default.
# if cxx_compiler_path != "":
# cmd += ["-ccbin", cxx_compiler_path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = code
msg += "\nCompilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
data = bytearray(open(file_target, "rb").read())
if not data:
raise RuntimeError("Compilation error: empty result is generated")
return data
def find_cuda_path():
"""Utility function to find cuda path
Returns
-------
path : str
Path to cuda root.
"""
if "CUDA_PATH" in os.environ:
return os.environ["CUDA_PATH"]
cmd = ["which", "nvcc"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
out = py_str(out)
if proc.returncode == 0:
return os.path.realpath(os.path.join(str(out).strip(), "../.."))
cuda_path = "/usr/local/cuda"
if os.path.exists(os.path.join(cuda_path, "bin/nvcc")):
return cuda_path
raise RuntimeError("Cannot find cuda path")
def get_cuda_version(cuda_path):
"""Utility function to get cuda version
Parameters
----------
cuda_path : str
Path to cuda root.
Returns
-------
version : float
The cuda version
"""
version_file_path = os.path.join(cuda_path, "version.txt")
if not os.path.exists(version_file_path):
# Debian/Ubuntu repackaged CUDA path
version_file_path = os.path.join(cuda_path, "lib", "cuda", "version.txt")
try:
with open(version_file_path) as f:
version_str = f.readline().replace("\n", "").replace("\r", "")
return float(version_str.split(" ")[2][:2])
except FileNotFoundError:
pass
cmd = [os.path.join(cuda_path, "bin", "nvcc"), "--version"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
out = py_str(out)
if proc.returncode == 0:
release_line = [l for l in out.split("\n") if "release" in l][0]
release_fields = [s.strip() for s in release_line.split(",")]
release_version = [f[1:] for f in release_fields if f.startswith("V")][0]
major_minor = ".".join(release_version.split(".")[:2])
return float(major_minor)
raise RuntimeError("Cannot read cuda version file")
@tvm._ffi.register_func("tvm_callback_libdevice_path")
def find_libdevice_path(arch):
"""Utility function to find libdevice
Parameters
----------
arch : int
The compute architecture in int
Returns
-------
path : str
Path to libdevice.
"""
cuda_path = find_cuda_path()
lib_path = os.path.join(cuda_path, "nvvm/libdevice")
if not os.path.exists(lib_path):
# Debian/Ubuntu repackaged CUDA path
lib_path = os.path.join(cuda_path, "lib/nvidia-cuda-toolkit/libdevice")
selected_ver = 0
selected_path = None
cuda_ver = get_cuda_version(cuda_path)
if cuda_ver in (9.0, 9.1, 10.0, 10.1, 10.2, 11.0, 11.1, 11.2):
path = os.path.join(lib_path, "libdevice.10.bc")
else:
for fn in os.listdir(lib_path):
if not fn.startswith("libdevice"):
continue
ver = int(fn.split(".")[-3].split("_")[-1])
if selected_ver < ver <= arch:
selected_ver = ver
selected_path = fn
if selected_path is None:
raise RuntimeError("Cannot find libdevice for arch {}".format(arch))
path = os.path.join(lib_path, selected_path)
return path
def callback_libdevice_path(arch):
try:
return find_libdevice_path(arch)
except RuntimeError:
warnings.warn("Cannot find libdevice path")
return ""
def get_target_compute_version(target=None):
"""Utility function to get compute capability of compilation target.
Looks for the arch in three different places, first in the target attributes, then the global
scope, and finally the GPU device (if it exists).
Parameters
----------
target : tvm.target.Target, optional
The compilation target
Returns
-------
compute_version : str
compute capability of a GPU (e.g. "8.0")
"""
# 1. Target
if target:
if "arch" in target.attrs:
compute_version = target.attrs["arch"]
major, minor = compute_version.split("_")[1]
return major + "." + minor
# 2. Global scope
from tvm.autotvm.env import AutotvmGlobalScope # pylint: disable=import-outside-toplevel
if AutotvmGlobalScope.current.cuda_target_arch:
major, minor = AutotvmGlobalScope.current.cuda_target_arch.split("_")[1]
return major + "." + minor
# 3. GPU
if tvm.gpu(0).exist:
return tvm.gpu(0).compute_version
warnings.warn(
"No CUDA architecture was specified or GPU detected."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
return None
def parse_compute_version(compute_version):
"""Parse compute capability string to divide major and minor version
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "6.0")
Returns
-------
major : int
major version number
minor : int
minor version number
"""
split_ver = compute_version.split(".")
try:
major = int(split_ver[0])
minor = int(split_ver[1])
return major, minor
except (IndexError, ValueError) as err:
# pylint: disable=raise-missing-from
raise RuntimeError("Compute version parsing error: " + str(err))
def have_fp16(compute_version):
"""Either fp16 support is provided in the compute capability or not
Parameters
----------
compute_version: str
compute capability of a GPU (e.g. "6.0")
"""
major, minor = parse_compute_version(compute_version)
# fp 16 support in reference to:
# https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions
if major == 5 and minor == 3:
return True
if major >= 6:
return True
return False
def have_int8(compute_version):
"""Either int8 support is provided in the compute capability or not
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "6.1")
"""
major, _ = parse_compute_version(compute_version)
if major >= 6:
return True
return False
def have_tensorcore(compute_version=None, target=None):
"""Either TensorCore support is provided in the compute capability or not
Parameters
----------
compute_version : str, optional
compute capability of a GPU (e.g. "7.0").
target : tvm.target.Target, optional
The compilation target, will be used to determine arch if compute_version
isn't specified.
"""
if compute_version is None:
if tvm.gpu(0).exist:
compute_version = tvm.gpu(0).compute_version
else:
if target is None or "arch" not in target.attrs:
warnings.warn(
"Tensorcore will be disabled due to no CUDA architecture specified."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
return False
compute_version = target.attrs["arch"]
# Compute version will be in the form "sm_{major}{minor}"
major, minor = compute_version.split("_")[1]
compute_version = major + "." + minor
major, _ = parse_compute_version(compute_version)
if major >= 7:
return True
return False
def have_cudagraph():
"""Either CUDA Graph support is provided"""
try:
cuda_path = find_cuda_path()
cuda_ver = get_cuda_version(cuda_path)
if cuda_ver < 10.0:
return False
return True
except RuntimeError:
return False
def have_bf16(compute_version):
"""Either bf16 support is provided in the compute capability or not
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "8.0")
"""
major, _ = parse_compute_version(compute_version)
if major >= 8:
return True
return False
| en | 0.665033 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name Utility to invoke nvcc compiler in the system Compile cuda code with NVCC from env. Parameters ---------- code : str The cuda code. target : str The target format arch : str The architecture options : str or list of str The additional options path_target : str, optional Output file. Return ------ cubin : bytearray The bytearray of the cubin # auto detect the compute arch argument # NOTE: ccbin option can be used to tell nvcc where to find the c++ compiler # just in case it is not in the path. On Windows it is not in the path by default. # However, we cannot use TVM_CXX_COMPILER_PATH because the runtime env. # Because it is hard to do runtime compiler detection, we require nvcc is configured # correctly by default. # if cxx_compiler_path != "": # cmd += ["-ccbin", cxx_compiler_path] Utility function to find cuda path Returns ------- path : str Path to cuda root. Utility function to get cuda version Parameters ---------- cuda_path : str Path to cuda root. Returns ------- version : float The cuda version # Debian/Ubuntu repackaged CUDA path Utility function to find libdevice Parameters ---------- arch : int The compute architecture in int Returns ------- path : str Path to libdevice. # Debian/Ubuntu repackaged CUDA path Utility function to get compute capability of compilation target. Looks for the arch in three different places, first in the target attributes, then the global scope, and finally the GPU device (if it exists). Parameters ---------- target : tvm.target.Target, optional The compilation target Returns ------- compute_version : str compute capability of a GPU (e.g. "8.0") # 1. Target # 2. Global scope # pylint: disable=import-outside-toplevel # 3. GPU Parse compute capability string to divide major and minor version Parameters ---------- compute_version : str compute capability of a GPU (e.g. "6.0") Returns ------- major : int major version number minor : int minor version number # pylint: disable=raise-missing-from Either fp16 support is provided in the compute capability or not Parameters ---------- compute_version: str compute capability of a GPU (e.g. "6.0") # fp 16 support in reference to: # https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions Either int8 support is provided in the compute capability or not Parameters ---------- compute_version : str compute capability of a GPU (e.g. "6.1") Either TensorCore support is provided in the compute capability or not Parameters ---------- compute_version : str, optional compute capability of a GPU (e.g. "7.0"). target : tvm.target.Target, optional The compilation target, will be used to determine arch if compute_version isn't specified. 
# Compute version will be in the form "sm_{major}{minor}" Either CUDA Graph support is provided Either bf16 support is provided in the compute capability or not Parameters ---------- compute_version : str compute capability of a GPU (e.g. "8.0") | 2.128241 | 2 |
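# A hedged usage sketch for the compile_cuda helper defined in the record above. It
# assumes nvcc is on PATH; the kernel source and the sm_70 arch are illustrative
# choices, not values taken from the original file.
from tvm.contrib import nvcc

kernel_src = r"""
extern "C" __global__ void add_one(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        x[i] = x[i] + 1.0f;
    }
}
"""

# Passing an explicit arch avoids the GPU auto-detection branch shown above.
ptx = nvcc.compile_cuda(kernel_src, target="ptx", arch="sm_70")
print(len(ptx), "bytes of compiled PTX")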
calc/history/calculations.py | dhruvshah1996/Project3 | 0 | 785 | """Calculation history Class"""
from calc.calculations.addition import Addition
from calc.calculations.subtraction import Subtraction
from calc.calculations.multiplication import Multiplication
from calc.calculations.division import Division
class Calculations:
"""Calculations class manages the history of calculations"""
history = []
# pylint: disable=too-few-public-methods
@staticmethod
def clear_history():
"""clear the history of calculations"""
Calculations.history.clear()
return True
@staticmethod
def count_history():
"""get number of items in history"""
return len(Calculations.history)
@staticmethod
def get_last_calculation_object():
"""get last calculation"""
return Calculations.history[-1]
@staticmethod
def get_last_calculation_result_value():
"""get last calculation"""
calculation = Calculations.get_last_calculation_object()
return calculation.get_result()
@staticmethod
def get_first_calculation():
"""get first calculation"""
return Calculations.history[0]
@staticmethod
def get_calculation(num):
""" get a specific calculation from history"""
return Calculations.history[num]
@staticmethod
def add_calculation(calculation):
""" get a generic calculation from history"""
return Calculations.history.append(calculation)
@staticmethod
def add_addition_calculation_to_history(values):
"""create an addition and add object to history using factory method create"""
Calculations.add_calculation(Addition.create(values))
#Get the result of the calculation
return True
@staticmethod
def add_subtraction_calculation_to_history(values):
"""create a subtraction object to history using factory method create"""
Calculations.add_calculation(Subtraction.create(values))
return True
@staticmethod
def add_multiplication_calculation_to_history(values):
"""Add a multiplication object to history using factory method create"""
Calculations.add_calculation(Multiplication.create(values))
return True
@staticmethod
def add_division_calculation_to_history(values):
"Add a division object to history using factory method create"
Calculations.add_calculation(Division.create(values))
return True | """Calculation history Class"""
from calc.calculations.addition import Addition
from calc.calculations.subtraction import Subtraction
from calc.calculations.multiplication import Multiplication
from calc.calculations.division import Division
class Calculations:
"""Calculations class manages the history of calculations"""
history = []
# pylint: disable=too-few-public-methods
@staticmethod
def clear_history():
"""clear the history of calculations"""
Calculations.history.clear()
return True
@staticmethod
def count_history():
"""get number of items in history"""
return len(Calculations.history)
@staticmethod
def get_last_calculation_object():
"""get last calculation"""
return Calculations.history[-1]
@staticmethod
def get_last_calculation_result_value():
"""get last calculation"""
calculation = Calculations.get_last_calculation_object()
return calculation.get_result()
@staticmethod
def get_first_calculation():
"""get first calculation"""
return Calculations.history[0]
@staticmethod
def get_calculation(num):
""" get a specific calculation from history"""
return Calculations.history[num]
@staticmethod
def add_calculation(calculation):
""" get a generic calculation from history"""
return Calculations.history.append(calculation)
@staticmethod
def add_addition_calculation_to_history(values):
"""create an addition and add object to history using factory method create"""
Calculations.add_calculation(Addition.create(values))
#Get the result of the calculation
return True
@staticmethod
def add_subtraction_calculation_to_history(values):
"""create a subtraction object to history using factory method create"""
Calculations.add_calculation(Subtraction.create(values))
return True
@staticmethod
def add_multiplication_calculation_to_history(values):
"""Add a multiplication object to history using factory method create"""
Calculations.add_calculation(Multiplication.create(values))
return True
@staticmethod
def add_division_calculation_to_history(values):
"Add a division object to history using factory method create"
Calculations.add_calculation(Division.create(values))
return True | en | 0.697037 | Calculation history Class Calculations class manages the history of calculations # pylint: disable=too-few-public-methods clear the history of calculations get number of items in history get last calculation get last calculation get first calculation get a specific calculation from history get a generic calculation from history create an addition and add object to history using factory method create #Get the result of the calculation create a subtraction object to history using factory method create Add a multiplication object to history using factory method create | 3.79278 | 4 |
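# A short usage sketch for the Calculations facade in the record above, assuming the
# calc package (Addition, Multiplication, ...) is importable exactly as shown there.
from calc.history.calculations import Calculations

Calculations.clear_history()
Calculations.add_addition_calculation_to_history((1, 2, 3))
Calculations.add_multiplication_calculation_to_history((2, 4))
print(Calculations.count_history())                      # -> 2
print(Calculations.get_last_calculation_result_value())  # result of the 2 * 4 calculation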
Python/17 - 081 - extraindo dados de uma lista.py | matheusguerreiro/python | 0 | 786 | <gh_stars>0
# Aula 17 (Listas (Parte 1))
valores = []
while True:
valor = int(input('Digite um Valor ou -1 para Finalizar: '))
if valor < 0:
print('\nFinalizando...')
break
else:
valores.append(valor)
print(f'Foram digitados {len(valores)} números')
valores.sort(reverse=True)
print(f'Lista ordenada de forma decrescente: {valores}')
if 5 in valores:
valores.reverse()
print(f'O valor 5 foi digitado e está na {valores.index(5)} posição.')
else:
print('Valor 5 não encontrado na lista.')
| # Aula 17 (Listas (Parte 1))
valores = []
while True:
valor = int(input('Digite um Valor ou -1 para Finalizar: '))
if valor < 0:
print('\nFinalizando...')
break
else:
valores.append(valor)
print(f'Foram digitados {len(valores)} números')
valores.sort(reverse=True)
print(f'Lista ordenada de forma decrescente: {valores}')
if 5 in valores:
valores.reverse()
print(f'O valor 5 foi digitado e está na {valores.index(5)} posição.')
else:
print('Valor 5 não encontrado na lista.') | pt | 0.467859 | # Aula 17 (Listas (Parte 1)) | 3.98395 | 4 |
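# An English-commented sketch of the same exercise as the record above: collect numbers
# until a negative sentinel, sort them in descending order and report where 5 landed.
# The hard-coded list stands in for the interactive input() loop.
values = [7, 5, 12, 3]
values.sort(reverse=True)
print(f"{len(values)} numbers were entered, sorted descending: {values}")
if 5 in values:
    print(f"The value 5 was entered and sits at index {values.index(5)}.")
else:
    print("The value 5 was not entered.")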
yekpay/migrations/0014_auto_20181120_1453.py | maryam-afzp/django-yekpay | 3 | 787 | <filename>yekpay/migrations/0014_auto_20181120_1453.py
# Generated by Django 2.0.9 on 2018-11-20 11:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('yekpay', '0013_auto_20181030_1911'),
]
operations = [
migrations.RenameField(
model_name='transaction',
old_name='authorityStart',
new_name='authority_start',
),
migrations.RenameField(
model_name='transaction',
old_name='authorityVerify',
new_name='authority_verify',
),
migrations.RenameField(
model_name='transaction',
old_name='failureReason',
new_name='failure_reason',
),
migrations.RenameField(
model_name='transaction',
old_name='firstName',
new_name='first_name',
),
migrations.RenameField(
model_name='transaction',
old_name='fromCurrencyCode',
new_name='from_currency_code',
),
migrations.RenameField(
model_name='transaction',
old_name='lastName',
new_name='last_name',
),
migrations.RenameField(
model_name='transaction',
old_name='orderNumber',
new_name='order_number',
),
migrations.RenameField(
model_name='transaction',
old_name='postalCode',
new_name='postal_code',
),
migrations.RenameField(
model_name='transaction',
old_name='toCurrencyCode',
new_name='to_currency_code',
),
migrations.AddField(
model_name='transaction',
name='simulation',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='transaction',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| <filename>yekpay/migrations/0014_auto_20181120_1453.py
# Generated by Django 2.0.9 on 2018-11-20 11:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('yekpay', '0013_auto_20181030_1911'),
]
operations = [
migrations.RenameField(
model_name='transaction',
old_name='authorityStart',
new_name='authority_start',
),
migrations.RenameField(
model_name='transaction',
old_name='authorityVerify',
new_name='authority_verify',
),
migrations.RenameField(
model_name='transaction',
old_name='failureReason',
new_name='failure_reason',
),
migrations.RenameField(
model_name='transaction',
old_name='firstName',
new_name='first_name',
),
migrations.RenameField(
model_name='transaction',
old_name='fromCurrencyCode',
new_name='from_currency_code',
),
migrations.RenameField(
model_name='transaction',
old_name='lastName',
new_name='last_name',
),
migrations.RenameField(
model_name='transaction',
old_name='orderNumber',
new_name='order_number',
),
migrations.RenameField(
model_name='transaction',
old_name='postalCode',
new_name='postal_code',
),
migrations.RenameField(
model_name='transaction',
old_name='toCurrencyCode',
new_name='to_currency_code',
),
migrations.AddField(
model_name='transaction',
name='simulation',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='transaction',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| en | 0.84822 | # Generated by Django 2.0.9 on 2018-11-20 11:23 | 1.575184 | 2 |
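# A hedged sketch of applying a rename migration such as 0014 above from Python;
# `python manage.py migrate yekpay` is the usual CLI equivalent. The settings module
# name is an assumption about the host project, not something the original file defines.
import os

import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")  # assumed module path
django.setup()
# RenameField keeps the stored data; only the column names move to snake_case.
call_command("migrate", "yekpay", "0014_auto_20181120_1453")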
polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/modules/__init__.py | nishaq503/polus-plugins-dl | 0 | 788 | from .bn import ABN, InPlaceABN, InPlaceABNWrapper, InPlaceABNSync, InPlaceABNSyncWrapper
from .misc import GlobalAvgPool2d
from .residual import IdentityResidualBlock
from .dense import DenseModule
| from .bn import ABN, InPlaceABN, InPlaceABNWrapper, InPlaceABNSync, InPlaceABNSyncWrapper
from .misc import GlobalAvgPool2d
from .residual import IdentityResidualBlock
from .dense import DenseModule
| none | 1 | 0.996387 | 1 |
|
python/pyarrow/tests/test_compute.py | kylebrandt/arrow | 0 | 789 | <reponame>kylebrandt/arrow
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import pyarrow as pa
import pyarrow.compute
all_array_types = [
('bool', [True, False, False, True, True]),
('uint8', np.arange(5)),
('int8', np.arange(5)),
('uint16', np.arange(5)),
('int16', np.arange(5)),
('uint32', np.arange(5)),
('int32', np.arange(5)),
('uint64', np.arange(5, 10)),
('int64', np.arange(5, 10)),
('float', np.arange(0, 0.5, 0.1)),
('double', np.arange(0, 0.5, 0.1)),
('string', ['a', 'b', None, 'ddd', 'ee']),
('binary', [b'a', b'b', b'c', b'ddd', b'ee']),
(pa.binary(3), [b'abc', b'bcd', b'cde', b'def', b'efg']),
(pa.list_(pa.int8()), [[1, 2], [3, 4], [5, 6], None, [9, 16]]),
(pa.large_list(pa.int16()), [[1], [2, 3, 4], [5, 6], None, [9, 16]]),
(pa.struct([('a', pa.int8()), ('b', pa.int8())]), [
{'a': 1, 'b': 2}, None, {'a': 3, 'b': 4}, None, {'a': 5, 'b': 6}]),
]
numerical_arrow_types = [
pa.int8(),
pa.int16(),
pa.int64(),
pa.uint8(),
pa.uint16(),
pa.uint64(),
pa.float32(),
pa.float64()
]
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_array(arrow_type):
arr = pa.array([1, 2, 3, 4], type=arrow_type)
assert arr.sum() == 10
assert pa.compute.sum(arr) == 10
arr = pa.array([], type=arrow_type)
assert arr.sum() == None # noqa: E711
assert pa.compute.sum(arr) == None # noqa: E711
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_chunked_array(arrow_type):
arr = pa.chunked_array([pa.array([1, 2, 3, 4], type=arrow_type)])
assert pa.compute.sum(arr) == 10
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type), pa.array([3, 4], type=arrow_type)
])
assert pa.compute.sum(arr) == 10
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type),
pa.array([], type=arrow_type),
pa.array([3, 4], type=arrow_type)
])
assert pa.compute.sum(arr) == 10
arr = pa.chunked_array((), type=arrow_type)
print(arr, type(arr))
assert arr.num_chunks == 0
assert pa.compute.sum(arr) == None # noqa: E711
@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_take(ty, values):
arr = pa.array(values, type=ty)
for indices_type in [pa.int8(), pa.int64()]:
indices = pa.array([0, 4, 2, None], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([values[0], values[4], values[2], None], type=ty)
assert result.equals(expected)
# empty indices
indices = pa.array([], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([], type=ty)
assert result.equals(expected)
indices = pa.array([2, 5])
with pytest.raises(IndexError):
arr.take(indices)
indices = pa.array([2, -1])
with pytest.raises(IndexError):
arr.take(indices)
def test_take_indices_types():
arr = pa.array(range(5))
for indices_type in ['uint8', 'int8', 'uint16', 'int16',
'uint32', 'int32', 'uint64', 'int64']:
indices = pa.array([0, 4, 2, None], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([0, 4, 2, None])
assert result.equals(expected)
for indices_type in [pa.float32(), pa.float64()]:
indices = pa.array([0, 4, 2], type=indices_type)
with pytest.raises(NotImplementedError):
arr.take(indices)
@pytest.mark.parametrize('ordered', [False, True])
def test_take_dictionary(ordered):
arr = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
ordered=ordered)
result = arr.take(pa.array([0, 1, 3]))
result.validate()
assert result.to_pylist() == ['a', 'b', 'a']
assert result.dictionary.to_pylist() == ['a', 'b', 'c']
assert result.type.ordered is ordered
@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_filter(ty, values):
arr = pa.array(values, type=ty)
mask = pa.array([True, False, False, True, None])
result = arr.filter(mask, null_selection_behavior='drop')
result.validate()
assert result.equals(pa.array([values[0], values[3]], type=ty))
result = arr.filter(mask, null_selection_behavior='emit_null')
result.validate()
assert result.equals(pa.array([values[0], values[3], None], type=ty))
# non-boolean dtype
mask = pa.array([0, 1, 0, 1, 0])
with pytest.raises(NotImplementedError):
arr.filter(mask)
# wrong length
mask = pa.array([True, False, True])
with pytest.raises(ValueError, match="must all be the same length"):
arr.filter(mask)
def test_filter_chunked_array():
arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
expected_drop = pa.chunked_array([["a"], ["e"]])
expected_null = pa.chunked_array([["a"], [None, "e"]])
for mask in [
# mask is array
pa.array([True, False, None, False, True]),
# mask is chunked array
pa.chunked_array([[True, False, None], [False, True]]),
# mask is python object
[True, False, None, False, True]
]:
result = arr.filter(mask)
assert result.equals(expected_drop)
result = arr.filter(mask, null_selection_behavior="emit_null")
assert result.equals(expected_null)
def test_filter_record_batch():
batch = pa.record_batch(
[pa.array(["a", None, "c", "d", "e"])], names=["a'"])
# mask is array
mask = pa.array([True, False, None, False, True])
result = batch.filter(mask)
expected = pa.record_batch([pa.array(["a", "e"])], names=["a'"])
assert result.equals(expected)
result = batch.filter(mask, null_selection_behavior="emit_null")
expected = pa.record_batch([pa.array(["a", None, "e"])], names=["a'"])
assert result.equals(expected)
def test_filter_table():
table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])
expected_drop = pa.table([pa.array(["a", "e"])], names=["a"])
expected_null = pa.table([pa.array(["a", None, "e"])], names=["a"])
for mask in [
# mask is array
pa.array([True, False, None, False, True]),
# mask is chunked array
pa.chunked_array([[True, False], [None, False, True]]),
# mask is python object
[True, False, None, False, True]
]:
result = table.filter(mask)
assert result.equals(expected_drop)
result = table.filter(mask, null_selection_behavior="emit_null")
assert result.equals(expected_null)
def test_filter_errors():
arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
batch = pa.record_batch(
[pa.array(["a", None, "c", "d", "e"])], names=["a'"])
table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])
for obj in [arr, batch, table]:
# non-boolean dtype
mask = pa.array([0, 1, 0, 1, 0])
with pytest.raises(NotImplementedError):
obj.filter(mask)
# wrong length
mask = pa.array([True, False, True])
with pytest.raises(pa.ArrowInvalid,
match="must all be the same length"):
obj.filter(mask)
@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_array(typ):
if typ == "array":
def con(values): return pa.array(values)
else:
def con(values): return pa.chunked_array([values])
arr1 = con([1, 2, 3, 4, None])
arr2 = con([1, 1, 4, None, 4])
result = arr1 == arr2
assert result.equals(con([True, False, False, None, None]))
result = arr1 != arr2
assert result.equals(con([False, True, True, None, None]))
result = arr1 < arr2
assert result.equals(con([False, False, True, None, None]))
result = arr1 <= arr2
assert result.equals(con([True, False, True, None, None]))
result = arr1 > arr2
assert result.equals(con([False, True, False, None, None]))
result = arr1 >= arr2
assert result.equals(con([True, True, False, None, None]))
@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_scalar(typ):
if typ == "array":
def con(values): return pa.array(values)
else:
def con(values): return pa.chunked_array([values])
arr = con([1, 2, 3, None])
# TODO this is a hacky way to construct a scalar ..
scalar = pa.array([2]).sum()
result = arr == scalar
assert result.equals(con([False, True, False, None]))
result = arr != scalar
assert result.equals(con([True, False, True, None]))
result = arr < scalar
assert result.equals(con([True, False, False, None]))
result = arr <= scalar
assert result.equals(con([True, True, False, None]))
result = arr > scalar
assert result.equals(con([False, False, True, None]))
result = arr >= scalar
assert result.equals(con([False, True, True, None]))
def test_compare_chunked_array_mixed():
arr = pa.array([1, 2, 3, 4, None])
arr_chunked = pa.chunked_array([[1, 2, 3], [4, None]])
arr_chunked2 = pa.chunked_array([[1, 2], [3, 4, None]])
expected = pa.chunked_array([[True, True, True, True, None]])
for result in [
arr == arr_chunked,
arr_chunked == arr,
arr_chunked == arr_chunked2,
]:
assert result.equals(expected)
def test_arithmetic_add():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pa.compute.add(left, right)
expected = pa.array([1, 1, 4, 6, 8])
assert result.equals(expected)
def test_arithmetic_subtract():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pa.compute.subtract(left, right)
expected = pa.array([1, 3, 2, 2, 2])
assert result.equals(expected)
def test_arithmetic_multiply():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pa.compute.multiply(left, right)
expected = pa.array([0, -2, 3, 8, 15])
assert result.equals(expected)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import pyarrow as pa
import pyarrow.compute
all_array_types = [
('bool', [True, False, False, True, True]),
('uint8', np.arange(5)),
('int8', np.arange(5)),
('uint16', np.arange(5)),
('int16', np.arange(5)),
('uint32', np.arange(5)),
('int32', np.arange(5)),
('uint64', np.arange(5, 10)),
('int64', np.arange(5, 10)),
('float', np.arange(0, 0.5, 0.1)),
('double', np.arange(0, 0.5, 0.1)),
('string', ['a', 'b', None, 'ddd', 'ee']),
('binary', [b'a', b'b', b'c', b'ddd', b'ee']),
(pa.binary(3), [b'abc', b'bcd', b'cde', b'def', b'efg']),
(pa.list_(pa.int8()), [[1, 2], [3, 4], [5, 6], None, [9, 16]]),
(pa.large_list(pa.int16()), [[1], [2, 3, 4], [5, 6], None, [9, 16]]),
(pa.struct([('a', pa.int8()), ('b', pa.int8())]), [
{'a': 1, 'b': 2}, None, {'a': 3, 'b': 4}, None, {'a': 5, 'b': 6}]),
]
numerical_arrow_types = [
pa.int8(),
pa.int16(),
pa.int64(),
pa.uint8(),
pa.uint16(),
pa.uint64(),
pa.float32(),
pa.float64()
]
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_array(arrow_type):
arr = pa.array([1, 2, 3, 4], type=arrow_type)
assert arr.sum() == 10
assert pa.compute.sum(arr) == 10
arr = pa.array([], type=arrow_type)
assert arr.sum() == None # noqa: E711
assert pa.compute.sum(arr) == None # noqa: E711
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_chunked_array(arrow_type):
arr = pa.chunked_array([pa.array([1, 2, 3, 4], type=arrow_type)])
assert pa.compute.sum(arr) == 10
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type), pa.array([3, 4], type=arrow_type)
])
assert pa.compute.sum(arr) == 10
arr = pa.chunked_array([
pa.array([1, 2], type=arrow_type),
pa.array([], type=arrow_type),
pa.array([3, 4], type=arrow_type)
])
assert pa.compute.sum(arr) == 10
arr = pa.chunked_array((), type=arrow_type)
print(arr, type(arr))
assert arr.num_chunks == 0
assert pa.compute.sum(arr) == None # noqa: E711
@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_take(ty, values):
arr = pa.array(values, type=ty)
for indices_type in [pa.int8(), pa.int64()]:
indices = pa.array([0, 4, 2, None], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([values[0], values[4], values[2], None], type=ty)
assert result.equals(expected)
# empty indices
indices = pa.array([], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([], type=ty)
assert result.equals(expected)
indices = pa.array([2, 5])
with pytest.raises(IndexError):
arr.take(indices)
indices = pa.array([2, -1])
with pytest.raises(IndexError):
arr.take(indices)
def test_take_indices_types():
arr = pa.array(range(5))
for indices_type in ['uint8', 'int8', 'uint16', 'int16',
'uint32', 'int32', 'uint64', 'int64']:
indices = pa.array([0, 4, 2, None], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([0, 4, 2, None])
assert result.equals(expected)
for indices_type in [pa.float32(), pa.float64()]:
indices = pa.array([0, 4, 2], type=indices_type)
with pytest.raises(NotImplementedError):
arr.take(indices)
@pytest.mark.parametrize('ordered', [False, True])
def test_take_dictionary(ordered):
arr = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
ordered=ordered)
result = arr.take(pa.array([0, 1, 3]))
result.validate()
assert result.to_pylist() == ['a', 'b', 'a']
assert result.dictionary.to_pylist() == ['a', 'b', 'c']
assert result.type.ordered is ordered
@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_filter(ty, values):
arr = pa.array(values, type=ty)
mask = pa.array([True, False, False, True, None])
result = arr.filter(mask, null_selection_behavior='drop')
result.validate()
assert result.equals(pa.array([values[0], values[3]], type=ty))
result = arr.filter(mask, null_selection_behavior='emit_null')
result.validate()
assert result.equals(pa.array([values[0], values[3], None], type=ty))
# non-boolean dtype
mask = pa.array([0, 1, 0, 1, 0])
with pytest.raises(NotImplementedError):
arr.filter(mask)
# wrong length
mask = pa.array([True, False, True])
with pytest.raises(ValueError, match="must all be the same length"):
arr.filter(mask)
def test_filter_chunked_array():
arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
expected_drop = pa.chunked_array([["a"], ["e"]])
expected_null = pa.chunked_array([["a"], [None, "e"]])
for mask in [
# mask is array
pa.array([True, False, None, False, True]),
# mask is chunked array
pa.chunked_array([[True, False, None], [False, True]]),
# mask is python object
[True, False, None, False, True]
]:
result = arr.filter(mask)
assert result.equals(expected_drop)
result = arr.filter(mask, null_selection_behavior="emit_null")
assert result.equals(expected_null)
def test_filter_record_batch():
batch = pa.record_batch(
[pa.array(["a", None, "c", "d", "e"])], names=["a'"])
# mask is array
mask = pa.array([True, False, None, False, True])
result = batch.filter(mask)
expected = pa.record_batch([pa.array(["a", "e"])], names=["a'"])
assert result.equals(expected)
result = batch.filter(mask, null_selection_behavior="emit_null")
expected = pa.record_batch([pa.array(["a", None, "e"])], names=["a'"])
assert result.equals(expected)
def test_filter_table():
table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])
expected_drop = pa.table([pa.array(["a", "e"])], names=["a"])
expected_null = pa.table([pa.array(["a", None, "e"])], names=["a"])
for mask in [
# mask is array
pa.array([True, False, None, False, True]),
# mask is chunked array
pa.chunked_array([[True, False], [None, False, True]]),
# mask is python object
[True, False, None, False, True]
]:
result = table.filter(mask)
assert result.equals(expected_drop)
result = table.filter(mask, null_selection_behavior="emit_null")
assert result.equals(expected_null)
def test_filter_errors():
arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
batch = pa.record_batch(
[pa.array(["a", None, "c", "d", "e"])], names=["a'"])
table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])
for obj in [arr, batch, table]:
# non-boolean dtype
mask = pa.array([0, 1, 0, 1, 0])
with pytest.raises(NotImplementedError):
obj.filter(mask)
# wrong length
mask = pa.array([True, False, True])
with pytest.raises(pa.ArrowInvalid,
match="must all be the same length"):
obj.filter(mask)
@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_array(typ):
if typ == "array":
def con(values): return pa.array(values)
else:
def con(values): return pa.chunked_array([values])
arr1 = con([1, 2, 3, 4, None])
arr2 = con([1, 1, 4, None, 4])
result = arr1 == arr2
assert result.equals(con([True, False, False, None, None]))
result = arr1 != arr2
assert result.equals(con([False, True, True, None, None]))
result = arr1 < arr2
assert result.equals(con([False, False, True, None, None]))
result = arr1 <= arr2
assert result.equals(con([True, False, True, None, None]))
result = arr1 > arr2
assert result.equals(con([False, True, False, None, None]))
result = arr1 >= arr2
assert result.equals(con([True, True, False, None, None]))
@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_scalar(typ):
if typ == "array":
def con(values): return pa.array(values)
else:
def con(values): return pa.chunked_array([values])
arr = con([1, 2, 3, None])
# TODO this is a hacky way to construct a scalar ..
scalar = pa.array([2]).sum()
result = arr == scalar
assert result.equals(con([False, True, False, None]))
result = arr != scalar
assert result.equals(con([True, False, True, None]))
result = arr < scalar
assert result.equals(con([True, False, False, None]))
result = arr <= scalar
assert result.equals(con([True, True, False, None]))
result = arr > scalar
assert result.equals(con([False, False, True, None]))
result = arr >= scalar
assert result.equals(con([False, True, True, None]))
def test_compare_chunked_array_mixed():
arr = pa.array([1, 2, 3, 4, None])
arr_chunked = pa.chunked_array([[1, 2, 3], [4, None]])
arr_chunked2 = pa.chunked_array([[1, 2], [3, 4, None]])
expected = pa.chunked_array([[True, True, True, True, None]])
for result in [
arr == arr_chunked,
arr_chunked == arr,
arr_chunked == arr_chunked2,
]:
assert result.equals(expected)
def test_arithmetic_add():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pa.compute.add(left, right)
expected = pa.array([1, 1, 4, 6, 8])
assert result.equals(expected)
def test_arithmetic_subtract():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pa.compute.subtract(left, right)
expected = pa.array([1, 3, 2, 2, 2])
assert result.equals(expected)
def test_arithmetic_multiply():
left = pa.array([1, 2, 3, 4, 5])
right = pa.array([0, -1, 1, 2, 3])
result = pa.compute.multiply(left, right)
expected = pa.array([0, -2, 3, 8, 15])
assert result.equals(expected) | en | 0.824552 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # noqa: E711 # noqa: E711 # noqa: E711 # empty indices # non-boolean dtype # wrong length # mask is array # mask is chunked array # mask is python object # mask is array # mask is array # mask is chunked array # mask is python object # non-boolean dtype # wrong length # TODO this is a hacky way to construct a scalar .. | 1.627794 | 2 |
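# A small sketch of the pyarrow.compute behaviour exercised by the tests above:
# sum, take, filter and element-wise arithmetic on a plain array.
import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([1, 2, 3, None, 5])
print(pc.sum(arr))                    # nulls are skipped -> 11
print(arr.take(pa.array([0, 4])))     # positional gather -> [1, 5]
mask = pa.array([True, False, True, True, False])
print(arr.filter(mask))               # default 'drop' keeps rows 0, 2, 3 -> [1, 3, None]
print(pc.add(arr, pa.array([10, 10, 10, 10, 10])))  # element-wise addition, null stays null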
python/sdk/client/api/log_api.py | ashwinath/merlin | 0 | 790 | # coding: utf-8
"""
Merlin
API Guide for accessing Merlin's model management, deployment, and serving functionalities # noqa: E501
OpenAPI spec version: 0.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from client.api_client import ApiClient
class LogApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def logs_get(self, name, pod_name, namespace, cluster, **kwargs): # noqa: E501
"""Retrieve log from a container # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.logs_get(name, pod_name, namespace, cluster, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: (required)
:param str pod_name: (required)
:param str namespace: (required)
:param str cluster: (required)
:param str follow:
:param str limit_bytes:
:param str pretty:
:param str previous:
:param str since_seconds:
:param str since_time:
:param str tail_lines:
:param str timestamps:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.logs_get_with_http_info(name, pod_name, namespace, cluster, **kwargs) # noqa: E501
else:
(data) = self.logs_get_with_http_info(name, pod_name, namespace, cluster, **kwargs) # noqa: E501
return data
def logs_get_with_http_info(self, name, pod_name, namespace, cluster, **kwargs): # noqa: E501
"""Retrieve log from a container # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.logs_get_with_http_info(name, pod_name, namespace, cluster, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: (required)
:param str pod_name: (required)
:param str namespace: (required)
:param str cluster: (required)
:param str follow:
:param str limit_bytes:
:param str pretty:
:param str previous:
:param str since_seconds:
:param str since_time:
:param str tail_lines:
:param str timestamps:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pod_name', 'namespace', 'cluster', 'follow', 'limit_bytes', 'pretty', 'previous', 'since_seconds', 'since_time', 'tail_lines', 'timestamps'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method logs_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `logs_get`") # noqa: E501
# verify the required parameter 'pod_name' is set
if ('pod_name' not in params or
params['pod_name'] is None):
raise ValueError("Missing the required parameter `pod_name` when calling `logs_get`") # noqa: E501
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `logs_get`") # noqa: E501
# verify the required parameter 'cluster' is set
if ('cluster' not in params or
params['cluster'] is None):
raise ValueError("Missing the required parameter `cluster` when calling `logs_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
if 'pod_name' in params:
query_params.append(('pod_name', params['pod_name'])) # noqa: E501
if 'namespace' in params:
query_params.append(('namespace', params['namespace'])) # noqa: E501
if 'cluster' in params:
query_params.append(('cluster', params['cluster'])) # noqa: E501
if 'follow' in params:
query_params.append(('follow', params['follow'])) # noqa: E501
if 'limit_bytes' in params:
query_params.append(('limit_bytes', params['limit_bytes'])) # noqa: E501
if 'pretty' in params:
query_params.append(('pretty', params['pretty'])) # noqa: E501
if 'previous' in params:
query_params.append(('previous', params['previous'])) # noqa: E501
if 'since_seconds' in params:
query_params.append(('since_seconds', params['since_seconds'])) # noqa: E501
if 'since_time' in params:
query_params.append(('since_time', params['since_time'])) # noqa: E501
if 'tail_lines' in params:
query_params.append(('tail_lines', params['tail_lines'])) # noqa: E501
if 'timestamps' in params:
query_params.append(('timestamps', params['timestamps'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/logs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| # coding: utf-8
"""
Merlin
API Guide for accessing Merlin's model management, deployment, and serving functionalities # noqa: E501
OpenAPI spec version: 0.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from client.api_client import ApiClient
class LogApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def logs_get(self, name, pod_name, namespace, cluster, **kwargs): # noqa: E501
"""Retrieve log from a container # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.logs_get(name, pod_name, namespace, cluster, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: (required)
:param str pod_name: (required)
:param str namespace: (required)
:param str cluster: (required)
:param str follow:
:param str limit_bytes:
:param str pretty:
:param str previous:
:param str since_seconds:
:param str since_time:
:param str tail_lines:
:param str timestamps:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.logs_get_with_http_info(name, pod_name, namespace, cluster, **kwargs) # noqa: E501
else:
(data) = self.logs_get_with_http_info(name, pod_name, namespace, cluster, **kwargs) # noqa: E501
return data
def logs_get_with_http_info(self, name, pod_name, namespace, cluster, **kwargs): # noqa: E501
"""Retrieve log from a container # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.logs_get_with_http_info(name, pod_name, namespace, cluster, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: (required)
:param str pod_name: (required)
:param str namespace: (required)
:param str cluster: (required)
:param str follow:
:param str limit_bytes:
:param str pretty:
:param str previous:
:param str since_seconds:
:param str since_time:
:param str tail_lines:
:param str timestamps:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pod_name', 'namespace', 'cluster', 'follow', 'limit_bytes', 'pretty', 'previous', 'since_seconds', 'since_time', 'tail_lines', 'timestamps'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method logs_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `logs_get`") # noqa: E501
# verify the required parameter 'pod_name' is set
if ('pod_name' not in params or
params['pod_name'] is None):
raise ValueError("Missing the required parameter `pod_name` when calling `logs_get`") # noqa: E501
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `logs_get`") # noqa: E501
# verify the required parameter 'cluster' is set
if ('cluster' not in params or
params['cluster'] is None):
raise ValueError("Missing the required parameter `cluster` when calling `logs_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
if 'pod_name' in params:
query_params.append(('pod_name', params['pod_name'])) # noqa: E501
if 'namespace' in params:
query_params.append(('namespace', params['namespace'])) # noqa: E501
if 'cluster' in params:
query_params.append(('cluster', params['cluster'])) # noqa: E501
if 'follow' in params:
query_params.append(('follow', params['follow'])) # noqa: E501
if 'limit_bytes' in params:
query_params.append(('limit_bytes', params['limit_bytes'])) # noqa: E501
if 'pretty' in params:
query_params.append(('pretty', params['pretty'])) # noqa: E501
if 'previous' in params:
query_params.append(('previous', params['previous'])) # noqa: E501
if 'since_seconds' in params:
query_params.append(('since_seconds', params['since_seconds'])) # noqa: E501
if 'since_time' in params:
query_params.append(('since_time', params['since_time'])) # noqa: E501
if 'tail_lines' in params:
query_params.append(('tail_lines', params['tail_lines'])) # noqa: E501
if 'timestamps' in params:
query_params.append(('timestamps', params['timestamps'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/logs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| en | 0.558543 | # coding: utf-8 Merlin API Guide for accessing Merlin's model management, deployment, and serving functionalities # noqa: E501 OpenAPI spec version: 0.7.0 Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 # python 2 and python 3 compatibility library NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen # noqa: E501 Retrieve log from a container # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.logs_get(name, pod_name, namespace, cluster, async_req=True) >>> result = thread.get() :param async_req bool :param str name: (required) :param str pod_name: (required) :param str namespace: (required) :param str cluster: (required) :param str follow: :param str limit_bytes: :param str pretty: :param str previous: :param str since_seconds: :param str since_time: :param str tail_lines: :param str timestamps: :return: None If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 # noqa: E501 Retrieve log from a container # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.logs_get_with_http_info(name, pod_name, namespace, cluster, async_req=True) >>> result = thread.get() :param async_req bool :param str name: (required) :param str pod_name: (required) :param str namespace: (required) :param str cluster: (required) :param str follow: :param str limit_bytes: :param str pretty: :param str previous: :param str since_seconds: :param str since_time: :param str tail_lines: :param str timestamps: :return: None If the method is called asynchronously, returns the request thread. # noqa: E501 # verify the required parameter 'name' is set # noqa: E501 # verify the required parameter 'pod_name' is set # noqa: E501 # verify the required parameter 'namespace' is set # noqa: E501 # verify the required parameter 'cluster' is set # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 | 1.780203 | 2 |
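Editorial usage note for the generated Merlin `LogApi` in the record above: the `logs_get` wrapper takes the four required query parameters positionally and any optional parameters as keyword arguments. The sketch below is a minimal, hypothetical caller; it assumes an `ApiClient` whose base URL and Bearer token are configured elsewhere, and every argument value is a placeholder rather than anything recorded in this file.

# Hypothetical caller for the generated LogApi above; only the method signature
# and the ApiClient import come from the generated code, everything else is assumed.
from client.api_client import ApiClient

api = LogApi(ApiClient())          # LogApi as defined in the module above
api.logs_get(
    name="my-model",               # required query parameters
    pod_name="my-model-6d5f-abc12",
    namespace="models",
    cluster="staging",
    tail_lines="100",              # optional parameters are passed as kwargs
)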
openmdao/solvers/nonlinear/nonlinear_block_jac.py | bollwyvl/OpenMDAO | 0 | 791 | """Define the NonlinearBlockJac class."""
from openmdao.recorders.recording_iteration_stack import Recording
from openmdao.solvers.solver import NonlinearSolver
from openmdao.utils.mpi import multi_proc_fail_check
class NonlinearBlockJac(NonlinearSolver):
"""
Nonlinear block Jacobi solver.
"""
SOLVER = 'NL: NLBJ'
def _single_iteration(self):
"""
Perform the operations in the iteration loop.
"""
system = self._system
self._solver_info.append_subsolver()
system._transfer('nonlinear', 'fwd')
with Recording('NonlinearBlockJac', 0, self) as rec:
# If this is a parallel group, check for analysis errors and reraise.
if len(system._subsystems_myproc) != len(system._subsystems_allprocs):
with multi_proc_fail_check(system.comm):
for subsys in system._subsystems_myproc:
subsys._solve_nonlinear()
else:
for subsys in system._subsystems_myproc:
subsys._solve_nonlinear()
system._check_child_reconf()
rec.abs = 0.0
rec.rel = 0.0
self._solver_info.pop()
def _mpi_print_header(self):
"""
Print header text before solving.
"""
if (self.options['iprint'] > 0):
pathname = self._system.pathname
if pathname:
nchar = len(pathname)
prefix = self._solver_info.prefix
header = prefix + "\n"
header += prefix + nchar * "=" + "\n"
header += prefix + pathname + "\n"
header += prefix + nchar * "="
print(header)
def _run_apply(self):
"""
Run the apply_nonlinear method on the system.
"""
system = self._system
# If this is a parallel group, check for analysis errors and reraise.
if len(system._subsystems_myproc) != len(system._subsystems_allprocs):
with multi_proc_fail_check(system.comm):
super(NonlinearBlockJac, self)._run_apply()
else:
super(NonlinearBlockJac, self)._run_apply()
| """Define the NonlinearBlockJac class."""
from openmdao.recorders.recording_iteration_stack import Recording
from openmdao.solvers.solver import NonlinearSolver
from openmdao.utils.mpi import multi_proc_fail_check
class NonlinearBlockJac(NonlinearSolver):
"""
Nonlinear block Jacobi solver.
"""
SOLVER = 'NL: NLBJ'
def _single_iteration(self):
"""
Perform the operations in the iteration loop.
"""
system = self._system
self._solver_info.append_subsolver()
system._transfer('nonlinear', 'fwd')
with Recording('NonlinearBlockJac', 0, self) as rec:
# If this is a parallel group, check for analysis errors and reraise.
if len(system._subsystems_myproc) != len(system._subsystems_allprocs):
with multi_proc_fail_check(system.comm):
for subsys in system._subsystems_myproc:
subsys._solve_nonlinear()
else:
for subsys in system._subsystems_myproc:
subsys._solve_nonlinear()
system._check_child_reconf()
rec.abs = 0.0
rec.rel = 0.0
self._solver_info.pop()
def _mpi_print_header(self):
"""
Print header text before solving.
"""
if (self.options['iprint'] > 0):
pathname = self._system.pathname
if pathname:
nchar = len(pathname)
prefix = self._solver_info.prefix
header = prefix + "\n"
header += prefix + nchar * "=" + "\n"
header += prefix + pathname + "\n"
header += prefix + nchar * "="
print(header)
def _run_apply(self):
"""
Run the apply_nonlinear method on the system.
"""
system = self._system
# If this is a parallel group, check for analysis errors and reraise.
if len(system._subsystems_myproc) != len(system._subsystems_allprocs):
with multi_proc_fail_check(system.comm):
super(NonlinearBlockJac, self)._run_apply()
else:
super(NonlinearBlockJac, self)._run_apply()
| en | 0.777544 | Define the NonlinearBlockJac class. Nonlinear block Jacobi solver. Perform the operations in the iteration loop. # If this is a parallel group, check for analysis errors and reraise. Print header text before solving. Run the apply_nonlinear method on the system. # If this is a parallel group, check for analysis errors and reraise. | 2.477365 | 2 |
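For context on the solver in the record above: NonlinearBlockJac is attached to an OpenMDAO group that contains a data cycle and iterates its subsystems in Jacobi fashion. The sketch below is a minimal, illustrative setup against the public OpenMDAO API; the two ExecComp components, the connections, and the option values are invented for the example and are not taken from this record.

# Illustrative only: attach NonlinearBlockJac to a small coupled model.
import openmdao.api as om

prob = om.Problem()
model = prob.model
model.add_subsystem('c1', om.ExecComp('y = 0.5 * x'))
model.add_subsystem('c2', om.ExecComp('y = 2.0 - 0.1 * x'))
# The two connections form a cycle, which is what a block-Jacobi solver resolves.
model.connect('c1.y', 'c2.x')
model.connect('c2.y', 'c1.x')

model.nonlinear_solver = om.NonlinearBlockJac()
model.nonlinear_solver.options['maxiter'] = 20
model.nonlinear_solver.options['iprint'] = 2   # prints the solver header seen in _mpi_print_header

prob.setup()
prob.run_model()
print(prob['c1.y'], prob['c2.y'])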
tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py | Smokrow/tensorflow | 1 | 792 | <filename>tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapAndFilterFusion optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.data.python.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class MapAndFilterFusionTest(test.TestCase, parameterized.TestCase):
@staticmethod
def map_functions():
identity = lambda x: x
increment = lambda x: x + 1
def increment_and_square(x):
y = x + 1
return y * y
functions = [identity, increment, increment_and_square]
tests = []
for i, fun1 in enumerate(functions):
for j, fun2 in enumerate(functions):
tests.append((
"Test{}{}".format(i, j),
[fun1, fun2],
))
for k, fun3 in enumerate(functions):
tests.append((
"Test{}{}{}".format(i, j, k),
[fun1, fun2, fun3],
))
swap = lambda x, n: (n, x)
tests.append((
"Swap1",
[lambda x: (x, 42), swap],
))
tests.append((
"Swap2",
[lambda x: (x, 42), swap, swap],
))
return tuple(tests)
@parameterized.named_parameters(*map_functions.__func__())
def testMapFusion(self, functions):
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Map", "Prefetch"]))
for function in functions:
dataset = dataset.map(function)
dataset = dataset.prefetch(0).apply(optimization.optimize(["map_fusion"]))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
for x in range(5):
result = sess.run(get_next)
r = x
for function in functions:
if isinstance(r, tuple):
r = function(*r) # Pass tuple as multiple arguments.
else:
r = function(r)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@staticmethod
def map_and_filter_functions():
identity = lambda x: x
increment = lambda x: x + 1
minus_five = lambda x: x - 5
def increment_and_square(x):
y = x + 1
return y * y
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
is_odd = lambda x: math_ops.equal(x % 2, 0)
greater = lambda x: math_ops.greater(x + 5, 0)
functions = [identity, increment, minus_five, increment_and_square]
filters = [take_all, is_zero, is_odd, greater]
tests = []
for x, fun in enumerate(functions):
for y, predicate in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), fun, predicate))
# Multi output
tests.append(("Multi1", lambda x: (x, x),
lambda x, y: constant_op.constant(True)))
tests.append(
("Multi2", lambda x: (x, 2),
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
return tuple(tests)
@parameterized.named_parameters(*map_and_filter_functions.__func__())
def testMapFilterFusion(self, function, predicate):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["Map",
"FilterByLastComponent"])).map(function).filter(predicate).apply(
optimization.optimize(["map_and_filter_fusion"]))
self._testMapAndFilter(dataset, function, predicate)
def _testMapAndFilter(self, dataset, function, predicate):
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
for x in range(10):
r = function(x)
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if sess.run(b):
result = sess.run(get_next)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testAdditionalInputs(self):
a = constant_op.constant(3, dtype=dtypes.int64)
b = constant_op.constant(4, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
function = lambda x: x * x
def predicate(y):
return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
# We are currently not supporting functions with additional inputs.
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["Map", "Filter"])).map(function).filter(predicate).apply(
optimization.optimize(["map_and_filter_fusion"]))
self._testMapAndFilter(dataset, function, predicate)
@staticmethod
def filter_functions():
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
greater = lambda x: math_ops.greater(x + 5, 0)
tests = []
filters = [take_all, is_zero, greater]
identity = lambda x: x
for x, predicate_1 in enumerate(filters):
for y, predicate_2 in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), identity,
[predicate_1, predicate_2]))
for z, predicate_3 in enumerate(filters):
tests.append(("Mixed{}{}{}".format(x, y, z), identity,
[predicate_1, predicate_2, predicate_3]))
take_all_multiple = lambda x, y: constant_op.constant(True)
# Multi output
tests.append(("Multi1", lambda x: (x, x),
[take_all_multiple, take_all_multiple]))
tests.append(("Multi2", lambda x: (x, 2), [
take_all_multiple,
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)
]))
return tuple(tests)
@parameterized.named_parameters(*filter_functions.__func__())
def testFilterFusion(self, map_function, predicates):
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Map", "Filter",
"Prefetch"])).map(map_function)
for predicate in predicates:
dataset = dataset.filter(predicate)
dataset = dataset.prefetch(0).apply(
optimization.optimize(["filter_fusion"]))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
for x in range(5):
r = map_function(x)
filtered = False
for predicate in predicates:
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if not sess.run(b):
filtered = True
break
if not filtered:
result = sess.run(get_next)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| <filename>tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapAndFilterFusion optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.data.python.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class MapAndFilterFusionTest(test.TestCase, parameterized.TestCase):
@staticmethod
def map_functions():
identity = lambda x: x
increment = lambda x: x + 1
def increment_and_square(x):
y = x + 1
return y * y
functions = [identity, increment, increment_and_square]
tests = []
for i, fun1 in enumerate(functions):
for j, fun2 in enumerate(functions):
tests.append((
"Test{}{}".format(i, j),
[fun1, fun2],
))
for k, fun3 in enumerate(functions):
tests.append((
"Test{}{}{}".format(i, j, k),
[fun1, fun2, fun3],
))
swap = lambda x, n: (n, x)
tests.append((
"Swap1",
[lambda x: (x, 42), swap],
))
tests.append((
"Swap2",
[lambda x: (x, 42), swap, swap],
))
return tuple(tests)
@parameterized.named_parameters(*map_functions.__func__())
def testMapFusion(self, functions):
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Map", "Prefetch"]))
for function in functions:
dataset = dataset.map(function)
dataset = dataset.prefetch(0).apply(optimization.optimize(["map_fusion"]))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
for x in range(5):
result = sess.run(get_next)
r = x
for function in functions:
if isinstance(r, tuple):
r = function(*r) # Pass tuple as multiple arguments.
else:
r = function(r)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@staticmethod
def map_and_filter_functions():
identity = lambda x: x
increment = lambda x: x + 1
minus_five = lambda x: x - 5
def increment_and_square(x):
y = x + 1
return y * y
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
is_odd = lambda x: math_ops.equal(x % 2, 0)
greater = lambda x: math_ops.greater(x + 5, 0)
functions = [identity, increment, minus_five, increment_and_square]
filters = [take_all, is_zero, is_odd, greater]
tests = []
for x, fun in enumerate(functions):
for y, predicate in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), fun, predicate))
# Multi output
tests.append(("Multi1", lambda x: (x, x),
lambda x, y: constant_op.constant(True)))
tests.append(
("Multi2", lambda x: (x, 2),
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
return tuple(tests)
@parameterized.named_parameters(*map_and_filter_functions.__func__())
def testMapFilterFusion(self, function, predicate):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["Map",
"FilterByLastComponent"])).map(function).filter(predicate).apply(
optimization.optimize(["map_and_filter_fusion"]))
self._testMapAndFilter(dataset, function, predicate)
def _testMapAndFilter(self, dataset, function, predicate):
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
for x in range(10):
r = function(x)
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if sess.run(b):
result = sess.run(get_next)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testAdditionalInputs(self):
a = constant_op.constant(3, dtype=dtypes.int64)
b = constant_op.constant(4, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
function = lambda x: x * x
def predicate(y):
return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
# We are currently not supporting functions with additional inputs.
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["Map", "Filter"])).map(function).filter(predicate).apply(
optimization.optimize(["map_and_filter_fusion"]))
self._testMapAndFilter(dataset, function, predicate)
@staticmethod
def filter_functions():
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
greater = lambda x: math_ops.greater(x + 5, 0)
tests = []
filters = [take_all, is_zero, greater]
identity = lambda x: x
for x, predicate_1 in enumerate(filters):
for y, predicate_2 in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), identity,
[predicate_1, predicate_2]))
for z, predicate_3 in enumerate(filters):
tests.append(("Mixed{}{}{}".format(x, y, z), identity,
[predicate_1, predicate_2, predicate_3]))
take_all_multiple = lambda x, y: constant_op.constant(True)
# Multi output
tests.append(("Multi1", lambda x: (x, x),
[take_all_multiple, take_all_multiple]))
tests.append(("Multi2", lambda x: (x, 2), [
take_all_multiple,
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)
]))
return tuple(tests)
@parameterized.named_parameters(*filter_functions.__func__())
def testFilterFusion(self, map_function, predicates):
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Map", "Filter",
"Prefetch"])).map(map_function)
for predicate in predicates:
dataset = dataset.filter(predicate)
dataset = dataset.prefetch(0).apply(
optimization.optimize(["filter_fusion"]))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
for x in range(5):
r = map_function(x)
filtered = False
for predicate in predicates:
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if not sess.run(b):
filtered = True
break
if not filtered:
result = sess.run(get_next)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| en | 0.84276 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests for the MapAndFilterFusion optimization. # Pass tuple as multiple arguments. # Multi output # Pass tuple as multiple arguments. # We are currently not supporting functions with additional inputs. # Multi output # Pass tuple as multiple arguments. | 1.958656 | 2 |
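The test in the record above exercises tf.data graph rewrites; conceptually, `map_and_filter_fusion` turns a `map` followed by a `filter` into a single `map` that also emits the predicate result and then filters on that last component (hence the `FilterByLastComponent` assertion). The sketch below hand-writes that shape with plain TF 1.x `tf.data` calls purely for illustration; it is not the optimizer's actual rewrite, and unlike the real fusion it recomputes `fn(x)` for the predicate.

# Hand-rolled illustration of the fusion idea; real fusion happens inside the
# tf.data optimizer and reuses the mapped value instead of recomputing it.
import tensorflow as tf

fn = lambda x: x * x
pred = lambda y: tf.equal(y % 2, 0)

# Original pipeline: two separate stages.
original = tf.data.Dataset.range(10).map(fn).filter(pred)

# "Fused" shape: one map emitting (value, keep), a filter on the last
# component, then a map that strips it again.
fused = (tf.data.Dataset.range(10)
         .map(lambda x: (fn(x), pred(fn(x))))
         .filter(lambda y, keep: keep)
         .map(lambda y, keep: y))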
meerk40t/lihuiyu/lihuiyuemulator.py | jpirnay/meerk40t | 0 | 793 | <reponame>jpirnay/meerk40t<gh_stars>0
from meerk40t.core.cutcode import CutCode, RawCut
from meerk40t.core.parameters import Parameters
from meerk40t.core.units import UNITS_PER_MIL
from meerk40t.kernel import Module
from meerk40t.numpath import Numpath
from meerk40t.svgelements import Color
class LihuiyuEmulator(Module):
def __init__(self, context, path):
Module.__init__(self, context, path)
self.context.setting(bool, "fix_speeds", False)
self.parser = LihuiyuParser()
self.parser.fix_speeds = self.context.fix_speeds
self.parser.channel = self.context.channel("lhy")
def pos(p):
if p is None:
return
x0, y0, x1, y1 = p
self.context.signal("emulator;position", (x0, y0, x1, y1))
self.parser.position = pos
def __repr__(self):
return "LihuiyuEmulator(%s)" % self.name
def initialize(self, *args, **kwargs):
context = self.context
active = self.context.root.active
send = context.channel("%s/usb_send" % active)
send.watch(self.parser.write_packet)
def finalize(self, *args, **kwargs):
context = self.context
active = self.context.root.active
send = context.channel("%s/usb_send" % active)
send.unwatch(self.parser.write_packet)
class LihuiyuParser:
"""
LihuiyuParser parses LHYMicro-GL code with a state diagram. This should accurately reconstruct the values.
When the position is changed it calls a self.position() function if one exists.
"""
def __init__(self):
self.channel = None
self.position = None
self.board = "M2"
self.header_skipped = False
self.count_lines = 0
self.count_flag = 0
self.settings = Parameters({"speed": 20.0, "power": 1000.0})
self.speed_code = None
self.x = 0.0
self.y = 0.0
self.number_value = ""
self.distance_x = 0
self.distance_y = 0
self.filename = ""
self.laser = 0
self.left = False
self.top = False
self.x_on = False
self.y_on = False
self.small_jump = False
self.returning_compact = True
self.returning_finished = False
self.mode = None
self.raster_step = 0
self.paused_state = False
self.compact_state = False
self.finish_state = False
self.horizontal_major = False
self.fix_speeds = False
self.number_consumer = {}
def parse(self, data, elements):
self.path = Numpath()
def position(p):
if p is None:
return
from_x, from_y, to_x, to_y = p
if self.program_mode:
if self.laser:
self.path.line(complex(from_x, from_y), complex(to_x, to_y))
self.position = position
self.write(data)
self.path.uscale(UNITS_PER_MIL)
elements.elem_branch.add(
type="elem numpath",
path=self.path,
stroke=Color("black"),
**self.settings.settings,
)
elements.signal("refresh_scene", 0)
@property
def program_mode(self):
return self.compact_state
@property
def default_mode(self):
return not self.compact_state
@property
def raster_mode(self):
return self.settings.get("raster_step", 0) != 0
def new_file(self):
self.header_skipped = False
self.count_flag = 0
self.count_lines = 0
@staticmethod
def remove_header(data):
count_lines = 0
count_flag = 0
for i in range(len(data)):
b = data[i]
c = chr(b)
if c == "\n":
count_lines += 1
elif c == "%":
count_flag += 1
if count_lines >= 3 and count_flag >= 5:
return data[i:]
def header_write(self, data):
"""
Write data to the emulator including the header. This is intended for saved .egv files which include a default
header.
"""
if self.header_skipped:
self.write(data)
else:
data = LihuiyuParser.remove_header(data)
self.write(data)
def write_packet(self, packet):
self.write(packet[1:31])
def write(self, data):
for b in data:
self.process(b, chr(b))
def distance_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
self.append_distance(int(self.number_value))
self.number_value = ""
def speedcode_b1_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode B1 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_b2_consumer
def speedcode_b2_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode B2 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_accel_consumer
def speedcode_accel_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 1:
if self.channel:
self.channel("Speedcode Accel = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_mult_consumer
def speedcode_mult_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Accel = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_dratio_b1_consumer
def speedcode_dratio_b1_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Dratio b1 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_dratio_b2_consumer
def speedcode_dratio_b2_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Dratio b2 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.distance_consumer
def raster_step_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Raster Step = %s" % self.number_value)
self.raster_step = int(self.number_value)
self.number_value = ""
self.number_consumer = self.distance_consumer
def mode_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 1:
if self.channel:
self.channel("Set Mode = %s" % self.number_value)
self.mode = int(self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_mult_consumer
def append_distance(self, amount):
if self.x_on:
self.distance_x += amount
if self.y_on:
self.distance_y += amount
def execute_distance(self):
if self.distance_x != 0 or self.distance_y != 0:
dx = self.distance_x
dy = self.distance_y
if self.left:
dx = -dx
if self.top:
dy = -dy
self.distance_x = 0
self.distance_y = 0
ox = self.x
oy = self.y
self.x += dx
self.y += dy
if self.position:
self.position((ox, oy, self.x, self.y))
if self.channel:
self.channel("Moving (%d %d) now at %d %d" % (dx, dy, self.x, self.y))
def process(self, b, c):
if c == "I":
self.finish_state = False
self.compact_state = False
self.paused_state = False
self.distance_x = 0
self.distance_y = 0
if self.finish_state: # In finished all commands are black holed
return
if ord("0") <= b <= ord("9"):
self.number_consumer(c)
return
else:
self.number_consumer = self.distance_consumer
self.number_value = ""
if self.compact_state:
# Every command in compact state executes distances.
self.execute_distance()
if c == "|":
self.append_distance(25)
self.small_jump = True
elif ord("a") <= b <= ord("y"):
self.append_distance(b + 1 - ord("a"))
self.small_jump = False
elif c == "z":
self.append_distance(26 if self.small_jump else 255)
self.small_jump = False
elif c == "B": # Move to Right.
if self.left and self.horizontal_major:
# Was T switched to B with horizontal rastering.
if self.raster_step:
self.distance_y += self.raster_step
self.left = False
self.x_on = True
self.y_on = False
if self.channel:
self.channel("Right")
elif c == "T": # Move to Left
if not self.left and self.horizontal_major:
# Was T switched to B with horizontal rastering.
if self.raster_step:
self.distance_y += self.raster_step
self.left = True
self.x_on = True
self.y_on = False
if self.channel:
self.channel("Left")
elif c == "R": # Move to Bottom
if self.top and not self.horizontal_major:
# Was L switched to R with vertical rastering.
if self.raster_step:
self.distance_x += self.raster_step
self.top = False
self.x_on = False
self.y_on = True
if self.channel:
self.channel("Bottom")
elif c == "L": # Move to Top
if not self.top and not self.horizontal_major:
# Was R switched to L with vertical rastering.
if self.raster_step:
self.distance_x += self.raster_step
self.top = True
self.x_on = False
self.y_on = True
if self.channel:
self.channel("Top")
elif c == "U":
self.laser = 0
if self.channel:
self.channel("Laser Off")
elif c == "D":
self.laser = 1
if self.channel:
self.channel("Laser On")
elif c == "F":
if self.channel:
self.channel("Finish")
self.returning_compact = False
self.returning_finished = True
elif c == "@":
if self.channel:
self.channel("Reset")
self.returning_finished = False
self.returning_compact = False
elif c in "C":
if self.channel:
self.channel("Speedcode")
self.speed_code = ""
elif c in "V":
self.raster_step = None
if self.channel:
self.channel("Velocity")
self.number_consumer = self.speedcode_b1_consumer
elif c in "G":
if self.channel:
self.channel("Step Value")
self.number_consumer = self.raster_step_consumer
elif c == "S":
if self.channel:
self.channel("Mode Set")
self.laser = 0
self.execute_distance()
self.mode = None
self.number_consumer = self.mode_consumer
elif c == "E":
if self.channel:
self.channel("Execute State")
if self.mode is None:
if self.returning_compact:
self.compact_state = True
if self.returning_finished:
self.finish_state = True
if self.horizontal_major:
self.left = not self.left
self.x_on = True
self.y_on = False
if self.raster_step:
self.distance_y += self.raster_step
else:
# vertical major
self.top = not self.top
self.x_on = False
self.y_on = True
if self.raster_step:
self.distance_x += self.raster_step
elif self.mode == 0:
# Homes then moves position.
pass
elif self.mode == 1:
self.compact_state = True
self.horizontal_major = self.x_on
if self.channel:
self.channel("Setting Axis: h=" + str(self.x_on))
elif self.mode == 2:
# Rail unlocked.
self.compact_state = True
self.returning_finished = False
self.returning_compact = True
self.laser = 0
elif c == "P":
if self.channel:
self.channel("Pause")
self.laser = 0
if self.paused_state:
# Home sequence triggered by 2 P commands in the same packet.
# This should resume if not located within the same packet.
if self.position:
self.position((self.x, self.y, 0, 0))
self.x = 0
self.y = 0
self.distance_y = 0
self.distance_x = 0
self.finish_state = True
self.paused_state = False
else:
self.execute_distance() # distance is executed by a P command
self.paused_state = True
elif c == "N":
if self.channel:
self.channel("N")
self.execute_distance() # distance is executed by an N command.
self.laser = 0
self.compact_state = False
if self.position:
self.position(None)
elif c == "M":
self.x_on = True
self.y_on = True
if self.channel:
a = "Top" if self.top else "Bottom"
b = "Left" if self.left else "Right"
self.channel("Diagonal %s %s" % (a, b))
class EGVBlob:
def __init__(self, data: bytearray, name=None):
self.name = name
self.data = data
self.operation = "blob"
self._cutcode = None
self._cut = None
def __repr__(self):
return "EGV(%s, %d bytes)" % (self.name, len(self.data))
def as_cutobjects(self):
parser = LihuiyuParser()
self._cutcode = CutCode()
self._cut = RawCut()
def new_cut():
if self._cut is not None and len(self._cut):
self._cutcode.append(self._cut)
self._cut = RawCut()
self._cut.settings = dict(parser.settings)
def position(p):
if p is None or self._cut is None:
new_cut()
return
from_x, from_y, to_x, to_y = p
if parser.program_mode:
if len(self._cut.plot) == 0:
self._cut.plot_append(int(from_x), int(from_y), parser.laser)
self._cut.plot_append(int(to_x), int(to_y), parser.laser)
else:
new_cut()
parser.position = position
parser.header_write(self.data)
cutcode = self._cutcode
self._cut = None
self._cutcode = None
return cutcode
def generate(self):
yield "blob", "egv", LihuiyuParser.remove_header(self.data)
class EgvLoader:
@staticmethod
def remove_header(data):
count_lines = 0
count_flag = 0
for i in range(len(data)):
b = data[i]
c = chr(b)
if c == "\n":
count_lines += 1
elif c == "%":
count_flag += 1
if count_lines >= 3 and count_flag >= 5:
return data[i:]
@staticmethod
def load_types():
yield "Engrave Files", ("egv",), "application/x-egv"
@staticmethod
def load(kernel, elements_modifier, pathname, **kwargs):
import os
basename = os.path.basename(pathname)
with open(pathname, "rb") as f:
op_branch = elements_modifier.get(type="branch ops")
op_branch.add(
data=bytearray(EgvLoader.remove_header(f.read())),
data_type="egv",
type="blob",
name=basename,
)
return True
| from meerk40t.core.cutcode import CutCode, RawCut
from meerk40t.core.parameters import Parameters
from meerk40t.core.units import UNITS_PER_MIL
from meerk40t.kernel import Module
from meerk40t.numpath import Numpath
from meerk40t.svgelements import Color
class LihuiyuEmulator(Module):
def __init__(self, context, path):
Module.__init__(self, context, path)
self.context.setting(bool, "fix_speeds", False)
self.parser = LihuiyuParser()
self.parser.fix_speeds = self.context.fix_speeds
self.parser.channel = self.context.channel("lhy")
def pos(p):
if p is None:
return
x0, y0, x1, y1 = p
self.context.signal("emulator;position", (x0, y0, x1, y1))
self.parser.position = pos
def __repr__(self):
return "LihuiyuEmulator(%s)" % self.name
def initialize(self, *args, **kwargs):
context = self.context
active = self.context.root.active
send = context.channel("%s/usb_send" % active)
send.watch(self.parser.write_packet)
def finalize(self, *args, **kwargs):
context = self.context
active = self.context.root.active
send = context.channel("%s/usb_send" % active)
send.unwatch(self.parser.write_packet)
class LihuiyuParser:
"""
LihuiyuParser parses LHYMicro-GL code with a state diagram. This should accurately reconstruct the values.
When the position is changed it calls a self.position() function if one exists.
"""
def __init__(self):
self.channel = None
self.position = None
self.board = "M2"
self.header_skipped = False
self.count_lines = 0
self.count_flag = 0
self.settings = Parameters({"speed": 20.0, "power": 1000.0})
self.speed_code = None
self.x = 0.0
self.y = 0.0
self.number_value = ""
self.distance_x = 0
self.distance_y = 0
self.filename = ""
self.laser = 0
self.left = False
self.top = False
self.x_on = False
self.y_on = False
self.small_jump = False
self.returning_compact = True
self.returning_finished = False
self.mode = None
self.raster_step = 0
self.paused_state = False
self.compact_state = False
self.finish_state = False
self.horizontal_major = False
self.fix_speeds = False
self.number_consumer = {}
def parse(self, data, elements):
self.path = Numpath()
def position(p):
if p is None:
return
from_x, from_y, to_x, to_y = p
if self.program_mode:
if self.laser:
self.path.line(complex(from_x, from_y), complex(to_x, to_y))
self.position = position
self.write(data)
self.path.uscale(UNITS_PER_MIL)
elements.elem_branch.add(
type="elem numpath",
path=self.path,
stroke=Color("black"),
**self.settings.settings,
)
elements.signal("refresh_scene", 0)
@property
def program_mode(self):
return self.compact_state
@property
def default_mode(self):
return not self.compact_state
@property
def raster_mode(self):
return self.settings.get("raster_step", 0) != 0
def new_file(self):
self.header_skipped = False
self.count_flag = 0
self.count_lines = 0
@staticmethod
def remove_header(data):
count_lines = 0
count_flag = 0
for i in range(len(data)):
b = data[i]
c = chr(b)
if c == "\n":
count_lines += 1
elif c == "%":
count_flag += 1
if count_lines >= 3 and count_flag >= 5:
return data[i:]
def header_write(self, data):
"""
Write data to the emulator including the header. This is intended for saved .egv files which include a default
header.
"""
if self.header_skipped:
self.write(data)
else:
data = LihuiyuParser.remove_header(data)
self.write(data)
def write_packet(self, packet):
self.write(packet[1:31])
def write(self, data):
for b in data:
self.process(b, chr(b))
def distance_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
self.append_distance(int(self.number_value))
self.number_value = ""
def speedcode_b1_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode B1 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_b2_consumer
def speedcode_b2_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode B2 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_accel_consumer
def speedcode_accel_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 1:
if self.channel:
self.channel("Speedcode Accel = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_mult_consumer
def speedcode_mult_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Accel = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_dratio_b1_consumer
def speedcode_dratio_b1_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Dratio b1 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_dratio_b2_consumer
def speedcode_dratio_b2_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Dratio b2 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.distance_consumer
def raster_step_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Raster Step = %s" % self.number_value)
self.raster_step = int(self.number_value)
self.number_value = ""
self.number_consumer = self.distance_consumer
def mode_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 1:
if self.channel:
self.channel("Set Mode = %s" % self.number_value)
self.mode = int(self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_mult_consumer
def append_distance(self, amount):
if self.x_on:
self.distance_x += amount
if self.y_on:
self.distance_y += amount
def execute_distance(self):
if self.distance_x != 0 or self.distance_y != 0:
dx = self.distance_x
dy = self.distance_y
if self.left:
dx = -dx
if self.top:
dy = -dy
self.distance_x = 0
self.distance_y = 0
ox = self.x
oy = self.y
self.x += dx
self.y += dy
if self.position:
self.position((ox, oy, self.x, self.y))
if self.channel:
self.channel("Moving (%d %d) now at %d %d" % (dx, dy, self.x, self.y))
def process(self, b, c):
if c == "I":
self.finish_state = False
self.compact_state = False
self.paused_state = False
self.distance_x = 0
self.distance_y = 0
if self.finish_state: # In finished all commands are black holed
return
if ord("0") <= b <= ord("9"):
self.number_consumer(c)
return
else:
self.number_consumer = self.distance_consumer
self.number_value = ""
if self.compact_state:
# Every command in compact state executes distances.
self.execute_distance()
if c == "|":
self.append_distance(25)
self.small_jump = True
elif ord("a") <= b <= ord("y"):
self.append_distance(b + 1 - ord("a"))
self.small_jump = False
elif c == "z":
self.append_distance(26 if self.small_jump else 255)
self.small_jump = False
elif c == "B": # Move to Right.
if self.left and self.horizontal_major:
# Was T switched to B with horizontal rastering.
if self.raster_step:
self.distance_y += self.raster_step
self.left = False
self.x_on = True
self.y_on = False
if self.channel:
self.channel("Right")
elif c == "T": # Move to Left
if not self.left and self.horizontal_major:
# Was T switched to B with horizontal rastering.
if self.raster_step:
self.distance_y += self.raster_step
self.left = True
self.x_on = True
self.y_on = False
if self.channel:
self.channel("Left")
elif c == "R": # Move to Bottom
if self.top and not self.horizontal_major:
# Was L switched to R with vertical rastering.
if self.raster_step:
self.distance_x += self.raster_step
self.top = False
self.x_on = False
self.y_on = True
if self.channel:
self.channel("Bottom")
elif c == "L": # Move to Top
if not self.top and not self.horizontal_major:
# Was R switched to L with vertical rastering.
if self.raster_step:
self.distance_x += self.raster_step
self.top = True
self.x_on = False
self.y_on = True
if self.channel:
self.channel("Top")
elif c == "U":
self.laser = 0
if self.channel:
self.channel("Laser Off")
elif c == "D":
self.laser = 1
if self.channel:
self.channel("Laser On")
elif c == "F":
if self.channel:
self.channel("Finish")
self.returning_compact = False
self.returning_finished = True
elif c == "@":
if self.channel:
self.channel("Reset")
self.returning_finished = False
self.returning_compact = False
elif c in "C":
if self.channel:
self.channel("Speedcode")
self.speed_code = ""
elif c in "V":
self.raster_step = None
if self.channel:
self.channel("Velocity")
self.number_consumer = self.speedcode_b1_consumer
elif c in "G":
if self.channel:
self.channel("Step Value")
self.number_consumer = self.raster_step_consumer
elif c == "S":
if self.channel:
self.channel("Mode Set")
self.laser = 0
self.execute_distance()
self.mode = None
self.number_consumer = self.mode_consumer
elif c == "E":
if self.channel:
self.channel("Execute State")
if self.mode is None:
if self.returning_compact:
self.compact_state = True
if self.returning_finished:
self.finish_state = True
if self.horizontal_major:
self.left = not self.left
self.x_on = True
self.y_on = False
if self.raster_step:
self.distance_y += self.raster_step
else:
# vertical major
self.top = not self.top
self.x_on = False
self.y_on = True
if self.raster_step:
self.distance_x += self.raster_step
elif self.mode == 0:
# Homes then moves position.
pass
elif self.mode == 1:
self.compact_state = True
self.horizontal_major = self.x_on
if self.channel:
self.channel("Setting Axis: h=" + str(self.x_on))
elif self.mode == 2:
# Rail unlocked.
self.compact_state = True
self.returning_finished = False
self.returning_compact = True
self.laser = 0
elif c == "P":
if self.channel:
self.channel("Pause")
self.laser = 0
if self.paused_state:
# Home sequence triggered by 2 P commands in the same packet.
# This should resume if not located within the same packet.
if self.position:
self.position((self.x, self.y, 0, 0))
self.x = 0
self.y = 0
self.distance_y = 0
self.distance_x = 0
self.finish_state = True
self.paused_state = False
else:
self.execute_distance() # distance is executed by a P command
self.paused_state = True
elif c == "N":
if self.channel:
self.channel("N")
self.execute_distance() # distance is executed by an N command.
self.laser = 0
self.compact_state = False
if self.position:
self.position(None)
elif c == "M":
self.x_on = True
self.y_on = True
if self.channel:
a = "Top" if self.top else "Bottom"
b = "Left" if self.left else "Right"
self.channel("Diagonal %s %s" % (a, b))
class EGVBlob:
def __init__(self, data: bytearray, name=None):
self.name = name
self.data = data
self.operation = "blob"
self._cutcode = None
self._cut = None
def __repr__(self):
return "EGV(%s, %d bytes)" % (self.name, len(self.data))
def as_cutobjects(self):
parser = LihuiyuParser()
self._cutcode = CutCode()
self._cut = RawCut()
def new_cut():
if self._cut is not None and len(self._cut):
self._cutcode.append(self._cut)
self._cut = RawCut()
self._cut.settings = dict(parser.settings)
def position(p):
if p is None or self._cut is None:
new_cut()
return
from_x, from_y, to_x, to_y = p
if parser.program_mode:
if len(self._cut.plot) == 0:
self._cut.plot_append(int(from_x), int(from_y), parser.laser)
self._cut.plot_append(int(to_x), int(to_y), parser.laser)
else:
new_cut()
parser.position = position
parser.header_write(self.data)
cutcode = self._cutcode
self._cut = None
self._cutcode = None
return cutcode
def generate(self):
yield "blob", "egv", LihuiyuParser.remove_header(self.data)
class EgvLoader:
@staticmethod
def remove_header(data):
count_lines = 0
count_flag = 0
for i in range(len(data)):
b = data[i]
c = chr(b)
if c == "\n":
count_lines += 1
elif c == "%":
count_flag += 1
if count_lines >= 3 and count_flag >= 5:
return data[i:]
@staticmethod
def load_types():
yield "Engrave Files", ("egv",), "application/x-egv"
@staticmethod
def load(kernel, elements_modifier, pathname, **kwargs):
import os
basename = os.path.basename(pathname)
with open(pathname, "rb") as f:
op_branch = elements_modifier.get(type="branch ops")
op_branch.add(
data=bytearray(EgvLoader.remove_header(f.read())),
data_type="egv",
type="blob",
name=basename,
)
return True | en | 0.921286 | LihuiyuParser parses LHYMicro-GL code with a state diagram. This should accurately reconstruct the values. When the position is changed it calls a self.position() function if one exists. Write data to the emulator including the header. This is intended for saved .egv files which include a default header. # In finished all commands are black holed # Every command in compact state executes distances. # Move to Right. # Was T switched to B with horizontal rastering. # Move to Left # Was T switched to B with horizontal rastering. # Move to Bottom # Was L switched to R with vertical rastering. # Move to Top # Was R switched to L with vertical rastering. # vertical major # Homes then moves position. # Rail unlocked. # Home sequence triggered by 2 P commands in the same packet. # This should resume if not located within the same packet. # distance is executed by a P command # distance is executed by an N command. | 2.168633 | 2 |
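The LihuiyuParser in the record above can be exercised directly by feeding it LHYMicro-GL bytes: `channel` receives human-readable decode messages and `position` receives `(from_x, from_y, to_x, to_y)` tuples. The byte strings below are an invented fragment for illustration, not captured controller traffic.

# Drive the parser defined above with a made-up LHYMicro-GL fragment.
parser = LihuiyuParser()
parser.channel = print                               # log each decoded command
parser.position = lambda p: p and print("pos", p)    # movement callback

# 'I' resets state, 'B'/'R' pick the +x/+y directions, three-digit groups and
# the letters a-z encode distances, and 'S1E' enters compact (program) mode.
parser.write(b"IB100R050S1E")
parser.write(b"Da050Ub050FNSE")                      # 'D'/'U' switch the laser on/off
print(parser.x, parser.y)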
tencentcloud/dbbrain/v20210527/models.py | lleiyyang/tencentcloud-sdk-python | 465 | 794 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AddUserContactRequest(AbstractModel):
"""AddUserContact请求参数结构体
"""
def __init__(self):
r"""
        :param Name: Contact name. May contain Chinese and English characters, digits, spaces and !@#$%^&*()_+-=(), must not start with an underscore, and must be no longer than 20 characters.
        :type Name: str
        :param ContactInfo: Email address. Supports upper- and lower-case letters, digits, underscores and the @ character; must not start with an underscore; email addresses must be unique.
        :type ContactInfo: str
        :param Product: Service product type. Fixed value: "mysql".
:type Product: str
"""
self.Name = None
self.ContactInfo = None
self.Product = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.ContactInfo = params.get("ContactInfo")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AddUserContactResponse(AbstractModel):
"""AddUserContact返回参数结构体
"""
def __init__(self):
r"""
        :param Id: ID of the successfully added contact.
        :type Id: int
        :param RequestId: Unique request ID, returned for every request. The RequestId of the request is needed to locate a problem.
:type RequestId: str
"""
self.Id = None
self.RequestId = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.RequestId = params.get("RequestId")
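# --- Usage sketch (editorial aside, not part of the generated file) ---------------
# The request/response models above are normally driven through the standard
# tencentcloud-sdk-python client pattern; DbbrainClient, the region, and the
# credential/contact values below are assumptions made for illustration.
#
#     from tencentcloud.common import credential
#     from tencentcloud.dbbrain.v20210527 import dbbrain_client, models
#
#     cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")
#     client = dbbrain_client.DbbrainClient(cred, "ap-guangzhou")
#
#     req = models.AddUserContactRequest()
#     req.Name = "dba-oncall"
#     req.ContactInfo = "[email protected]"
#     req.Product = "mysql"
#
#     resp = client.AddUserContact(req)      # returns AddUserContactResponse
#     print(resp.Id, resp.RequestId)
# -----------------------------------------------------------------------------------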
class ContactItem(AbstractModel):
"""联系人contact描述。
"""
def __init__(self):
r"""
        :param Id: Contact ID.
        :type Id: int
        :param Name: Contact name.
        :type Name: str
        :param Mail: Email address bound to the contact.
:type Mail: str
"""
self.Id = None
self.Name = None
self.Mail = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Name = params.get("Name")
self.Mail = params.get("Mail")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDBDiagReportTaskRequest(AbstractModel):
"""CreateDBDiagReportTask请求参数结构体
"""
def __init__(self):
r"""
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param StartTime: Start time, e.g. "2020-11-08T14:00:00+08:00".
        :type StartTime: str
        :param EndTime: End time, e.g. "2020-11-09T14:00:00+08:00".
        :type EndTime: str
        :param SendMailFlag: Whether to send an email: 0 - no, 1 - yes.
        :type SendMailFlag: int
        :param ContactPerson: Array of contact IDs that receive the email.
        :type ContactPerson: list of int
        :param ContactGroup: Array of contact group IDs that receive the email.
        :type ContactGroup: list of int
        :param Product: Service product type. Supported values: "mysql" - TencentDB for MySQL, "cynosdb" - TDSQL-C (CynosDB) for MySQL. Default value: "mysql".
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SendMailFlag = None
self.ContactPerson = None
self.ContactGroup = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SendMailFlag = params.get("SendMailFlag")
self.ContactPerson = params.get("ContactPerson")
self.ContactGroup = params.get("ContactGroup")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDBDiagReportTaskResponse(AbstractModel):
"""CreateDBDiagReportTask返回参数结构体
"""
def __init__(self):
r"""
        :param AsyncRequestId: Request ID of the asynchronous task. This ID can be used to query the execution result of the asynchronous task.
Note: this field may return null, indicating that no valid value can be obtained.
        :type AsyncRequestId: int
        :param RequestId: Unique request ID, returned for every request. The RequestId of the request is needed to locate a problem.
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
class CreateDBDiagReportUrlRequest(AbstractModel):
"""CreateDBDiagReportUrl请求参数结构体
"""
def __init__(self):
r"""
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param AsyncRequestId: Task ID of the corresponding health report, which can be queried via DescribeDBDiagReportTasks.
        :type AsyncRequestId: int
        :param Product: Service product type. Supported values: "mysql" - TencentDB for MySQL; "cynosdb" - TDSQL-C for MySQL. Default value: "mysql".
:type Product: str
"""
self.InstanceId = None
self.AsyncRequestId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.AsyncRequestId = params.get("AsyncRequestId")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDBDiagReportUrlResponse(AbstractModel):
"""CreateDBDiagReportUrl返回参数结构体
"""
def __init__(self):
r"""
        :param ReportUrl: URL for viewing the health report.
        :type ReportUrl: str
        :param ExpireTime: Expiration timestamp (in seconds) of the health report URL.
        :type ExpireTime: int
        :param RequestId: Unique request ID, returned for every request. The RequestId of the request is needed to locate a problem.
:type RequestId: str
"""
self.ReportUrl = None
self.ExpireTime = None
self.RequestId = None
def _deserialize(self, params):
self.ReportUrl = params.get("ReportUrl")
self.ExpireTime = params.get("ExpireTime")
self.RequestId = params.get("RequestId")
class CreateMailProfileRequest(AbstractModel):
"""CreateMailProfile请求参数结构体
"""
def __init__(self):
r"""
        :param ProfileInfo: Email configuration content.
        :type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
        :param ProfileLevel: Configuration level. Supported values: "User" - user level, "Instance" - instance level. Database inspection email configurations are user level; scheduled email configurations are instance level.
        :type ProfileLevel: str
        :param ProfileName: Configuration name, which must be unique. The name of a database inspection email configuration can be chosen freely; a scheduled email configuration must be named "scheduler_" + {instanceId}, e.g. "scheduler_cdb-test".
        :type ProfileName: str
        :param ProfileType: Configuration type. Supported values: "dbScan_mail_configuration" - database inspection email configuration, "scheduler_mail_configuration" - scheduled email configuration.
        :type ProfileType: str
        :param Product: Service product type. Supported values: "mysql" - TencentDB for MySQL, "cynosdb" - TDSQL-C (CynosDB) for MySQL.
        :type Product: str
        :param BindInstanceIds: Instance IDs bound to the configuration. Required when the configuration level is "Instance", in which case exactly one instance must be passed in; leave this parameter empty when the configuration level is "User".
:type BindInstanceIds: list of str
"""
self.ProfileInfo = None
self.ProfileLevel = None
self.ProfileName = None
self.ProfileType = None
self.Product = None
self.BindInstanceIds = None
def _deserialize(self, params):
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileLevel = params.get("ProfileLevel")
self.ProfileName = params.get("ProfileName")
self.ProfileType = params.get("ProfileType")
self.Product = params.get("Product")
self.BindInstanceIds = params.get("BindInstanceIds")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateMailProfileResponse(AbstractModel):
"""CreateMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateSchedulerMailProfileRequest(AbstractModel):
"""CreateSchedulerMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param WeekConfiguration: 取值范围1-7,分别代表周一至周日。
:type WeekConfiguration: list of int
:param ProfileInfo: 邮件配置内容。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
:param ProfileName: 配置名称,需要保持唯一性,定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"scheduler_cdb-test"。
:type ProfileName: str
:param BindInstanceId: 配置订阅的实例ID。
:type BindInstanceId: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.WeekConfiguration = None
self.ProfileInfo = None
self.ProfileName = None
self.BindInstanceId = None
self.Product = None
def _deserialize(self, params):
self.WeekConfiguration = params.get("WeekConfiguration")
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileName = params.get("ProfileName")
self.BindInstanceId = params.get("BindInstanceId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateSchedulerMailProfileResponse(AbstractModel):
"""CreateSchedulerMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateSecurityAuditLogExportTaskRequest(AbstractModel):
"""CreateSecurityAuditLogExportTask请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param StartTime: 导出日志开始时间,例如2020-12-28 00:00:00。
:type StartTime: str
:param EndTime: 导出日志结束时间,例如2020-12-28 01:00:00。
:type EndTime: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
:param DangerLevels: 日志风险等级列表,支持值包括:0 无风险;1 低风险;2 中风险;3 高风险。
:type DangerLevels: list of int
"""
self.SecAuditGroupId = None
self.StartTime = None
self.EndTime = None
self.Product = None
self.DangerLevels = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
self.DangerLevels = params.get("DangerLevels")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateSecurityAuditLogExportTaskResponse(AbstractModel):
"""CreateSecurityAuditLogExportTask返回参数结构体
"""
def __init__(self):
r"""
:param AsyncRequestId: 日志导出任务Id。
:type AsyncRequestId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
class DeleteSecurityAuditLogExportTasksRequest(AbstractModel):
"""DeleteSecurityAuditLogExportTasks请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param AsyncRequestIds: 日志导出任务Id列表,接口会忽略不存在或已删除的任务Id。
:type AsyncRequestIds: list of int non-negative
:param Product: 服务产品类型,支持值: "mysql" - 云数据库 MySQL。
:type Product: str
"""
self.SecAuditGroupId = None
self.AsyncRequestIds = None
self.Product = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.AsyncRequestIds = params.get("AsyncRequestIds")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteSecurityAuditLogExportTasksResponse(AbstractModel):
"""DeleteSecurityAuditLogExportTasks返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeAllUserContactRequest(AbstractModel):
"""DescribeAllUserContact请求参数结构体
"""
def __init__(self):
r"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系人名数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeAllUserContactResponse(AbstractModel):
"""DescribeAllUserContact返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 联系人的总数量。
:type TotalCount: int
:param Contacts: 联系人的信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Contacts: list of ContactItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Contacts = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Contacts") is not None:
self.Contacts = []
for item in params.get("Contacts"):
obj = ContactItem()
obj._deserialize(item)
self.Contacts.append(obj)
self.RequestId = params.get("RequestId")
class DescribeAllUserGroupRequest(AbstractModel):
"""DescribeAllUserGroup请求参数结构体
"""
def __init__(self):
r"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系组名称数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeAllUserGroupResponse(AbstractModel):
"""DescribeAllUserGroup返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 组总数。
:type TotalCount: int
:param Groups: 组信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Groups: list of GroupItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Groups = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Groups") is not None:
self.Groups = []
for item in params.get("Groups"):
obj = GroupItem()
obj._deserialize(item)
self.Groups.append(obj)
self.RequestId = params.get("RequestId")
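# Usage sketch (illustrative only; values are hypothetical): nested "Groups" dicts are
# turned into GroupItem objects by _deserialize.
#     resp = DescribeAllUserGroupResponse()
#     resp._deserialize({
#         "TotalCount": 1,
#         "Groups": [{"Id": 12, "Name": "dba-group", "MemberCount": 3}],
#         "RequestId": "7c8a2d1e-example",
#     })
#     resp.Groups[0].Name  # -> "dba-group"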
class DescribeDBDiagEventRequest(AbstractModel):
"""DescribeDBDiagEvent请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param EventId: 事件 ID 。通过“获取实例诊断历史DescribeDBDiagHistory”获取。
:type EventId: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.EventId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.EventId = params.get("EventId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBDiagEventResponse(AbstractModel):
"""DescribeDBDiagEvent返回参数结构体
"""
def __init__(self):
r"""
:param DiagItem: 诊断项。
:type DiagItem: str
:param DiagType: 诊断类型。
:type DiagType: str
:param EventId: 事件 ID 。
:type EventId: int
:param Explanation: 诊断事件详情,若无附加解释信息则输出为空。
:type Explanation: str
:param Outline: 诊断概要。
:type Outline: str
:param Problem: 诊断出的问题。
:type Problem: str
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param StartTime: 开始时间。
:type StartTime: str
:param Suggestions: 诊断建议,若无建议则输出为空。
:type Suggestions: str
:param Metric: 保留字段。
注意:此字段可能返回 null,表示取不到有效值。
:type Metric: str
:param EndTime: 结束时间。
:type EndTime: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiagItem = None
self.DiagType = None
self.EventId = None
self.Explanation = None
self.Outline = None
self.Problem = None
self.Severity = None
self.StartTime = None
self.Suggestions = None
self.Metric = None
self.EndTime = None
self.RequestId = None
def _deserialize(self, params):
self.DiagItem = params.get("DiagItem")
self.DiagType = params.get("DiagType")
self.EventId = params.get("EventId")
self.Explanation = params.get("Explanation")
self.Outline = params.get("Outline")
self.Problem = params.get("Problem")
self.Severity = params.get("Severity")
self.StartTime = params.get("StartTime")
self.Suggestions = params.get("Suggestions")
self.Metric = params.get("Metric")
self.EndTime = params.get("EndTime")
self.RequestId = params.get("RequestId")
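# Usage sketch (illustrative only; values are hypothetical): per the Severity field
# description above, levels 1-2 are the most critical events.
#     resp = DescribeDBDiagEventResponse()
#     resp._deserialize({"EventId": 101, "DiagType": "slow_sql", "Severity": 2,
#                        "Outline": "example outline", "RequestId": "req-example"})
#     is_critical = resp.Severity is not None and resp.Severity <= 2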
class DescribeDBDiagHistoryRequest(AbstractModel):
"""DescribeDBDiagHistory请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 结束时间,如“2019-09-11 12:13:14”,结束时间与开始时间的间隔最大可为2天。
:type EndTime: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBDiagHistoryResponse(AbstractModel):
"""DescribeDBDiagHistory返回参数结构体
"""
def __init__(self):
r"""
:param Events: 事件描述。
:type Events: list of DiagHistoryEventItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Events = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Events") is not None:
self.Events = []
for item in params.get("Events"):
obj = DiagHistoryEventItem()
obj._deserialize(item)
self.Events.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBDiagReportTasksRequest(AbstractModel):
"""DescribeDBDiagReportTasks请求参数结构体
"""
def __init__(self):
r"""
:param StartTime: 第一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type StartTime: str
:param EndTime: 最后一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type EndTime: str
:param InstanceIds: 实例ID数组,用于筛选指定实例的任务列表。
:type InstanceIds: list of str
:param Sources: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。
:type Sources: list of str
:param HealthLevels: 报告的健康等级,支持的取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK" - 危险;"HIGH_RISK" - 高危。
:type HealthLevels: str
:param TaskStatuses: 任务的状态,支持的取值包括:"created" - 新建;"chosen" - 待执行; "running" - 执行中;"failed" - 失败;"finished" - 已完成。
:type TaskStatuses: str
:param Offset: 偏移量,默认0。
:type Offset: int
:param Limit: 返回数量,默认20,最大值为100。
:type Limit: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.StartTime = None
self.EndTime = None
self.InstanceIds = None
self.Sources = None
self.HealthLevels = None
self.TaskStatuses = None
self.Offset = None
self.Limit = None
self.Product = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.InstanceIds = params.get("InstanceIds")
self.Sources = params.get("Sources")
self.HealthLevels = params.get("HealthLevels")
self.TaskStatuses = params.get("TaskStatuses")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBDiagReportTasksResponse(AbstractModel):
"""DescribeDBDiagReportTasks返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 任务总数目。
:type TotalCount: int
:param Tasks: 任务列表。
:type Tasks: list of HealthReportTask
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Tasks = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Tasks") is not None:
self.Tasks = []
for item in params.get("Tasks"):
obj = HealthReportTask()
obj._deserialize(item)
self.Tasks.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBSpaceStatusRequest(AbstractModel):
"""DescribeDBSpaceStatus请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param RangeDays: 时间段天数,截止日期为当日,默认为7天。
:type RangeDays: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.RangeDays = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.RangeDays = params.get("RangeDays")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBSpaceStatusResponse(AbstractModel):
"""DescribeDBSpaceStatus返回参数结构体
"""
def __init__(self):
r"""
:param Growth: 磁盘增长量(MB)。
:type Growth: int
:param Remain: 磁盘剩余(MB)。
:type Remain: int
:param Total: 磁盘总量(MB)。
:type Total: int
:param AvailableDays: 预计可用天数。
:type AvailableDays: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Growth = None
self.Remain = None
self.Total = None
self.AvailableDays = None
self.RequestId = None
def _deserialize(self, params):
self.Growth = params.get("Growth")
self.Remain = params.get("Remain")
self.Total = params.get("Total")
self.AvailableDays = params.get("AvailableDays")
self.RequestId = params.get("RequestId")
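# Usage sketch (illustrative only; values are hypothetical): all sizes are in MB, so a
# usage ratio can be derived from Total and Remain.
#     resp = DescribeDBSpaceStatusResponse()
#     resp._deserialize({"Growth": 120, "Remain": 40960, "Total": 102400,
#                        "AvailableDays": 30, "RequestId": "req-example"})
#     used_ratio = (resp.Total - resp.Remain) / float(resp.Total)  # -> 0.6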
class DescribeDiagDBInstancesRequest(AbstractModel):
"""DescribeDiagDBInstances请求参数结构体
"""
def __init__(self):
r"""
:param IsSupported: 是否是DBbrain支持的实例,固定传 true。
:type IsSupported: bool
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Offset: 分页参数,偏移量。
:type Offset: int
:param Limit: 分页参数,分页值,最大值为100。
:type Limit: int
:param InstanceNames: 根据实例名称条件查询。
:type InstanceNames: list of str
:param InstanceIds: 根据实例ID条件查询。
:type InstanceIds: list of str
:param Regions: 根据地域条件查询。
:type Regions: list of str
"""
self.IsSupported = None
self.Product = None
self.Offset = None
self.Limit = None
self.InstanceNames = None
self.InstanceIds = None
self.Regions = None
def _deserialize(self, params):
self.IsSupported = params.get("IsSupported")
self.Product = params.get("Product")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.InstanceNames = params.get("InstanceNames")
self.InstanceIds = params.get("InstanceIds")
self.Regions = params.get("Regions")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDiagDBInstancesResponse(AbstractModel):
"""DescribeDiagDBInstances返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 实例总数。
:type TotalCount: int
:param DbScanStatus: 全实例巡检状态:0:开启全实例巡检;1:未开启全实例巡检。
:type DbScanStatus: int
:param Items: 实例相关信息。
:type Items: list of InstanceInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.DbScanStatus = None
self.Items = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
self.DbScanStatus = params.get("DbScanStatus")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = InstanceInfo()
obj._deserialize(item)
self.Items.append(obj)
self.RequestId = params.get("RequestId")
class DescribeHealthScoreRequest(AbstractModel):
"""DescribeHealthScore请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 需要获取健康得分的实例ID。
:type InstanceId: str
:param Time: 获取健康得分的时间,时间格式如:2019-09-10 12:13:14。
:type Time: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Time = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Time = params.get("Time")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeHealthScoreResponse(AbstractModel):
"""DescribeHealthScore返回参数结构体
"""
def __init__(self):
r"""
:param Data: 健康得分以及异常扣分项。
:type Data: :class:`tencentcloud.dbbrain.v20210527.models.HealthScoreInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = HealthScoreInfo()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
class DescribeMailProfileRequest(AbstractModel):
"""DescribeMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
:type ProfileType: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Offset: 分页偏移量。
:type Offset: int
:param Limit: 分页单位,最大支持50。
:type Limit: int
:param ProfileName: 根据邮件配置名称查询,定期发送的邮件配置名称遵循:"scheduler_"+{instanceId}的规则。
:type ProfileName: str
"""
self.ProfileType = None
self.Product = None
self.Offset = None
self.Limit = None
self.ProfileName = None
def _deserialize(self, params):
self.ProfileType = params.get("ProfileType")
self.Product = params.get("Product")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.ProfileName = params.get("ProfileName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeMailProfileResponse(AbstractModel):
"""DescribeMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param ProfileList: 邮件配置详情。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileList: list of UserProfile
:param TotalCount: 邮件模版总数。
注意:此字段可能返回 null,表示取不到有效值。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ProfileList = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProfileList") is not None:
self.ProfileList = []
for item in params.get("ProfileList"):
obj = UserProfile()
obj._deserialize(item)
self.ProfileList.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeMySqlProcessListRequest(AbstractModel):
"""DescribeMySqlProcessList请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param ID: 线程的ID,用于筛选线程列表。
:type ID: int
:param User: 线程的操作账号名,用于筛选线程列表。
:type User: str
:param Host: 线程的操作主机地址,用于筛选线程列表。
:type Host: str
:param DB: 线程的操作数据库,用于筛选线程列表。
:type DB: str
:param State: 线程的操作状态,用于筛选线程列表。
:type State: str
:param Command: 线程的执行类型,用于筛选线程列表。
:type Command: str
:param Time: 线程的操作时长最小值,单位秒,用于筛选操作时长大于该值的线程列表。
:type Time: int
:param Info: 线程的操作语句,用于筛选线程列表。
:type Info: str
:param Limit: 返回数量,默认20。
:type Limit: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.ID = None
self.User = None
self.Host = None
self.DB = None
self.State = None
self.Command = None
self.Time = None
self.Info = None
self.Limit = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.ID = params.get("ID")
self.User = params.get("User")
self.Host = params.get("Host")
self.DB = params.get("DB")
self.State = params.get("State")
self.Command = params.get("Command")
self.Time = params.get("Time")
self.Info = params.get("Info")
self.Limit = params.get("Limit")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeMySqlProcessListResponse(AbstractModel):
"""DescribeMySqlProcessList返回参数结构体
"""
def __init__(self):
r"""
:param ProcessList: 实时线程列表。
:type ProcessList: list of MySqlProcess
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ProcessList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProcessList") is not None:
self.ProcessList = []
for item in params.get("ProcessList"):
obj = MySqlProcess()
obj._deserialize(item)
self.ProcessList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSecurityAuditLogDownloadUrlsRequest(AbstractModel):
"""DescribeSecurityAuditLogDownloadUrls请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param AsyncRequestId: 异步任务Id。
:type AsyncRequestId: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
"""
self.SecAuditGroupId = None
self.AsyncRequestId = None
self.Product = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.AsyncRequestId = params.get("AsyncRequestId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeSecurityAuditLogDownloadUrlsResponse(AbstractModel):
"""DescribeSecurityAuditLogDownloadUrls返回参数结构体
"""
def __init__(self):
r"""
:param Urls: 导出结果的COS链接列表。当结果集很大时,可能会切分为多个url下载。
:type Urls: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Urls = None
self.RequestId = None
def _deserialize(self, params):
self.Urls = params.get("Urls")
self.RequestId = params.get("RequestId")
class DescribeSecurityAuditLogExportTasksRequest(AbstractModel):
"""DescribeSecurityAuditLogExportTasks请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
:param AsyncRequestIds: 日志导出任务Id列表。
:type AsyncRequestIds: list of int non-negative
:param Offset: 偏移量,默认0。
:type Offset: int
:param Limit: 返回数量,默认20,最大值为100。
:type Limit: int
"""
self.SecAuditGroupId = None
self.Product = None
self.AsyncRequestIds = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.Product = params.get("Product")
self.AsyncRequestIds = params.get("AsyncRequestIds")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeSecurityAuditLogExportTasksResponse(AbstractModel):
"""DescribeSecurityAuditLogExportTasks返回参数结构体
"""
def __init__(self):
r"""
:param Tasks: 安全审计日志导出任务列表。
:type Tasks: list of SecLogExportTaskInfo
:param TotalCount: 安全审计日志导出任务总数。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Tasks = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Tasks") is not None:
self.Tasks = []
for item in params.get("Tasks"):
obj = SecLogExportTaskInfo()
obj._deserialize(item)
self.Tasks.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeSlowLogTimeSeriesStatsRequest(AbstractModel):
"""DescribeSlowLogTimeSeriesStats请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 结束时间,如“2019-09-10 12:13:14”,结束时间与开始时间的间隔最大可为7天。
:type EndTime: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeSlowLogTimeSeriesStatsResponse(AbstractModel):
"""DescribeSlowLogTimeSeriesStats返回参数结构体
"""
def __init__(self):
r"""
:param Period: 柱间单位时间间隔,单位为秒。
:type Period: int
:param TimeSeries: 单位时间间隔内慢日志数量统计。
:type TimeSeries: list of TimeSlice
:param SeriesData: 单位时间间隔内的实例 cpu 利用率监控数据。
:type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Period = None
self.TimeSeries = None
self.SeriesData = None
self.RequestId = None
def _deserialize(self, params):
self.Period = params.get("Period")
if params.get("TimeSeries") is not None:
self.TimeSeries = []
for item in params.get("TimeSeries"):
obj = TimeSlice()
obj._deserialize(item)
self.TimeSeries.append(obj)
if params.get("SeriesData") is not None:
self.SeriesData = MonitorMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
self.RequestId = params.get("RequestId")
class DescribeSlowLogTopSqlsRequest(AbstractModel):
"""DescribeSlowLogTopSqls请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 截止时间,如“2019-09-11 10:13:14”,截止时间与开始时间的间隔小于7天。
:type EndTime: str
:param SortBy: 排序键,目前支持 QueryTime,ExecTimes,RowsSent,LockTime以及RowsExamined 等排序键,默认为QueryTime。
:type SortBy: str
:param OrderBy: 排序方式,支持ASC(升序)以及DESC(降序),默认为DESC。
:type OrderBy: str
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: int
:param Offset: 偏移量,默认为0。
:type Offset: int
:param SchemaList: 数据库名称数组。
:type SchemaList: list of SchemaItem
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SortBy = None
self.OrderBy = None
self.Limit = None
self.Offset = None
self.SchemaList = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SortBy = params.get("SortBy")
self.OrderBy = params.get("OrderBy")
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
if params.get("SchemaList") is not None:
self.SchemaList = []
for item in params.get("SchemaList"):
obj = SchemaItem()
obj._deserialize(item)
self.SchemaList.append(obj)
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeSlowLogTopSqlsResponse(AbstractModel):
"""DescribeSlowLogTopSqls返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 符合条件的记录总数。
:type TotalCount: int
:param Rows: 慢日志 top sql 列表。
:type Rows: list of SlowLogTopSqlItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Rows = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Rows") is not None:
self.Rows = []
for item in params.get("Rows"):
obj = SlowLogTopSqlItem()
obj._deserialize(item)
self.Rows.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSlowLogUserHostStatsRequest(AbstractModel):
"""DescribeSlowLogUserHostStats请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param StartTime: 查询范围的开始时间,时间格式如:2019-09-10 12:13:14。
:type StartTime: str
:param EndTime: 查询范围的结束时间,时间格式如:2019-09-10 12:13:14。
:type EndTime: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Md5: SQL模板的MD5值。
:type Md5: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
self.Md5 = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
self.Md5 = params.get("Md5")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeSlowLogUserHostStatsResponse(AbstractModel):
"""DescribeSlowLogUserHostStats返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 来源地址数目。
:type TotalCount: int
:param Items: 各来源地址的慢日志占比详情列表。
:type Items: list of SlowLogHost
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Items = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = SlowLogHost()
obj._deserialize(item)
self.Items.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceSchemaTimeSeriesRequest(AbstractModel):
"""DescribeTopSpaceSchemaTimeSeries请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param Limit: 返回的Top库数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。
:type StartDate: str
:param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。
:type EndDate: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.StartDate = None
self.EndDate = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.StartDate = params.get("StartDate")
self.EndDate = params.get("EndDate")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeTopSpaceSchemaTimeSeriesResponse(AbstractModel):
"""DescribeTopSpaceSchemaTimeSeries返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceSchemaTimeSeries: 返回的Top库空间统计信息的时序数据列表。
:type TopSpaceSchemaTimeSeries: list of SchemaSpaceTimeSeries
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceSchemaTimeSeries = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceSchemaTimeSeries") is not None:
self.TopSpaceSchemaTimeSeries = []
for item in params.get("TopSpaceSchemaTimeSeries"):
obj = SchemaSpaceTimeSeries()
obj._deserialize(item)
self.TopSpaceSchemaTimeSeries.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceSchemasRequest(AbstractModel):
"""DescribeTopSpaceSchemas请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top库数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeTopSpaceSchemasResponse(AbstractModel):
"""DescribeTopSpaceSchemas返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceSchemas: 返回的Top库空间统计信息列表。
:type TopSpaceSchemas: list of SchemaSpaceData
:param Timestamp: 采集库空间数据的时间戳(秒)。
:type Timestamp: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceSchemas = None
self.Timestamp = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceSchemas") is not None:
self.TopSpaceSchemas = []
for item in params.get("TopSpaceSchemas"):
obj = SchemaSpaceData()
obj._deserialize(item)
self.TopSpaceSchemas.append(obj)
self.Timestamp = params.get("Timestamp")
self.RequestId = params.get("RequestId")
class DescribeTopSpaceTableTimeSeriesRequest(AbstractModel):
"""DescribeTopSpaceTableTimeSeries请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top表数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize,默认为 PhysicalFileSize。
:type SortBy: str
:param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。
:type StartDate: str
:param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。
:type EndDate: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.StartDate = None
self.EndDate = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.StartDate = params.get("StartDate")
self.EndDate = params.get("EndDate")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeTopSpaceTableTimeSeriesResponse(AbstractModel):
"""DescribeTopSpaceTableTimeSeries返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceTableTimeSeries: 返回的Top表空间统计信息的时序数据列表。
:type TopSpaceTableTimeSeries: list of TableSpaceTimeSeries
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceTableTimeSeries = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceTableTimeSeries") is not None:
self.TopSpaceTableTimeSeries = []
for item in params.get("TopSpaceTableTimeSeries"):
obj = TableSpaceTimeSeries()
obj._deserialize(item)
self.TopSpaceTableTimeSeries.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceTablesRequest(AbstractModel):
"""DescribeTopSpaceTables请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top表数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeTopSpaceTablesResponse(AbstractModel):
"""DescribeTopSpaceTables返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceTables: 返回的Top表空间统计信息列表。
:type TopSpaceTables: list of TableSpaceData
:param Timestamp: 采集表空间数据的时间戳(秒)。
:type Timestamp: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceTables = None
self.Timestamp = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceTables") is not None:
self.TopSpaceTables = []
for item in params.get("TopSpaceTables"):
obj = TableSpaceData()
obj._deserialize(item)
self.TopSpaceTables.append(obj)
self.Timestamp = params.get("Timestamp")
self.RequestId = params.get("RequestId")
class DescribeUserSqlAdviceRequest(AbstractModel):
"""DescribeUserSqlAdvice请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param SqlText: SQL语句。
:type SqlText: str
:param Schema: 库名。
:type Schema: str
"""
self.InstanceId = None
self.SqlText = None
self.Schema = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeUserSqlAdviceResponse(AbstractModel):
"""DescribeUserSqlAdvice返回参数结构体
"""
def __init__(self):
r"""
:param Advices: SQL优化建议,可解析为JSON数组,无需优化时输出为空。
:type Advices: str
:param Comments: SQL优化建议备注,可解析为String数组,无需优化时输出为空。
:type Comments: str
:param SqlText: SQL语句。
:type SqlText: str
:param Schema: 库名。
:type Schema: str
:param Tables: 相关表的DDL信息,可解析为JSON数组。
:type Tables: str
:param SqlPlan: SQL执行计划,可解析为JSON,无需优化时输出为空。
:type SqlPlan: str
:param Cost: SQL优化后的成本节约详情,可解析为JSON,无需优化时输出为空。
:type Cost: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Advices = None
self.Comments = None
self.SqlText = None
self.Schema = None
self.Tables = None
self.SqlPlan = None
self.Cost = None
self.RequestId = None
def _deserialize(self, params):
self.Advices = params.get("Advices")
self.Comments = params.get("Comments")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
self.Tables = params.get("Tables")
self.SqlPlan = params.get("SqlPlan")
self.Cost = params.get("Cost")
self.RequestId = params.get("RequestId")
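# Usage sketch (illustrative only; values are hypothetical): Advices is documented above
# as a JSON-array string, so it can be parsed with the standard json module when non-empty.
#     import json
#     resp = DescribeUserSqlAdviceResponse()
#     resp._deserialize({"Advices": '[{"Advice": "add an index"}]', "SqlText": "select 1",
#                        "Schema": "test", "RequestId": "req-example"})
#     advices = json.loads(resp.Advices) if resp.Advices else []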
class DiagHistoryEventItem(AbstractModel):
"""实例诊断历史事件
"""
def __init__(self):
r"""
:param DiagType: 诊断类型。
:type DiagType: str
:param EndTime: 结束时间。
:type EndTime: str
:param StartTime: 开始时间。
:type StartTime: str
:param EventId: 事件唯一ID 。
:type EventId: int
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param Outline: 诊断概要。
:type Outline: str
:param DiagItem: 诊断项说明。
:type DiagItem: str
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Metric: 保留字段。
注意:此字段可能返回 null,表示取不到有效值。
:type Metric: str
:param Region: 地域。
:type Region: str
"""
self.DiagType = None
self.EndTime = None
self.StartTime = None
self.EventId = None
self.Severity = None
self.Outline = None
self.DiagItem = None
self.InstanceId = None
self.Metric = None
self.Region = None
def _deserialize(self, params):
self.DiagType = params.get("DiagType")
self.EndTime = params.get("EndTime")
self.StartTime = params.get("StartTime")
self.EventId = params.get("EventId")
self.Severity = params.get("Severity")
self.Outline = params.get("Outline")
self.DiagItem = params.get("DiagItem")
self.InstanceId = params.get("InstanceId")
self.Metric = params.get("Metric")
self.Region = params.get("Region")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class EventInfo(AbstractModel):
"""异常事件信息。
"""
def __init__(self):
r"""
:param EventId: 事件 ID 。
:type EventId: int
:param DiagType: 诊断类型。
:type DiagType: str
:param StartTime: 开始时间。
:type StartTime: str
:param EndTime: 结束时间。
:type EndTime: str
:param Outline: 概要。
:type Outline: str
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param ScoreLost: 扣分。
:type ScoreLost: int
:param Metric: 保留字段。
:type Metric: str
:param Count: 告警数目。
:type Count: int
"""
self.EventId = None
self.DiagType = None
self.StartTime = None
self.EndTime = None
self.Outline = None
self.Severity = None
self.ScoreLost = None
self.Metric = None
self.Count = None
def _deserialize(self, params):
self.EventId = params.get("EventId")
self.DiagType = params.get("DiagType")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Outline = params.get("Outline")
self.Severity = params.get("Severity")
self.ScoreLost = params.get("ScoreLost")
self.Metric = params.get("Metric")
self.Count = params.get("Count")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class GroupItem(AbstractModel):
"""描述组信息。
"""
def __init__(self):
r"""
:param Id: 组id。
:type Id: int
:param Name: 组名称。
:type Name: str
:param MemberCount: 组成员数量。
:type MemberCount: int
"""
self.Id = None
self.Name = None
self.MemberCount = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Name = params.get("Name")
self.MemberCount = params.get("MemberCount")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class HealthReportTask(AbstractModel):
"""健康报告任务详情。
"""
def __init__(self):
r"""
:param AsyncRequestId: 异步任务请求 ID。
:type AsyncRequestId: int
:param Source: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。
:type Source: str
:param Progress: 任务完成进度,单位%。
:type Progress: int
:param CreateTime: 任务创建时间。
:type CreateTime: str
:param StartTime: 任务开始执行时间。
:type StartTime: str
:param EndTime: 任务完成执行时间。
:type EndTime: str
:param InstanceInfo: 任务所属实例的基础信息。
:type InstanceInfo: :class:`tencentcloud.dbbrain.v20210527.models.InstanceBasicInfo`
:param HealthStatus: 健康报告中的健康信息。
:type HealthStatus: :class:`tencentcloud.dbbrain.v20210527.models.HealthStatus`
"""
self.AsyncRequestId = None
self.Source = None
self.Progress = None
self.CreateTime = None
self.StartTime = None
self.EndTime = None
self.InstanceInfo = None
self.HealthStatus = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.Source = params.get("Source")
self.Progress = params.get("Progress")
self.CreateTime = params.get("CreateTime")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
if params.get("InstanceInfo") is not None:
self.InstanceInfo = InstanceBasicInfo()
self.InstanceInfo._deserialize(params.get("InstanceInfo"))
if params.get("HealthStatus") is not None:
self.HealthStatus = HealthStatus()
self.HealthStatus._deserialize(params.get("HealthStatus"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class HealthScoreInfo(AbstractModel):
"""获取健康得分返回的详情。
"""
def __init__(self):
r"""
:param IssueTypes: 异常详情。
:type IssueTypes: list of IssueTypeInfo
:param EventsTotalCount: 异常事件总数。
:type EventsTotalCount: int
:param HealthScore: 健康得分。
:type HealthScore: int
:param HealthLevel: 健康等级, 如:"HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"。
:type HealthLevel: str
"""
self.IssueTypes = None
self.EventsTotalCount = None
self.HealthScore = None
self.HealthLevel = None
def _deserialize(self, params):
if params.get("IssueTypes") is not None:
self.IssueTypes = []
for item in params.get("IssueTypes"):
obj = IssueTypeInfo()
obj._deserialize(item)
self.IssueTypes.append(obj)
self.EventsTotalCount = params.get("EventsTotalCount")
self.HealthScore = params.get("HealthScore")
self.HealthLevel = params.get("HealthLevel")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class HealthStatus(AbstractModel):
"""实例健康详情。
"""
def __init__(self):
r"""
:param HealthScore: 健康分数,满分100。
:type HealthScore: int
:param HealthLevel: 健康等级,取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK"- 危险;"HIGH_RISK" - 高危。
:type HealthLevel: str
:param ScoreLost: 总扣分分数。
:type ScoreLost: int
:param ScoreDetails: 扣分详情。
注意:此字段可能返回 null,表示取不到有效值。
:type ScoreDetails: list of ScoreDetail
"""
self.HealthScore = None
self.HealthLevel = None
self.ScoreLost = None
self.ScoreDetails = None
def _deserialize(self, params):
self.HealthScore = params.get("HealthScore")
self.HealthLevel = params.get("HealthLevel")
self.ScoreLost = params.get("ScoreLost")
if params.get("ScoreDetails") is not None:
self.ScoreDetails = []
for item in params.get("ScoreDetails"):
obj = ScoreDetail()
obj._deserialize(item)
self.ScoreDetails.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class InstanceBasicInfo(AbstractModel):
"""实例基础信息。
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param InstanceName: 实例名称。
:type InstanceName: str
:param Vip: 实例内网IP。
:type Vip: str
:param Vport: 实例内网Port。
:type Vport: int
:param Product: 实例产品。
:type Product: str
:param EngineVersion: 实例引擎版本。
:type EngineVersion: str
"""
self.InstanceId = None
self.InstanceName = None
self.Vip = None
self.Vport = None
self.Product = None
self.EngineVersion = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.Product = params.get("Product")
self.EngineVersion = params.get("EngineVersion")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class InstanceConfs(AbstractModel):
"""实例配置。
"""
def __init__(self):
r"""
:param DailyInspection: 数据库巡检开关, Yes/No。
:type DailyInspection: str
:param OverviewDisplay: 实例概览开关,Yes/No。
:type OverviewDisplay: str
"""
self.DailyInspection = None
self.OverviewDisplay = None
def _deserialize(self, params):
self.DailyInspection = params.get("DailyInspection")
self.OverviewDisplay = params.get("OverviewDisplay")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class InstanceInfo(AbstractModel):
"""查询实例列表,返回实例的相关信息的对象。
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param InstanceName: 实例名称。
:type InstanceName: str
:param Region: 实例所属地域。
:type Region: str
:param HealthScore: 健康得分。
:type HealthScore: int
:param Product: 所属产品。
:type Product: str
:param EventCount: 异常事件数量。
:type EventCount: int
:param InstanceType: 实例类型:1:MASTER;2:DR;3:RO;4:SDR。
:type InstanceType: int
:param Cpu: 核心数。
:type Cpu: int
:param Memory: 内存,单位MB。
:type Memory: int
:param Volume: 硬盘存储,单位GB。
:type Volume: int
:param EngineVersion: 数据库版本。
:type EngineVersion: str
:param Vip: 内网地址。
:type Vip: str
:param Vport: 内网端口。
:type Vport: int
:param Source: 接入来源。
:type Source: str
:param GroupId: 分组ID。
:type GroupId: str
:param GroupName: 分组组名。
:type GroupName: str
:param Status: 实例状态:0:发货中;1:运行正常;4:销毁中;5:隔离中。
:type Status: int
:param UniqSubnetId: 子网统一ID。
:type UniqSubnetId: str
:param DeployMode: cdb类型。
:type DeployMode: str
:param InitFlag: cdb实例初始化标志:0:未初始化;1:已初始化。
:type InitFlag: int
:param TaskStatus: 任务状态。
:type TaskStatus: int
:param UniqVpcId: 私有网络统一ID。
:type UniqVpcId: str
:param InstanceConf: 实例巡检/概览的状态。
:type InstanceConf: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
:param DeadlineTime: 资源到期时间。
:type DeadlineTime: str
:param IsSupported: 是否是DBbrain支持的实例。
:type IsSupported: bool
:param SecAuditStatus: 实例安全审计日志开启状态:ON: 安全审计开启;OFF: 未开启安全审计。
:type SecAuditStatus: str
:param AuditPolicyStatus: 实例审计日志开启状态,ALL_AUDIT: 开启全审计;RULE_AUDIT: 开启规则审计;UNBOUND: 未开启审计。
:type AuditPolicyStatus: str
:param AuditRunningStatus: 实例审计日志运行状态:normal: 运行中; paused: 欠费暂停。
:type AuditRunningStatus: str
"""
self.InstanceId = None
self.InstanceName = None
self.Region = None
self.HealthScore = None
self.Product = None
self.EventCount = None
self.InstanceType = None
self.Cpu = None
self.Memory = None
self.Volume = None
self.EngineVersion = None
self.Vip = None
self.Vport = None
self.Source = None
self.GroupId = None
self.GroupName = None
self.Status = None
self.UniqSubnetId = None
self.DeployMode = None
self.InitFlag = None
self.TaskStatus = None
self.UniqVpcId = None
self.InstanceConf = None
self.DeadlineTime = None
self.IsSupported = None
self.SecAuditStatus = None
self.AuditPolicyStatus = None
self.AuditRunningStatus = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.Region = params.get("Region")
self.HealthScore = params.get("HealthScore")
self.Product = params.get("Product")
self.EventCount = params.get("EventCount")
self.InstanceType = params.get("InstanceType")
self.Cpu = params.get("Cpu")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.EngineVersion = params.get("EngineVersion")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.Source = params.get("Source")
self.GroupId = params.get("GroupId")
self.GroupName = params.get("GroupName")
self.Status = params.get("Status")
self.UniqSubnetId = params.get("UniqSubnetId")
self.DeployMode = params.get("DeployMode")
self.InitFlag = params.get("InitFlag")
self.TaskStatus = params.get("TaskStatus")
self.UniqVpcId = params.get("UniqVpcId")
if params.get("InstanceConf") is not None:
self.InstanceConf = InstanceConfs()
self.InstanceConf._deserialize(params.get("InstanceConf"))
self.DeadlineTime = params.get("DeadlineTime")
self.IsSupported = params.get("IsSupported")
self.SecAuditStatus = params.get("SecAuditStatus")
self.AuditPolicyStatus = params.get("AuditPolicyStatus")
self.AuditRunningStatus = params.get("AuditRunningStatus")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class IssueTypeInfo(AbstractModel):
"""指标信息。
"""
def __init__(self):
r"""
:param IssueType: 指标分类:AVAILABILITY:可用性,MAINTAINABILITY:可维护性,PERFORMANCE:性能,RELIABILITY:可靠性。
:type IssueType: str
:param Events: 异常事件。
:type Events: list of EventInfo
:param TotalCount: 异常事件总数。
:type TotalCount: int
"""
self.IssueType = None
self.Events = None
self.TotalCount = None
def _deserialize(self, params):
self.IssueType = params.get("IssueType")
if params.get("Events") is not None:
self.Events = []
for item in params.get("Events"):
obj = EventInfo()
obj._deserialize(item)
self.Events.append(obj)
self.TotalCount = params.get("TotalCount")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class KillMySqlThreadsRequest(AbstractModel):
"""KillMySqlThreads请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param Stage: kill会话任务的阶段,取值包括:"Prepare"-准备阶段,"Commit"-提交阶段。
:type Stage: str
:param Threads: 需要kill的sql会话ID列表,此参数用于Prepare阶段。
:type Threads: list of int
:param SqlExecId: 执行ID,此参数用于Commit阶段。
:type SqlExecId: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Stage = None
self.Threads = None
self.SqlExecId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Stage = params.get("Stage")
self.Threads = params.get("Threads")
self.SqlExecId = params.get("SqlExecId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class KillMySqlThreadsResponse(AbstractModel):
"""KillMySqlThreads返回参数结构体
"""
def __init__(self):
r"""
:param Threads: kill完成的sql会话ID列表。
:type Threads: list of int
:param SqlExecId: 执行ID, Prepare阶段的任务输出,用于Commit阶段中指定执行kill操作的会话ID。
注意:此字段可能返回 null,表示取不到有效值。
:type SqlExecId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Threads = None
self.SqlExecId = None
self.RequestId = None
def _deserialize(self, params):
self.Threads = params.get("Threads")
self.SqlExecId = params.get("SqlExecId")
self.RequestId = params.get("RequestId")
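# Usage sketch of the two-stage kill flow described above (illustrative only; the
# instance ID, thread IDs and SqlExecId are hypothetical).
#     prepare = KillMySqlThreadsRequest()
#     prepare._deserialize({"InstanceId": "cdb-test", "Stage": "Prepare",
#                           "Threads": [1234, 5678], "Product": "mysql"})
#     # ...call the API, read SqlExecId from the returned KillMySqlThreadsResponse...
#     commit = KillMySqlThreadsRequest()
#     commit._deserialize({"InstanceId": "cdb-test", "Stage": "Commit",
#                          "SqlExecId": "sql-exec-id-from-prepare", "Product": "mysql"})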
class MailConfiguration(AbstractModel):
"""邮件发送配置
"""
def __init__(self):
r"""
:param SendMail: 是否开启邮件发送: 0, 否; 1, 是。
:type SendMail: int
:param Region: 地域配置, 如["ap-guangzhou", "ap-shanghai"]。巡检的邮件发送模版,配置需要发送巡检邮件的地域;订阅的邮件发送模版,配置当前订阅实例的所属地域。
:type Region: list of str
:param HealthStatus: 发送指定的健康等级的报告, 如["HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"]。
:type HealthStatus: list of str
:param ContactPerson: 联系人id, 联系人/联系组不能都为空。
:type ContactPerson: list of int
:param ContactGroup: 联系组id, 联系人/联系组不能都为空。
:type ContactGroup: list of int
"""
self.SendMail = None
self.Region = None
self.HealthStatus = None
self.ContactPerson = None
self.ContactGroup = None
def _deserialize(self, params):
self.SendMail = params.get("SendMail")
self.Region = params.get("Region")
self.HealthStatus = params.get("HealthStatus")
self.ContactPerson = params.get("ContactPerson")
self.ContactGroup = params.get("ContactGroup")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ModifyDiagDBInstanceConfRequest(AbstractModel):
"""ModifyDiagDBInstanceConf请求参数结构体
"""
def __init__(self):
r"""
:param InstanceConfs: 实例配置,包括巡检、概览开关等。
:type InstanceConfs: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
:param Regions: 生效实例地域,取值为"All",代表全地域。
:type Regions: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。
:type Product: str
:param InstanceIds: 指定更改巡检状态的实例ID。
:type InstanceIds: list of str
"""
self.InstanceConfs = None
self.Regions = None
self.Product = None
self.InstanceIds = None
def _deserialize(self, params):
if params.get("InstanceConfs") is not None:
self.InstanceConfs = InstanceConfs()
self.InstanceConfs._deserialize(params.get("InstanceConfs"))
self.Regions = params.get("Regions")
self.Product = params.get("Product")
self.InstanceIds = params.get("InstanceIds")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ModifyDiagDBInstanceConfResponse(AbstractModel):
"""ModifyDiagDBInstanceConf返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class MonitorFloatMetric(AbstractModel):
"""监控数据(浮点型)
"""
def __init__(self):
r"""
:param Metric: 指标名称。
:type Metric: str
:param Unit: 指标单位。
:type Unit: str
:param Values: 指标值。
注意:此字段可能返回 null,表示取不到有效值。
:type Values: list of float
"""
self.Metric = None
self.Unit = None
self.Values = None
def _deserialize(self, params):
self.Metric = params.get("Metric")
self.Unit = params.get("Unit")
self.Values = params.get("Values")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class MonitorFloatMetricSeriesData(AbstractModel):
"""单位时间间隔内的监控指标数据(浮点型)
"""
def __init__(self):
r"""
:param Series: 监控指标。
:type Series: list of MonitorFloatMetric
:param Timestamp: 监控指标对应的时间戳。
:type Timestamp: list of int
"""
self.Series = None
self.Timestamp = None
def _deserialize(self, params):
if params.get("Series") is not None:
self.Series = []
for item in params.get("Series"):
obj = MonitorFloatMetric()
obj._deserialize(item)
self.Series.append(obj)
self.Timestamp = params.get("Timestamp")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class MonitorMetric(AbstractModel):
"""监控数据
"""
def __init__(self):
r"""
:param Metric: 指标名称。
:type Metric: str
:param Unit: 指标单位。
:type Unit: str
:param Values: 指标值。
注意:此字段可能返回 null,表示取不到有效值。
:type Values: list of float
"""
self.Metric = None
self.Unit = None
self.Values = None
def _deserialize(self, params):
self.Metric = params.get("Metric")
self.Unit = params.get("Unit")
self.Values = params.get("Values")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorMetricSeriesData(AbstractModel):
"""单位时间间隔内的监控指标数据
"""
def __init__(self):
r"""
:param Series: 监控指标。
:type Series: list of MonitorMetric
:param Timestamp: 监控指标对应的时间戳。
:type Timestamp: list of int
"""
self.Series = None
self.Timestamp = None
def _deserialize(self, params):
if params.get("Series") is not None:
self.Series = []
for item in params.get("Series"):
obj = MonitorMetric()
obj._deserialize(item)
self.Series.append(obj)
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MySqlProcess(AbstractModel):
"""关系型数据库线程
"""
def __init__(self):
r"""
:param ID: 线程ID。
:type ID: str
:param User: 线程的操作账号名。
:type User: str
:param Host: 线程的操作主机地址。
:type Host: str
:param DB: 线程的操作数据库。
:type DB: str
:param State: 线程的操作状态。
:type State: str
:param Command: 线程的执行类型。
:type Command: str
:param Time: 线程的操作时长,单位秒。
:type Time: str
:param Info: 线程的操作语句。
:type Info: str
"""
self.ID = None
self.User = None
self.Host = None
self.DB = None
self.State = None
self.Command = None
self.Time = None
self.Info = None
def _deserialize(self, params):
self.ID = params.get("ID")
self.User = params.get("User")
self.Host = params.get("Host")
self.DB = params.get("DB")
self.State = params.get("State")
self.Command = params.get("Command")
self.Time = params.get("Time")
self.Info = params.get("Info")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ProfileInfo(AbstractModel):
"""用户配置的信息
"""
def __init__(self):
r"""
:param Language: 语言, 如"zh"。
:type Language: str
:param MailConfiguration: 邮件模板的内容。
:type MailConfiguration: :class:`tencentcloud.dbbrain.v20210527.models.MailConfiguration`
"""
self.Language = None
self.MailConfiguration = None
def _deserialize(self, params):
self.Language = params.get("Language")
if params.get("MailConfiguration") is not None:
self.MailConfiguration = MailConfiguration()
self.MailConfiguration._deserialize(params.get("MailConfiguration"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class SchemaItem(AbstractModel):
"""SchemaItem数组
"""
def __init__(self):
r"""
:param Schema: 数据库名称
:type Schema: str
"""
self.Schema = None
def _deserialize(self, params):
self.Schema = params.get("Schema")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class SchemaSpaceData(AbstractModel):
"""库空间统计数据。
"""
def __init__(self):
r"""
:param TableSchema: 库名。
:type TableSchema: str
:param DataLength: 数据空间(MB)。
:type DataLength: float
:param IndexLength: 索引空间(MB)。
:type IndexLength: float
:param DataFree: 碎片空间(MB)。
:type DataFree: float
:param TotalLength: 总使用空间(MB)。
:type TotalLength: float
:param FragRatio: 碎片率(%)。
:type FragRatio: float
:param TableRows: 行数。
:type TableRows: int
:param PhysicalFileSize: 库中所有表对应的独立物理文件大小加和(MB)。
注意:此字段可能返回 null,表示取不到有效值。
:type PhysicalFileSize: float
"""
self.TableSchema = None
self.DataLength = None
self.IndexLength = None
self.DataFree = None
self.TotalLength = None
self.FragRatio = None
self.TableRows = None
self.PhysicalFileSize = None
def _deserialize(self, params):
self.TableSchema = params.get("TableSchema")
self.DataLength = params.get("DataLength")
self.IndexLength = params.get("IndexLength")
self.DataFree = params.get("DataFree")
self.TotalLength = params.get("TotalLength")
self.FragRatio = params.get("FragRatio")
self.TableRows = params.get("TableRows")
self.PhysicalFileSize = params.get("PhysicalFileSize")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class SchemaSpaceTimeSeries(AbstractModel):
"""库空间时序数据
"""
def __init__(self):
r"""
:param TableSchema: 库名
:type TableSchema: str
:param SeriesData: 单位时间间隔内的空间指标数据。
:type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData`
"""
self.TableSchema = None
self.SeriesData = None
def _deserialize(self, params):
self.TableSchema = params.get("TableSchema")
if params.get("SeriesData") is not None:
self.SeriesData = MonitorMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ScoreDetail(AbstractModel):
"""扣分详情。
"""
def __init__(self):
r"""
:param IssueType: 扣分项分类,取值包括:可用性、可维护性、性能及可靠性。
:type IssueType: str
:param ScoreLost: 扣分总分。
:type ScoreLost: int
:param ScoreLostMax: 扣分总分上限。
:type ScoreLostMax: int
:param Items: 扣分项列表。
注意:此字段可能返回 null,表示取不到有效值。
:type Items: list of ScoreItem
"""
self.IssueType = None
self.ScoreLost = None
self.ScoreLostMax = None
self.Items = None
def _deserialize(self, params):
self.IssueType = params.get("IssueType")
self.ScoreLost = params.get("ScoreLost")
self.ScoreLostMax = params.get("ScoreLostMax")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = ScoreItem()
obj._deserialize(item)
self.Items.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ScoreItem(AbstractModel):
"""诊断扣分项。
"""
def __init__(self):
r"""
:param DiagItem: 异常诊断项名称。
:type DiagItem: str
:param IssueType: 诊断项分类,取值包括:可用性、可维护性、性能及可靠性。
:type IssueType: str
:param TopSeverity: 健康等级,取值包括:信息、提示、告警、严重、致命。
:type TopSeverity: str
:param Count: 该异常诊断项出现次数。
:type Count: int
:param ScoreLost: 扣分分数。
:type ScoreLost: int
"""
self.DiagItem = None
self.IssueType = None
self.TopSeverity = None
self.Count = None
self.ScoreLost = None
def _deserialize(self, params):
self.DiagItem = params.get("DiagItem")
self.IssueType = params.get("IssueType")
self.TopSeverity = params.get("TopSeverity")
self.Count = params.get("Count")
self.ScoreLost = params.get("ScoreLost")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class SecLogExportTaskInfo(AbstractModel):
"""安全审计日志导出任务信息
"""
def __init__(self):
r"""
:param AsyncRequestId: 异步任务Id。
:type AsyncRequestId: int
:param StartTime: 任务开始时间。
注意:此字段可能返回 null,表示取不到有效值。
:type StartTime: str
:param EndTime: 任务结束时间。
注意:此字段可能返回 null,表示取不到有效值。
:type EndTime: str
:param CreateTime: 任务创建时间。
:type CreateTime: str
:param Status: 任务状态。
:type Status: str
:param Progress: 任务执行进度。
:type Progress: int
:param LogStartTime: 导出日志开始时间。
注意:此字段可能返回 null,表示取不到有效值。
:type LogStartTime: str
:param LogEndTime: 导出日志结束时间。
注意:此字段可能返回 null,表示取不到有效值。
:type LogEndTime: str
:param TotalSize: 日志文件总大小,单位KB。
注意:此字段可能返回 null,表示取不到有效值。
:type TotalSize: int
:param DangerLevels: 风险等级列表。0 无风险;1 低风险;2 中风险;3 高风险。
注意:此字段可能返回 null,表示取不到有效值。
:type DangerLevels: list of int non-negative
"""
self.AsyncRequestId = None
self.StartTime = None
self.EndTime = None
self.CreateTime = None
self.Status = None
self.Progress = None
self.LogStartTime = None
self.LogEndTime = None
self.TotalSize = None
self.DangerLevels = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.CreateTime = params.get("CreateTime")
self.Status = params.get("Status")
self.Progress = params.get("Progress")
self.LogStartTime = params.get("LogStartTime")
self.LogEndTime = params.get("LogEndTime")
self.TotalSize = params.get("TotalSize")
self.DangerLevels = params.get("DangerLevels")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class SlowLogHost(AbstractModel):
"""慢日志来源地址详情。
"""
def __init__(self):
r"""
:param UserHost: 来源地址。
:type UserHost: str
:param Ratio: 该来源地址的慢日志数目占总数目的比例,单位%。
:type Ratio: float
:param Count: 该来源地址的慢日志数目。
:type Count: int
"""
self.UserHost = None
self.Ratio = None
self.Count = None
def _deserialize(self, params):
self.UserHost = params.get("UserHost")
self.Ratio = params.get("Ratio")
self.Count = params.get("Count")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class SlowLogTopSqlItem(AbstractModel):
"""慢日志TopSql
"""
def __init__(self):
r"""
:param LockTime: sql总锁等待时间,单位秒
:type LockTime: float
:param LockTimeMax: 最大锁等待时间,单位秒
:type LockTimeMax: float
:param LockTimeMin: 最小锁等待时间,单位秒
:type LockTimeMin: float
:param RowsExamined: 总扫描行数
:type RowsExamined: int
:param RowsExaminedMax: 最大扫描行数
:type RowsExaminedMax: int
:param RowsExaminedMin: 最小扫描行数
:type RowsExaminedMin: int
:param QueryTime: 总耗时,单位秒
:type QueryTime: float
:param QueryTimeMax: 最大执行时间,单位秒
:type QueryTimeMax: float
:param QueryTimeMin: 最小执行时间,单位秒
:type QueryTimeMin: float
:param RowsSent: 总返回行数
:type RowsSent: int
:param RowsSentMax: 最大返回行数
:type RowsSentMax: int
:param RowsSentMin: 最小返回行数
:type RowsSentMin: int
:param ExecTimes: 执行次数
:type ExecTimes: int
:param SqlTemplate: sql模板
:type SqlTemplate: str
:param SqlText: 带参数SQL(随机)
:type SqlText: str
:param Schema: 数据库名
:type Schema: str
:param QueryTimeRatio: 总耗时占比,单位%
:type QueryTimeRatio: float
:param LockTimeRatio: sql总锁等待时间占比,单位%
:type LockTimeRatio: float
:param RowsExaminedRatio: 总扫描行数占比,单位%
:type RowsExaminedRatio: float
:param RowsSentRatio: 总返回行数占比,单位%
:type RowsSentRatio: float
:param QueryTimeAvg: 平均执行时间,单位秒
:type QueryTimeAvg: float
:param RowsSentAvg: 平均返回行数
:type RowsSentAvg: float
:param LockTimeAvg: 平均锁等待时间,单位秒
:type LockTimeAvg: float
:param RowsExaminedAvg: 平均扫描行数
:type RowsExaminedAvg: float
        :param Md5: SQL模板的MD5值
:type Md5: str
"""
self.LockTime = None
self.LockTimeMax = None
self.LockTimeMin = None
self.RowsExamined = None
self.RowsExaminedMax = None
self.RowsExaminedMin = None
self.QueryTime = None
self.QueryTimeMax = None
self.QueryTimeMin = None
self.RowsSent = None
self.RowsSentMax = None
self.RowsSentMin = None
self.ExecTimes = None
self.SqlTemplate = None
self.SqlText = None
self.Schema = None
self.QueryTimeRatio = None
self.LockTimeRatio = None
self.RowsExaminedRatio = None
self.RowsSentRatio = None
self.QueryTimeAvg = None
self.RowsSentAvg = None
self.LockTimeAvg = None
self.RowsExaminedAvg = None
self.Md5 = None
def _deserialize(self, params):
self.LockTime = params.get("LockTime")
self.LockTimeMax = params.get("LockTimeMax")
self.LockTimeMin = params.get("LockTimeMin")
self.RowsExamined = params.get("RowsExamined")
self.RowsExaminedMax = params.get("RowsExaminedMax")
self.RowsExaminedMin = params.get("RowsExaminedMin")
self.QueryTime = params.get("QueryTime")
self.QueryTimeMax = params.get("QueryTimeMax")
self.QueryTimeMin = params.get("QueryTimeMin")
self.RowsSent = params.get("RowsSent")
self.RowsSentMax = params.get("RowsSentMax")
self.RowsSentMin = params.get("RowsSentMin")
self.ExecTimes = params.get("ExecTimes")
self.SqlTemplate = params.get("SqlTemplate")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
self.QueryTimeRatio = params.get("QueryTimeRatio")
self.LockTimeRatio = params.get("LockTimeRatio")
self.RowsExaminedRatio = params.get("RowsExaminedRatio")
self.RowsSentRatio = params.get("RowsSentRatio")
self.QueryTimeAvg = params.get("QueryTimeAvg")
self.RowsSentAvg = params.get("RowsSentAvg")
self.LockTimeAvg = params.get("LockTimeAvg")
self.RowsExaminedAvg = params.get("RowsExaminedAvg")
self.Md5 = params.get("Md5")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class TableSpaceData(AbstractModel):
"""库表空间统计数据。
"""
def __init__(self):
r"""
:param TableName: 表名。
:type TableName: str
:param TableSchema: 库名。
:type TableSchema: str
:param Engine: 库表的存储引擎。
:type Engine: str
:param DataLength: 数据空间(MB)。
:type DataLength: float
:param IndexLength: 索引空间(MB)。
:type IndexLength: float
:param DataFree: 碎片空间(MB)。
:type DataFree: float
:param TotalLength: 总使用空间(MB)。
:type TotalLength: float
:param FragRatio: 碎片率(%)。
:type FragRatio: float
:param TableRows: 行数。
:type TableRows: int
:param PhysicalFileSize: 表对应的独立物理文件大小(MB)。
:type PhysicalFileSize: float
"""
self.TableName = None
self.TableSchema = None
self.Engine = None
self.DataLength = None
self.IndexLength = None
self.DataFree = None
self.TotalLength = None
self.FragRatio = None
self.TableRows = None
self.PhysicalFileSize = None
def _deserialize(self, params):
self.TableName = params.get("TableName")
self.TableSchema = params.get("TableSchema")
self.Engine = params.get("Engine")
self.DataLength = params.get("DataLength")
self.IndexLength = params.get("IndexLength")
self.DataFree = params.get("DataFree")
self.TotalLength = params.get("TotalLength")
self.FragRatio = params.get("FragRatio")
self.TableRows = params.get("TableRows")
self.PhysicalFileSize = params.get("PhysicalFileSize")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class TableSpaceTimeSeries(AbstractModel):
"""库表空间时序数据
"""
def __init__(self):
r"""
:param TableName: 表名。
:type TableName: str
:param TableSchema: 库名。
:type TableSchema: str
:param Engine: 库表的存储引擎。
:type Engine: str
:param SeriesData: 单位时间间隔内的空间指标数据。
:type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorFloatMetricSeriesData`
"""
self.TableName = None
self.TableSchema = None
self.Engine = None
self.SeriesData = None
def _deserialize(self, params):
self.TableName = params.get("TableName")
self.TableSchema = params.get("TableSchema")
self.Engine = params.get("Engine")
if params.get("SeriesData") is not None:
self.SeriesData = MonitorFloatMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class TimeSlice(AbstractModel):
"""单位时间间隔内的慢日志统计
"""
def __init__(self):
r"""
:param Count: 总数
:type Count: int
:param Timestamp: 统计开始时间
:type Timestamp: int
"""
self.Count = None
self.Timestamp = None
def _deserialize(self, params):
self.Count = params.get("Count")
self.Timestamp = params.get("Timestamp")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class UserProfile(AbstractModel):
"""用户配置的相关信息,包括邮件配置。
"""
def __init__(self):
r"""
:param ProfileId: 配置的id。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileId: str
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileType: str
:param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileLevel: str
:param ProfileName: 配置名称。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileName: str
:param ProfileInfo: 配置详情。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
"""
self.ProfileId = None
self.ProfileType = None
self.ProfileLevel = None
self.ProfileName = None
self.ProfileInfo = None
def _deserialize(self, params):
self.ProfileId = params.get("ProfileId")
self.ProfileType = params.get("ProfileType")
self.ProfileLevel = params.get("ProfileLevel")
self.ProfileName = params.get("ProfileName")
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
| # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AddUserContactRequest(AbstractModel):
"""AddUserContact请求参数结构体
"""
def __init__(self):
r"""
:param Name: 联系人姓名,由中英文、数字、空格、!@#$%^&*()_+-=()组成,不能以下划线开头,长度在20以内。
:type Name: str
:param ContactInfo: 邮箱地址,支持大小写字母、数字、下划线及@字符, 不能以下划线开头,邮箱地址不可重复。
:type ContactInfo: str
:param Product: 服务产品类型,固定值:"mysql"。
:type Product: str
"""
self.Name = None
self.ContactInfo = None
self.Product = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.ContactInfo = params.get("ContactInfo")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AddUserContactResponse(AbstractModel):
"""AddUserContact返回参数结构体
"""
def __init__(self):
r"""
:param Id: 添加成功的联系人id。
:type Id: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Id = None
self.RequestId = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.RequestId = params.get("RequestId")
class ContactItem(AbstractModel):
"""联系人contact描述。
"""
def __init__(self):
r"""
:param Id: 联系人id。
:type Id: int
:param Name: 联系人姓名。
:type Name: str
:param Mail: 联系人绑定的邮箱。
:type Mail: str
"""
self.Id = None
self.Name = None
self.Mail = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Name = params.get("Name")
self.Mail = params.get("Mail")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateDBDiagReportTaskRequest(AbstractModel):
"""CreateDBDiagReportTask请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param StartTime: 开始时间,如“2020-11-08T14:00:00+08:00”。
:type StartTime: str
:param EndTime: 结束时间,如“2020-11-09T14:00:00+08:00”。
:type EndTime: str
:param SendMailFlag: 是否发送邮件: 0 - 否,1 - 是。
:type SendMailFlag: int
:param ContactPerson: 接收邮件的联系人ID数组。
:type ContactPerson: list of int
:param ContactGroup: 接收邮件的联系组ID数组。
:type ContactGroup: list of int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认值为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SendMailFlag = None
self.ContactPerson = None
self.ContactGroup = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SendMailFlag = params.get("SendMailFlag")
self.ContactPerson = params.get("ContactPerson")
self.ContactGroup = params.get("ContactGroup")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDBDiagReportTaskResponse(AbstractModel):
"""CreateDBDiagReportTask返回参数结构体
"""
def __init__(self):
r"""
:param AsyncRequestId: 异步任务的请求 ID,可使用此 ID 查询异步任务的执行结果。
注意:此字段可能返回 null,表示取不到有效值。
:type AsyncRequestId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
class CreateDBDiagReportUrlRequest(AbstractModel):
"""CreateDBDiagReportUrl请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param AsyncRequestId: 健康报告相应的任务ID,可通过DescribeDBDiagReportTasks查询。
:type AsyncRequestId: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.AsyncRequestId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.AsyncRequestId = params.get("AsyncRequestId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateDBDiagReportUrlResponse(AbstractModel):
"""CreateDBDiagReportUrl返回参数结构体
"""
def __init__(self):
r"""
:param ReportUrl: 健康报告浏览地址。
:type ReportUrl: str
:param ExpireTime: 健康报告浏览地址到期时间戳(秒)。
:type ExpireTime: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ReportUrl = None
self.ExpireTime = None
self.RequestId = None
def _deserialize(self, params):
self.ReportUrl = params.get("ReportUrl")
self.ExpireTime = params.get("ExpireTime")
self.RequestId = params.get("RequestId")
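
# Hedged end-to-end sketch: create a health report task, then exchange its AsyncRequestId for a
# browsable URL with CreateDBDiagReportUrl. The report is generated asynchronously, so a real
# caller may need to poll DescribeDBDiagReportTasks before the URL request succeeds. The
# generated DbbrainClient (one method per action) and the time window are assumptions.
def _example_health_report_url(secret_id, secret_key, instance_id, region="ap-guangzhou"):
    from tencentcloud.common import credential
    from tencentcloud.dbbrain.v20210527 import dbbrain_client

    client = dbbrain_client.DbbrainClient(credential.Credential(secret_id, secret_key), region)

    task_req = CreateDBDiagReportTaskRequest()
    task_req.InstanceId = instance_id
    task_req.StartTime = "2020-11-08T14:00:00+08:00"   # placeholder diagnosis window
    task_req.EndTime = "2020-11-09T14:00:00+08:00"
    task_req.SendMailFlag = 0
    async_id = client.CreateDBDiagReportTask(task_req).AsyncRequestId

    url_req = CreateDBDiagReportUrlRequest()
    url_req.InstanceId = instance_id
    url_req.AsyncRequestId = async_id                  # may require waiting until the task finishes
    url_resp = client.CreateDBDiagReportUrl(url_req)
    return url_resp.ReportUrl, url_resp.ExpireTime
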
class CreateMailProfileRequest(AbstractModel):
"""CreateMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param ProfileInfo: 邮件配置内容。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
:param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。
:type ProfileLevel: str
:param ProfileName: 配置名称,需要保持唯一性,数据库巡检邮件配置名称自拟;定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。
:type ProfileName: str
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
:type ProfileType: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。
:type Product: str
:param BindInstanceIds: 配置绑定的实例ID,当配置级别为"Instance"时需要传入且只能为一个实例;当配置级别为“User”时,此参数不填。
:type BindInstanceIds: list of str
"""
self.ProfileInfo = None
self.ProfileLevel = None
self.ProfileName = None
self.ProfileType = None
self.Product = None
self.BindInstanceIds = None
def _deserialize(self, params):
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileLevel = params.get("ProfileLevel")
self.ProfileName = params.get("ProfileName")
self.ProfileType = params.get("ProfileType")
self.Product = params.get("Product")
self.BindInstanceIds = params.get("BindInstanceIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateMailProfileResponse(AbstractModel):
"""CreateMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
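
# Hedged sketch of creating a user-level inspection mail profile. ProfileInfo and
# MailConfiguration are model classes defined elsewhere in this module (the field docs above
# reference them); the DbbrainClient usage, profile name and contact IDs are assumptions.
def _example_create_mail_profile(secret_id, secret_key, contact_ids, region="ap-guangzhou"):
    from tencentcloud.common import credential
    from tencentcloud.dbbrain.v20210527 import dbbrain_client

    mail_conf = MailConfiguration()
    mail_conf.SendMail = 1
    mail_conf.Region = [region]
    mail_conf.HealthStatus = ["RISK", "HIGH_RISK"]      # only mail risky reports
    mail_conf.ContactPerson = contact_ids

    profile_info = ProfileInfo()
    profile_info.Language = "zh"
    profile_info.MailConfiguration = mail_conf

    req = CreateMailProfileRequest()
    req.ProfileInfo = profile_info
    req.ProfileLevel = "User"
    req.ProfileName = "dbscan-mail-demo"                # placeholder unique profile name
    req.ProfileType = "dbScan_mail_configuration"
    req.Product = "mysql"

    client = dbbrain_client.DbbrainClient(credential.Credential(secret_id, secret_key), region)
    return client.CreateMailProfile(req).RequestId
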
class CreateSchedulerMailProfileRequest(AbstractModel):
"""CreateSchedulerMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param WeekConfiguration: 取值范围1-7,分别代表周一至周日。
:type WeekConfiguration: list of int
:param ProfileInfo: 邮件配置内容。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
:param ProfileName: 配置名称,需要保持唯一性,定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。
:type ProfileName: str
:param BindInstanceId: 配置订阅的实例ID。
:type BindInstanceId: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.WeekConfiguration = None
self.ProfileInfo = None
self.ProfileName = None
self.BindInstanceId = None
self.Product = None
def _deserialize(self, params):
self.WeekConfiguration = params.get("WeekConfiguration")
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileName = params.get("ProfileName")
self.BindInstanceId = params.get("BindInstanceId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateSchedulerMailProfileResponse(AbstractModel):
"""CreateSchedulerMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateSecurityAuditLogExportTaskRequest(AbstractModel):
"""CreateSecurityAuditLogExportTask请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param StartTime: 导出日志开始时间,例如2020-12-28 00:00:00。
:type StartTime: str
:param EndTime: 导出日志结束时间,例如2020-12-28 01:00:00。
:type EndTime: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
:param DangerLevels: 日志风险等级列表,支持值包括:0 无风险;1 低风险;2 中风险;3 高风险。
:type DangerLevels: list of int
"""
self.SecAuditGroupId = None
self.StartTime = None
self.EndTime = None
self.Product = None
self.DangerLevels = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
self.DangerLevels = params.get("DangerLevels")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateSecurityAuditLogExportTaskResponse(AbstractModel):
"""CreateSecurityAuditLogExportTask返回参数结构体
"""
def __init__(self):
r"""
:param AsyncRequestId: 日志导出任务Id。
:type AsyncRequestId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
class DeleteSecurityAuditLogExportTasksRequest(AbstractModel):
"""DeleteSecurityAuditLogExportTasks请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param AsyncRequestIds: 日志导出任务Id列表,接口会忽略不存在或已删除的任务Id。
:type AsyncRequestIds: list of int non-negative
:param Product: 服务产品类型,支持值: "mysql" - 云数据库 MySQL。
:type Product: str
"""
self.SecAuditGroupId = None
self.AsyncRequestIds = None
self.Product = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.AsyncRequestIds = params.get("AsyncRequestIds")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteSecurityAuditLogExportTasksResponse(AbstractModel):
"""DeleteSecurityAuditLogExportTasks返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeAllUserContactRequest(AbstractModel):
"""DescribeAllUserContact请求参数结构体
"""
def __init__(self):
r"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系人名数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeAllUserContactResponse(AbstractModel):
"""DescribeAllUserContact返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 联系人的总数量。
:type TotalCount: int
:param Contacts: 联系人的信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Contacts: list of ContactItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Contacts = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Contacts") is not None:
self.Contacts = []
for item in params.get("Contacts"):
obj = ContactItem()
obj._deserialize(item)
self.Contacts.append(obj)
self.RequestId = params.get("RequestId")
class DescribeAllUserGroupRequest(AbstractModel):
"""DescribeAllUserGroup请求参数结构体
"""
def __init__(self):
r"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系组名称数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeAllUserGroupResponse(AbstractModel):
"""DescribeAllUserGroup返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 组总数。
:type TotalCount: int
:param Groups: 组信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Groups: list of GroupItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Groups = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Groups") is not None:
self.Groups = []
for item in params.get("Groups"):
obj = GroupItem()
obj._deserialize(item)
self.Groups.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBDiagEventRequest(AbstractModel):
"""DescribeDBDiagEvent请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param EventId: 事件 ID 。通过“获取实例诊断历史DescribeDBDiagHistory”获取。
:type EventId: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.EventId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.EventId = params.get("EventId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBDiagEventResponse(AbstractModel):
"""DescribeDBDiagEvent返回参数结构体
"""
def __init__(self):
r"""
:param DiagItem: 诊断项。
:type DiagItem: str
:param DiagType: 诊断类型。
:type DiagType: str
:param EventId: 事件 ID 。
:type EventId: int
:param Explanation: 诊断事件详情,若无附加解释信息则输出为空。
:type Explanation: str
:param Outline: 诊断概要。
:type Outline: str
:param Problem: 诊断出的问题。
:type Problem: str
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param StartTime: 开始时间
:type StartTime: str
:param Suggestions: 诊断建议,若无建议则输出为空。
:type Suggestions: str
:param Metric: 保留字段。
注意:此字段可能返回 null,表示取不到有效值。
:type Metric: str
:param EndTime: 结束时间。
:type EndTime: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiagItem = None
self.DiagType = None
self.EventId = None
self.Explanation = None
self.Outline = None
self.Problem = None
self.Severity = None
self.StartTime = None
self.Suggestions = None
self.Metric = None
self.EndTime = None
self.RequestId = None
def _deserialize(self, params):
self.DiagItem = params.get("DiagItem")
self.DiagType = params.get("DiagType")
self.EventId = params.get("EventId")
self.Explanation = params.get("Explanation")
self.Outline = params.get("Outline")
self.Problem = params.get("Problem")
self.Severity = params.get("Severity")
self.StartTime = params.get("StartTime")
self.Suggestions = params.get("Suggestions")
self.Metric = params.get("Metric")
self.EndTime = params.get("EndTime")
self.RequestId = params.get("RequestId")
class DescribeDBDiagHistoryRequest(AbstractModel):
"""DescribeDBDiagHistory请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 结束时间,如“2019-09-11 12:13:14”,结束时间与开始时间的间隔最大可为2天。
:type EndTime: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDBDiagHistoryResponse(AbstractModel):
"""DescribeDBDiagHistory返回参数结构体
"""
def __init__(self):
r"""
:param Events: 事件描述。
:type Events: list of DiagHistoryEventItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Events = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Events") is not None:
self.Events = []
for item in params.get("Events"):
obj = DiagHistoryEventItem()
obj._deserialize(item)
self.Events.append(obj)
self.RequestId = params.get("RequestId")
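
# Hedged sketch: list diagnosis events for a window with DescribeDBDiagHistory, then fetch the
# detail of each one via DescribeDBDiagEvent. It assumes the generated DbbrainClient and that the
# DiagHistoryEventItem elements carry an EventId field, as the DescribeDBDiagEvent docs indicate.
def _example_diag_history_details(secret_id, secret_key, instance_id, region="ap-guangzhou"):
    from tencentcloud.common import credential
    from tencentcloud.dbbrain.v20210527 import dbbrain_client

    client = dbbrain_client.DbbrainClient(credential.Credential(secret_id, secret_key), region)

    hist_req = DescribeDBDiagHistoryRequest()
    hist_req.InstanceId = instance_id
    hist_req.StartTime = "2019-09-10 12:13:14"          # placeholder window (max 2 days)
    hist_req.EndTime = "2019-09-11 12:13:14"
    events = client.DescribeDBDiagHistory(hist_req).Events or []

    details = []
    for event in events:
        ev_req = DescribeDBDiagEventRequest()
        ev_req.InstanceId = instance_id
        ev_req.EventId = event.EventId
        details.append(client.DescribeDBDiagEvent(ev_req))
    return details
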
class DescribeDBDiagReportTasksRequest(AbstractModel):
"""DescribeDBDiagReportTasks请求参数结构体
"""
def __init__(self):
r"""
:param StartTime: 第一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type StartTime: str
:param EndTime: 最后一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type EndTime: str
:param InstanceIds: 实例ID数组,用于筛选指定实例的任务列表。
:type InstanceIds: list of str
:param Sources: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。
:type Sources: list of str
:param HealthLevels: 报告的健康等级,支持的取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK" - 危险;"HIGH_RISK" - 高危。
:type HealthLevels: str
:param TaskStatuses: 任务的状态,支持的取值包括:"created" - 新建;"chosen" - 待执行; "running" - 执行中;"failed" - 失败;"finished" - 已完成。
:type TaskStatuses: str
:param Offset: 偏移量,默认0。
:type Offset: int
:param Limit: 返回数量,默认20,最大值为100。
:type Limit: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.StartTime = None
self.EndTime = None
self.InstanceIds = None
self.Sources = None
self.HealthLevels = None
self.TaskStatuses = None
self.Offset = None
self.Limit = None
self.Product = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.InstanceIds = params.get("InstanceIds")
self.Sources = params.get("Sources")
self.HealthLevels = params.get("HealthLevels")
self.TaskStatuses = params.get("TaskStatuses")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBDiagReportTasksResponse(AbstractModel):
"""DescribeDBDiagReportTasks返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 任务总数目。
:type TotalCount: int
:param Tasks: 任务列表。
:type Tasks: list of HealthReportTask
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Tasks = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Tasks") is not None:
self.Tasks = []
for item in params.get("Tasks"):
obj = HealthReportTask()
obj._deserialize(item)
self.Tasks.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBSpaceStatusRequest(AbstractModel):
"""DescribeDBSpaceStatus请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param RangeDays: 时间段天数,截止日期为当日,默认为7天。
:type RangeDays: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.RangeDays = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.RangeDays = params.get("RangeDays")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDBSpaceStatusResponse(AbstractModel):
"""DescribeDBSpaceStatus返回参数结构体
"""
def __init__(self):
r"""
:param Growth: 磁盘增长量(MB)。
:type Growth: int
:param Remain: 磁盘剩余(MB)。
:type Remain: int
:param Total: 磁盘总量(MB)。
:type Total: int
:param AvailableDays: 预计可用天数。
:type AvailableDays: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Growth = None
self.Remain = None
self.Total = None
self.AvailableDays = None
self.RequestId = None
def _deserialize(self, params):
self.Growth = params.get("Growth")
self.Remain = params.get("Remain")
self.Total = params.get("Total")
self.AvailableDays = params.get("AvailableDays")
self.RequestId = params.get("RequestId")
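
# Sketch of interpreting a DescribeDBSpaceStatus response: a rough days-left estimate is Remain
# divided by the average daily growth over the sampled window (RangeDays), which is the same idea
# the service's own AvailableDays field expresses. The default window of 7 days mirrors the
# request's default RangeDays.
def _example_days_until_full(resp, range_days=7):
    """resp is a DescribeDBSpaceStatusResponse; range_days should match the request's RangeDays."""
    if not resp.Growth or resp.Growth <= 0:
        return None                      # usage is flat or shrinking; no meaningful estimate
    daily_growth_mb = float(resp.Growth) / range_days
    return resp.Remain / daily_growth_mb
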
class DescribeDiagDBInstancesRequest(AbstractModel):
"""DescribeDiagDBInstances请求参数结构体
"""
def __init__(self):
r"""
:param IsSupported: 是否是DBbrain支持的实例,固定传 true。
:type IsSupported: bool
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Offset: 分页参数,偏移量。
:type Offset: int
:param Limit: 分页参数,分页值,最大值为100。
:type Limit: int
:param InstanceNames: 根据实例名称条件查询。
:type InstanceNames: list of str
:param InstanceIds: 根据实例ID条件查询。
:type InstanceIds: list of str
:param Regions: 根据地域条件查询。
:type Regions: list of str
"""
self.IsSupported = None
self.Product = None
self.Offset = None
self.Limit = None
self.InstanceNames = None
self.InstanceIds = None
self.Regions = None
def _deserialize(self, params):
self.IsSupported = params.get("IsSupported")
self.Product = params.get("Product")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.InstanceNames = params.get("InstanceNames")
self.InstanceIds = params.get("InstanceIds")
self.Regions = params.get("Regions")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDiagDBInstancesResponse(AbstractModel):
"""DescribeDiagDBInstances返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 实例总数。
:type TotalCount: int
:param DbScanStatus: 全实例巡检状态:0:开启全实例巡检;1:未开启全实例巡检。
:type DbScanStatus: int
:param Items: 实例相关信息。
:type Items: list of InstanceInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.DbScanStatus = None
self.Items = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
self.DbScanStatus = params.get("DbScanStatus")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = InstanceInfo()
obj._deserialize(item)
self.Items.append(obj)
self.RequestId = params.get("RequestId")
class DescribeHealthScoreRequest(AbstractModel):
"""DescribeHealthScore请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 需要获取健康得分的实例ID。
:type InstanceId: str
:param Time: 获取健康得分的时间,时间格式如:2019-09-10 12:13:14。
:type Time: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Time = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Time = params.get("Time")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeHealthScoreResponse(AbstractModel):
"""DescribeHealthScore返回参数结构体
"""
def __init__(self):
r"""
:param Data: 健康得分以及异常扣分项。
:type Data: :class:`tencentcloud.dbbrain.v20210527.models.HealthScoreInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = HealthScoreInfo()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
class DescribeMailProfileRequest(AbstractModel):
"""DescribeMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
:type ProfileType: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Offset: 分页偏移量。
:type Offset: int
:param Limit: 分页单位,最大支持50。
:type Limit: int
:param ProfileName: 根据邮件配置名称查询,定期发送的邮件配置名称遵循:"scheduler_"+{instanceId}的规则。
:type ProfileName: str
"""
self.ProfileType = None
self.Product = None
self.Offset = None
self.Limit = None
self.ProfileName = None
def _deserialize(self, params):
self.ProfileType = params.get("ProfileType")
self.Product = params.get("Product")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.ProfileName = params.get("ProfileName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeMailProfileResponse(AbstractModel):
"""DescribeMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param ProfileList: 邮件配置详情。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileList: list of UserProfile
:param TotalCount: 邮件模版总数。
注意:此字段可能返回 null,表示取不到有效值。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ProfileList = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProfileList") is not None:
self.ProfileList = []
for item in params.get("ProfileList"):
obj = UserProfile()
obj._deserialize(item)
self.ProfileList.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeMySqlProcessListRequest(AbstractModel):
"""DescribeMySqlProcessList请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param ID: 线程的ID,用于筛选线程列表。
:type ID: int
:param User: 线程的操作账号名,用于筛选线程列表。
:type User: str
:param Host: 线程的操作主机地址,用于筛选线程列表。
:type Host: str
:param DB: 线程的操作数据库,用于筛选线程列表。
:type DB: str
:param State: 线程的操作状态,用于筛选线程列表。
:type State: str
:param Command: 线程的执行类型,用于筛选线程列表。
:type Command: str
:param Time: 线程的操作时长最小值,单位秒,用于筛选操作时长大于该值的线程列表。
:type Time: int
:param Info: 线程的操作语句,用于筛选线程列表。
:type Info: str
:param Limit: 返回数量,默认20。
:type Limit: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.ID = None
self.User = None
self.Host = None
self.DB = None
self.State = None
self.Command = None
self.Time = None
self.Info = None
self.Limit = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.ID = params.get("ID")
self.User = params.get("User")
self.Host = params.get("Host")
self.DB = params.get("DB")
self.State = params.get("State")
self.Command = params.get("Command")
self.Time = params.get("Time")
self.Info = params.get("Info")
self.Limit = params.get("Limit")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeMySqlProcessListResponse(AbstractModel):
"""DescribeMySqlProcessList返回参数结构体
"""
def __init__(self):
r"""
:param ProcessList: 实时线程列表。
:type ProcessList: list of MySqlProcess
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ProcessList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProcessList") is not None:
self.ProcessList = []
for item in params.get("ProcessList"):
obj = MySqlProcess()
obj._deserialize(item)
self.ProcessList.append(obj)
self.RequestId = params.get("RequestId")
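
# Hedged sketch: pull the real-time process list and keep only long-running sessions. Per the
# request field docs, the Time filter restricts results to operations running at least
# `min_seconds`; the generated DbbrainClient is assumed as in the other sketches.
def _example_long_running_threads(secret_id, secret_key, instance_id, min_seconds=10, region="ap-guangzhou"):
    from tencentcloud.common import credential
    from tencentcloud.dbbrain.v20210527 import dbbrain_client

    client = dbbrain_client.DbbrainClient(credential.Credential(secret_id, secret_key), region)
    req = DescribeMySqlProcessListRequest()
    req.InstanceId = instance_id
    req.Time = min_seconds                  # only threads running longer than this many seconds
    req.Limit = 100
    resp = client.DescribeMySqlProcessList(req)
    # Each entry is a MySqlProcess; ID/User/Host/DB/State/Command/Time/Info mirror SHOW PROCESSLIST.
    return [(p.ID, p.User, p.Time, p.Info) for p in resp.ProcessList or []]
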
class DescribeSecurityAuditLogDownloadUrlsRequest(AbstractModel):
"""DescribeSecurityAuditLogDownloadUrls请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param AsyncRequestId: 异步任务Id。
:type AsyncRequestId: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
"""
self.SecAuditGroupId = None
self.AsyncRequestId = None
self.Product = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.AsyncRequestId = params.get("AsyncRequestId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeSecurityAuditLogDownloadUrlsResponse(AbstractModel):
"""DescribeSecurityAuditLogDownloadUrls返回参数结构体
"""
def __init__(self):
r"""
:param Urls: 导出结果的COS链接列表。当结果集很大时,可能会切分为多个url下载。
:type Urls: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Urls = None
self.RequestId = None
def _deserialize(self, params):
self.Urls = params.get("Urls")
self.RequestId = params.get("RequestId")
class DescribeSecurityAuditLogExportTasksRequest(AbstractModel):
"""DescribeSecurityAuditLogExportTasks请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
:param AsyncRequestIds: 日志导出任务Id列表。
:type AsyncRequestIds: list of int non-negative
:param Offset: 偏移量,默认0。
:type Offset: int
:param Limit: 返回数量,默认20,最大值为100。
:type Limit: int
"""
self.SecAuditGroupId = None
self.Product = None
self.AsyncRequestIds = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.Product = params.get("Product")
self.AsyncRequestIds = params.get("AsyncRequestIds")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSecurityAuditLogExportTasksResponse(AbstractModel):
"""DescribeSecurityAuditLogExportTasks返回参数结构体
"""
def __init__(self):
r"""
:param Tasks: 安全审计日志导出任务列表。
:type Tasks: list of SecLogExportTaskInfo
:param TotalCount: 安全审计日志导出任务总数。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Tasks = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Tasks") is not None:
self.Tasks = []
for item in params.get("Tasks"):
obj = SecLogExportTaskInfo()
obj._deserialize(item)
self.Tasks.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
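
# Hedged sketch of the security audit log export workflow: create an export task, poll it with
# DescribeSecurityAuditLogExportTasks, then fetch the COS download URLs. Treating a Progress of
# 100 as "finished" is an assumption (SecLogExportTaskInfo only documents Progress as an int);
# the generated DbbrainClient and the export window are assumptions as well.
def _example_export_security_audit_logs(secret_id, secret_key, sec_audit_group_id, region="ap-guangzhou"):
    import time
    from tencentcloud.common import credential
    from tencentcloud.dbbrain.v20210527 import dbbrain_client

    client = dbbrain_client.DbbrainClient(credential.Credential(secret_id, secret_key), region)

    create_req = CreateSecurityAuditLogExportTaskRequest()
    create_req.SecAuditGroupId = sec_audit_group_id
    create_req.StartTime = "2020-12-28 00:00:00"        # placeholder export window
    create_req.EndTime = "2020-12-28 01:00:00"
    create_req.Product = "mysql"
    async_id = client.CreateSecurityAuditLogExportTask(create_req).AsyncRequestId

    poll_req = DescribeSecurityAuditLogExportTasksRequest()
    poll_req.SecAuditGroupId = sec_audit_group_id
    poll_req.Product = "mysql"
    poll_req.AsyncRequestIds = [async_id]
    for _ in range(60):                                 # bounded polling loop
        tasks = client.DescribeSecurityAuditLogExportTasks(poll_req).Tasks or []
        if tasks and tasks[0].Progress == 100:          # assumed completion condition
            break
        time.sleep(5)

    url_req = DescribeSecurityAuditLogDownloadUrlsRequest()
    url_req.SecAuditGroupId = sec_audit_group_id
    url_req.AsyncRequestId = async_id
    url_req.Product = "mysql"
    return client.DescribeSecurityAuditLogDownloadUrls(url_req).Urls
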
class DescribeSlowLogTimeSeriesStatsRequest(AbstractModel):
"""DescribeSlowLogTimeSeriesStats请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 结束时间,如“2019-09-10 12:13:14”,结束时间与开始时间的间隔最大可为7天。
:type EndTime: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeSlowLogTimeSeriesStatsResponse(AbstractModel):
"""DescribeSlowLogTimeSeriesStats返回参数结构体
"""
def __init__(self):
r"""
:param Period: 柱间单位时间间隔,单位为秒。
:type Period: int
:param TimeSeries: 单位时间间隔内慢日志数量统计。
:type TimeSeries: list of TimeSlice
:param SeriesData: 单位时间间隔内的实例 cpu 利用率监控数据。
:type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Period = None
self.TimeSeries = None
self.SeriesData = None
self.RequestId = None
def _deserialize(self, params):
self.Period = params.get("Period")
if params.get("TimeSeries") is not None:
self.TimeSeries = []
for item in params.get("TimeSeries"):
obj = TimeSlice()
obj._deserialize(item)
self.TimeSeries.append(obj)
if params.get("SeriesData") is not None:
self.SeriesData = MonitorMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
self.RequestId = params.get("RequestId")
class DescribeSlowLogTopSqlsRequest(AbstractModel):
"""DescribeSlowLogTopSqls请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 截止时间,如“2019-09-11 10:13:14”,截止时间与开始时间的间隔小于7天。
:type EndTime: str
:param SortBy: 排序键,目前支持 QueryTime,ExecTimes,RowsSent,LockTime以及RowsExamined 等排序键,默认为QueryTime。
:type SortBy: str
:param OrderBy: 排序方式,支持ASC(升序)以及DESC(降序),默认为DESC。
:type OrderBy: str
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: int
:param Offset: 偏移量,默认为0。
:type Offset: int
:param SchemaList: 数据库名称数组。
:type SchemaList: list of SchemaItem
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SortBy = None
self.OrderBy = None
self.Limit = None
self.Offset = None
self.SchemaList = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SortBy = params.get("SortBy")
self.OrderBy = params.get("OrderBy")
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
if params.get("SchemaList") is not None:
self.SchemaList = []
for item in params.get("SchemaList"):
obj = SchemaItem()
obj._deserialize(item)
self.SchemaList.append(obj)
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSlowLogTopSqlsResponse(AbstractModel):
"""DescribeSlowLogTopSqls返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 符合条件的记录总数。
:type TotalCount: int
:param Rows: 慢日志 top sql 列表
:type Rows: list of SlowLogTopSqlItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Rows = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Rows") is not None:
self.Rows = []
for item in params.get("Rows"):
obj = SlowLogTopSqlItem()
obj._deserialize(item)
self.Rows.append(obj)
self.RequestId = params.get("RequestId")
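
# Hedged sketch: fetch the top slow-log SQL templates for a window, sorted by total query time.
# SortBy/OrderBy values come straight from the request docs above; the generated DbbrainClient
# and the time window are the usual assumptions.
def _example_top_slow_sqls(secret_id, secret_key, instance_id, region="ap-guangzhou"):
    from tencentcloud.common import credential
    from tencentcloud.dbbrain.v20210527 import dbbrain_client

    client = dbbrain_client.DbbrainClient(credential.Credential(secret_id, secret_key), region)
    req = DescribeSlowLogTopSqlsRequest()
    req.InstanceId = instance_id
    req.StartTime = "2019-09-10 12:13:14"       # placeholder window (< 7 days)
    req.EndTime = "2019-09-11 10:13:14"
    req.SortBy = "QueryTime"
    req.OrderBy = "DESC"
    req.Limit = 20
    resp = client.DescribeSlowLogTopSqls(req)
    # Each row is a SlowLogTopSqlItem; SqlTemplate plus the aggregate timings identify the worst offenders.
    return [(row.SqlTemplate, row.QueryTime, row.ExecTimes) for row in resp.Rows or []]
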
class DescribeSlowLogUserHostStatsRequest(AbstractModel):
"""DescribeSlowLogUserHostStats请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param StartTime: 查询范围的开始时间,时间格式如:2019-09-10 12:13:14。
:type StartTime: str
:param EndTime: 查询范围的结束时间,时间格式如:2019-09-10 12:13:14。
:type EndTime: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
        :param Md5: SQL模板的MD5值
:type Md5: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
self.Md5 = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
self.Md5 = params.get("Md5")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSlowLogUserHostStatsResponse(AbstractModel):
"""DescribeSlowLogUserHostStats返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 来源地址数目。
:type TotalCount: int
:param Items: 各来源地址的慢日志占比详情列表。
:type Items: list of SlowLogHost
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Items = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = SlowLogHost()
obj._deserialize(item)
self.Items.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceSchemaTimeSeriesRequest(AbstractModel):
"""DescribeTopSpaceSchemaTimeSeries请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param Limit: 返回的Top库数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。
:type StartDate: str
:param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。
:type EndDate: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.StartDate = None
self.EndDate = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.StartDate = params.get("StartDate")
self.EndDate = params.get("EndDate")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceSchemaTimeSeriesResponse(AbstractModel):
"""DescribeTopSpaceSchemaTimeSeries返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceSchemaTimeSeries: 返回的Top库空间统计信息的时序数据列表。
:type TopSpaceSchemaTimeSeries: list of SchemaSpaceTimeSeries
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceSchemaTimeSeries = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceSchemaTimeSeries") is not None:
self.TopSpaceSchemaTimeSeries = []
for item in params.get("TopSpaceSchemaTimeSeries"):
obj = SchemaSpaceTimeSeries()
obj._deserialize(item)
self.TopSpaceSchemaTimeSeries.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceSchemasRequest(AbstractModel):
"""DescribeTopSpaceSchemas请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top库数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceSchemasResponse(AbstractModel):
"""DescribeTopSpaceSchemas返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceSchemas: 返回的Top库空间统计信息列表。
:type TopSpaceSchemas: list of SchemaSpaceData
:param Timestamp: 采集库空间数据的时间戳(秒)。
:type Timestamp: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceSchemas = None
self.Timestamp = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceSchemas") is not None:
self.TopSpaceSchemas = []
for item in params.get("TopSpaceSchemas"):
obj = SchemaSpaceData()
obj._deserialize(item)
self.TopSpaceSchemas.append(obj)
self.Timestamp = params.get("Timestamp")
self.RequestId = params.get("RequestId")
class DescribeTopSpaceTableTimeSeriesRequest(AbstractModel):
"""DescribeTopSpaceTableTimeSeries请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top表数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize,默认为 PhysicalFileSize。
:type SortBy: str
:param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。
:type StartDate: str
:param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。
:type EndDate: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.StartDate = None
self.EndDate = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.StartDate = params.get("StartDate")
self.EndDate = params.get("EndDate")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceTableTimeSeriesResponse(AbstractModel):
"""DescribeTopSpaceTableTimeSeries返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceTableTimeSeries: 返回的Top表空间统计信息的时序数据列表。
:type TopSpaceTableTimeSeries: list of TableSpaceTimeSeries
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceTableTimeSeries = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceTableTimeSeries") is not None:
self.TopSpaceTableTimeSeries = []
for item in params.get("TopSpaceTableTimeSeries"):
obj = TableSpaceTimeSeries()
obj._deserialize(item)
self.TopSpaceTableTimeSeries.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceTablesRequest(AbstractModel):
"""DescribeTopSpaceTables请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top表数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceTablesResponse(AbstractModel):
"""DescribeTopSpaceTables返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceTables: 返回的Top表空间统计信息列表。
:type TopSpaceTables: list of TableSpaceData
:param Timestamp: 采集表空间数据的时间戳(秒)。
:type Timestamp: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceTables = None
self.Timestamp = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceTables") is not None:
self.TopSpaceTables = []
for item in params.get("TopSpaceTables"):
obj = TableSpaceData()
obj._deserialize(item)
self.TopSpaceTables.append(obj)
self.Timestamp = params.get("Timestamp")
self.RequestId = params.get("RequestId")
class DescribeUserSqlAdviceRequest(AbstractModel):
"""DescribeUserSqlAdvice请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param SqlText: SQL语句。
:type SqlText: str
:param Schema: 库名。
:type Schema: str
"""
self.InstanceId = None
self.SqlText = None
self.Schema = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeUserSqlAdviceResponse(AbstractModel):
"""DescribeUserSqlAdvice返回参数结构体
"""
def __init__(self):
r"""
:param Advices: SQL优化建议,可解析为JSON数组,无需优化时输出为空。
:type Advices: str
:param Comments: SQL优化建议备注,可解析为String数组,无需优化时输出为空。
:type Comments: str
:param SqlText: SQL语句。
:type SqlText: str
:param Schema: 库名。
:type Schema: str
:param Tables: 相关表的DDL信息,可解析为JSON数组。
:type Tables: str
:param SqlPlan: SQL执行计划,可解析为JSON,无需优化时输出为空。
:type SqlPlan: str
:param Cost: SQL优化后的成本节约详情,可解析为JSON,无需优化时输出为空。
:type Cost: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Advices = None
self.Comments = None
self.SqlText = None
self.Schema = None
self.Tables = None
self.SqlPlan = None
self.Cost = None
self.RequestId = None
def _deserialize(self, params):
self.Advices = params.get("Advices")
self.Comments = params.get("Comments")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
self.Tables = params.get("Tables")
self.SqlPlan = params.get("SqlPlan")
self.Cost = params.get("Cost")
self.RequestId = params.get("RequestId")
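# --- Illustrative helper (not part of the generated SDK models) -------------
# The Advices, Tables, SqlPlan and Cost fields above are documented as
# JSON-encoded strings that are empty when there is nothing to optimize, so
# callers usually decode them before use. A minimal sketch; the helper name is
# made up for illustration.
def _example_parse_sql_advices(resp):
    """Return DescribeUserSqlAdviceResponse.Advices as a Python list ([] if empty)."""
    import json
    if not resp.Advices:
        return []
    return json.loads(resp.Advices)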
class DiagHistoryEventItem(AbstractModel):
"""实例诊断历史事件
"""
def __init__(self):
r"""
:param DiagType: 诊断类型。
:type DiagType: str
:param EndTime: 结束时间。
:type EndTime: str
:param StartTime: 开始时间。
:type StartTime: str
:param EventId: 事件唯一ID 。
:type EventId: int
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param Outline: 诊断概要。
:type Outline: str
:param DiagItem: 诊断项说明。
:type DiagItem: str
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Metric: 保留字段。
注意:此字段可能返回 null,表示取不到有效值。
:type Metric: str
:param Region: 地域。
:type Region: str
"""
self.DiagType = None
self.EndTime = None
self.StartTime = None
self.EventId = None
self.Severity = None
self.Outline = None
self.DiagItem = None
self.InstanceId = None
self.Metric = None
self.Region = None
def _deserialize(self, params):
self.DiagType = params.get("DiagType")
self.EndTime = params.get("EndTime")
self.StartTime = params.get("StartTime")
self.EventId = params.get("EventId")
self.Severity = params.get("Severity")
self.Outline = params.get("Outline")
self.DiagItem = params.get("DiagItem")
self.InstanceId = params.get("InstanceId")
self.Metric = params.get("Metric")
self.Region = params.get("Region")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class EventInfo(AbstractModel):
"""异常事件信息。
"""
def __init__(self):
r"""
:param EventId: 事件 ID 。
:type EventId: int
:param DiagType: 诊断类型。
:type DiagType: str
:param StartTime: 开始时间。
:type StartTime: str
:param EndTime: 结束时间。
:type EndTime: str
:param Outline: 概要。
:type Outline: str
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param ScoreLost: 扣分。
:type ScoreLost: int
:param Metric: 保留字段。
:type Metric: str
:param Count: 告警数目。
:type Count: int
"""
self.EventId = None
self.DiagType = None
self.StartTime = None
self.EndTime = None
self.Outline = None
self.Severity = None
self.ScoreLost = None
self.Metric = None
self.Count = None
def _deserialize(self, params):
self.EventId = params.get("EventId")
self.DiagType = params.get("DiagType")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Outline = params.get("Outline")
self.Severity = params.get("Severity")
self.ScoreLost = params.get("ScoreLost")
self.Metric = params.get("Metric")
self.Count = params.get("Count")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class GroupItem(AbstractModel):
"""描述组信息。
"""
def __init__(self):
r"""
:param Id: 组id。
:type Id: int
:param Name: 组名称。
:type Name: str
:param MemberCount: 组成员数量。
:type MemberCount: int
"""
self.Id = None
self.Name = None
self.MemberCount = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Name = params.get("Name")
self.MemberCount = params.get("MemberCount")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class HealthReportTask(AbstractModel):
"""健康报告任务详情。
"""
def __init__(self):
r"""
:param AsyncRequestId: 异步任务请求 ID。
:type AsyncRequestId: int
:param Source: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。
:type Source: str
:param Progress: 任务完成进度,单位%。
:type Progress: int
:param CreateTime: 任务创建时间。
:type CreateTime: str
:param StartTime: 任务开始执行时间。
:type StartTime: str
:param EndTime: 任务完成执行时间。
:type EndTime: str
:param InstanceInfo: 任务所属实例的基础信息。
:type InstanceInfo: :class:`tencentcloud.dbbrain.v20210527.models.InstanceBasicInfo`
:param HealthStatus: 健康报告中的健康信息。
:type HealthStatus: :class:`tencentcloud.dbbrain.v20210527.models.HealthStatus`
"""
self.AsyncRequestId = None
self.Source = None
self.Progress = None
self.CreateTime = None
self.StartTime = None
self.EndTime = None
self.InstanceInfo = None
self.HealthStatus = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.Source = params.get("Source")
self.Progress = params.get("Progress")
self.CreateTime = params.get("CreateTime")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
if params.get("InstanceInfo") is not None:
self.InstanceInfo = InstanceBasicInfo()
self.InstanceInfo._deserialize(params.get("InstanceInfo"))
if params.get("HealthStatus") is not None:
self.HealthStatus = HealthStatus()
self.HealthStatus._deserialize(params.get("HealthStatus"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class HealthScoreInfo(AbstractModel):
"""获取健康得分返回的详情。
"""
def __init__(self):
r"""
:param IssueTypes: 异常详情。
:type IssueTypes: list of IssueTypeInfo
:param EventsTotalCount: 异常事件总数。
:type EventsTotalCount: int
:param HealthScore: 健康得分。
:type HealthScore: int
:param HealthLevel: 健康等级, 如:"HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"。
:type HealthLevel: str
"""
self.IssueTypes = None
self.EventsTotalCount = None
self.HealthScore = None
self.HealthLevel = None
def _deserialize(self, params):
if params.get("IssueTypes") is not None:
self.IssueTypes = []
for item in params.get("IssueTypes"):
obj = IssueTypeInfo()
obj._deserialize(item)
self.IssueTypes.append(obj)
self.EventsTotalCount = params.get("EventsTotalCount")
self.HealthScore = params.get("HealthScore")
self.HealthLevel = params.get("HealthLevel")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class HealthStatus(AbstractModel):
"""实例健康详情。
"""
def __init__(self):
r"""
:param HealthScore: 健康分数,满分100。
:type HealthScore: int
:param HealthLevel: 健康等级,取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK"- 危险;"HIGH_RISK" - 高危。
:type HealthLevel: str
:param ScoreLost: 总扣分分数。
:type ScoreLost: int
:param ScoreDetails: 扣分详情。
注意:此字段可能返回 null,表示取不到有效值。
:type ScoreDetails: list of ScoreDetail
"""
self.HealthScore = None
self.HealthLevel = None
self.ScoreLost = None
self.ScoreDetails = None
def _deserialize(self, params):
self.HealthScore = params.get("HealthScore")
self.HealthLevel = params.get("HealthLevel")
self.ScoreLost = params.get("ScoreLost")
if params.get("ScoreDetails") is not None:
self.ScoreDetails = []
for item in params.get("ScoreDetails"):
obj = ScoreDetail()
obj._deserialize(item)
self.ScoreDetails.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceBasicInfo(AbstractModel):
"""实例基础信息。
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param InstanceName: 实例名称。
:type InstanceName: str
:param Vip: 实例内网IP。
:type Vip: str
:param Vport: 实例内网Port。
:type Vport: int
:param Product: 实例产品。
:type Product: str
:param EngineVersion: 实例引擎版本。
:type EngineVersion: str
"""
self.InstanceId = None
self.InstanceName = None
self.Vip = None
self.Vport = None
self.Product = None
self.EngineVersion = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.Product = params.get("Product")
self.EngineVersion = params.get("EngineVersion")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceConfs(AbstractModel):
"""实例配置。
"""
def __init__(self):
r"""
:param DailyInspection: 数据库巡检开关, Yes/No。
:type DailyInspection: str
:param OverviewDisplay: 实例概览开关,Yes/No。
:type OverviewDisplay: str
"""
self.DailyInspection = None
self.OverviewDisplay = None
def _deserialize(self, params):
self.DailyInspection = params.get("DailyInspection")
self.OverviewDisplay = params.get("OverviewDisplay")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceInfo(AbstractModel):
"""查询实例列表,返回实例的相关信息的对象。
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param InstanceName: 实例名称。
:type InstanceName: str
:param Region: 实例所属地域。
:type Region: str
:param HealthScore: 健康得分。
:type HealthScore: int
:param Product: 所属产品。
:type Product: str
:param EventCount: 异常事件数量。
:type EventCount: int
:param InstanceType: 实例类型:1:MASTER;2:DR,3:RO,4:SDR。
:type InstanceType: int
:param Cpu: 核心数。
:type Cpu: int
:param Memory: 内存,单位MB。
:type Memory: int
:param Volume: 硬盘存储,单位GB。
:type Volume: int
:param EngineVersion: 数据库版本。
:type EngineVersion: str
:param Vip: 内网地址。
:type Vip: str
:param Vport: 内网端口。
:type Vport: int
:param Source: 接入来源。
:type Source: str
:param GroupId: 分组ID。
:type GroupId: str
:param GroupName: 分组组名。
:type GroupName: str
:param Status: 实例状态:0:发货中;1:运行正常;4:销毁中;5:隔离中。
:type Status: int
:param UniqSubnetId: 子网统一ID。
:type UniqSubnetId: str
:param DeployMode: cdb类型。
:type DeployMode: str
:param InitFlag: cdb实例初始化标志:0:未初始化;1:已初始化。
:type InitFlag: int
:param TaskStatus: 任务状态。
:type TaskStatus: int
:param UniqVpcId: 私有网络统一ID。
:type UniqVpcId: str
:param InstanceConf: 实例巡检/概览的状态。
:type InstanceConf: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
:param DeadlineTime: 资源到期时间。
:type DeadlineTime: str
:param IsSupported: 是否是DBbrain支持的实例。
:type IsSupported: bool
:param SecAuditStatus: 实例安全审计日志开启状态:ON: 安全审计开启;OFF: 未开启安全审计。
:type SecAuditStatus: str
:param AuditPolicyStatus: 实例审计日志开启状态,ALL_AUDIT: 开启全审计;RULE_AUDIT: 开启规则审计;UNBOUND: 未开启审计。
:type AuditPolicyStatus: str
:param AuditRunningStatus: 实例审计日志运行状态:normal: 运行中; paused: 欠费暂停。
:type AuditRunningStatus: str
"""
self.InstanceId = None
self.InstanceName = None
self.Region = None
self.HealthScore = None
self.Product = None
self.EventCount = None
self.InstanceType = None
self.Cpu = None
self.Memory = None
self.Volume = None
self.EngineVersion = None
self.Vip = None
self.Vport = None
self.Source = None
self.GroupId = None
self.GroupName = None
self.Status = None
self.UniqSubnetId = None
self.DeployMode = None
self.InitFlag = None
self.TaskStatus = None
self.UniqVpcId = None
self.InstanceConf = None
self.DeadlineTime = None
self.IsSupported = None
self.SecAuditStatus = None
self.AuditPolicyStatus = None
self.AuditRunningStatus = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.Region = params.get("Region")
self.HealthScore = params.get("HealthScore")
self.Product = params.get("Product")
self.EventCount = params.get("EventCount")
self.InstanceType = params.get("InstanceType")
self.Cpu = params.get("Cpu")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.EngineVersion = params.get("EngineVersion")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.Source = params.get("Source")
self.GroupId = params.get("GroupId")
self.GroupName = params.get("GroupName")
self.Status = params.get("Status")
self.UniqSubnetId = params.get("UniqSubnetId")
self.DeployMode = params.get("DeployMode")
self.InitFlag = params.get("InitFlag")
self.TaskStatus = params.get("TaskStatus")
self.UniqVpcId = params.get("UniqVpcId")
if params.get("InstanceConf") is not None:
self.InstanceConf = InstanceConfs()
self.InstanceConf._deserialize(params.get("InstanceConf"))
self.DeadlineTime = params.get("DeadlineTime")
self.IsSupported = params.get("IsSupported")
self.SecAuditStatus = params.get("SecAuditStatus")
self.AuditPolicyStatus = params.get("AuditPolicyStatus")
self.AuditRunningStatus = params.get("AuditRunningStatus")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class IssueTypeInfo(AbstractModel):
"""指标信息。
"""
def __init__(self):
r"""
        :param IssueType: 指标分类:AVAILABILITY:可用性,MAINTAINABILITY:可维护性,PERFORMANCE:性能,RELIABILITY:可靠性。
:type IssueType: str
:param Events: 异常事件。
:type Events: list of EventInfo
:param TotalCount: 异常事件总数。
:type TotalCount: int
"""
self.IssueType = None
self.Events = None
self.TotalCount = None
def _deserialize(self, params):
self.IssueType = params.get("IssueType")
if params.get("Events") is not None:
self.Events = []
for item in params.get("Events"):
obj = EventInfo()
obj._deserialize(item)
self.Events.append(obj)
self.TotalCount = params.get("TotalCount")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class KillMySqlThreadsRequest(AbstractModel):
"""KillMySqlThreads请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param Stage: kill会话任务的阶段,取值包括:"Prepare"-准备阶段,"Commit"-提交阶段。
:type Stage: str
:param Threads: 需要kill的sql会话ID列表,此参数用于Prepare阶段。
:type Threads: list of int
:param SqlExecId: 执行ID,此参数用于Commit阶段。
:type SqlExecId: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Stage = None
self.Threads = None
self.SqlExecId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Stage = params.get("Stage")
self.Threads = params.get("Threads")
self.SqlExecId = params.get("SqlExecId")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class KillMySqlThreadsResponse(AbstractModel):
"""KillMySqlThreads返回参数结构体
"""
def __init__(self):
r"""
:param Threads: kill完成的sql会话ID列表。
:type Threads: list of int
:param SqlExecId: 执行ID, Prepare阶段的任务输出,用于Commit阶段中指定执行kill操作的会话ID。
注意:此字段可能返回 null,表示取不到有效值。
:type SqlExecId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Threads = None
self.SqlExecId = None
self.RequestId = None
def _deserialize(self, params):
self.Threads = params.get("Threads")
self.SqlExecId = params.get("SqlExecId")
self.RequestId = params.get("RequestId")
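# Illustrative two-step flow (comments only; the client object and its
# KillMySqlThreads method are assumed from Tencent Cloud SDK conventions, and
# all IDs are placeholders): the Prepare stage returns a SqlExecId, which the
# Commit stage must pass back to actually kill the sessions.
#
#     prepare = KillMySqlThreadsRequest()
#     prepare._deserialize({
#         "InstanceId": "cdb-xxxxxxxx",
#         "Stage": "Prepare",
#         "Threads": [1234, 5678],       # session IDs selected for kill
#     })
#     prep_resp = client.KillMySqlThreads(prepare)
#
#     commit = KillMySqlThreadsRequest()
#     commit._deserialize({
#         "InstanceId": "cdb-xxxxxxxx",
#         "Stage": "Commit",
#         "SqlExecId": prep_resp.SqlExecId,
#     })
#     client.KillMySqlThreads(commit)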
class MailConfiguration(AbstractModel):
"""邮件发送配置
"""
def __init__(self):
r"""
:param SendMail: 是否开启邮件发送: 0, 否; 1, 是。
:type SendMail: int
:param Region: 地域配置, 如["ap-guangzhou", "ap-shanghai"]。巡检的邮件发送模版,配置需要发送巡检邮件的地域;订阅的邮件发送模版,配置当前订阅实例的所属地域。
:type Region: list of str
:param HealthStatus: 发送指定的健康等级的报告, 如["HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"]。
:type HealthStatus: list of str
:param ContactPerson: 联系人id, 联系人/联系组不能都为空。
:type ContactPerson: list of int
:param ContactGroup: 联系组id, 联系人/联系组不能都为空。
:type ContactGroup: list of int
"""
self.SendMail = None
self.Region = None
self.HealthStatus = None
self.ContactPerson = None
self.ContactGroup = None
def _deserialize(self, params):
self.SendMail = params.get("SendMail")
self.Region = params.get("Region")
self.HealthStatus = params.get("HealthStatus")
self.ContactPerson = params.get("ContactPerson")
self.ContactGroup = params.get("ContactGroup")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiagDBInstanceConfRequest(AbstractModel):
"""ModifyDiagDBInstanceConf请求参数结构体
"""
def __init__(self):
r"""
:param InstanceConfs: 实例配置,包括巡检、概览开关等。
:type InstanceConfs: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
:param Regions: 生效实例地域,取值为"All",代表全地域。
:type Regions: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。
:type Product: str
:param InstanceIds: 指定更改巡检状态的实例ID。
:type InstanceIds: list of str
"""
self.InstanceConfs = None
self.Regions = None
self.Product = None
self.InstanceIds = None
def _deserialize(self, params):
if params.get("InstanceConfs") is not None:
self.InstanceConfs = InstanceConfs()
self.InstanceConfs._deserialize(params.get("InstanceConfs"))
self.Regions = params.get("Regions")
self.Product = params.get("Product")
self.InstanceIds = params.get("InstanceIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiagDBInstanceConfResponse(AbstractModel):
"""ModifyDiagDBInstanceConf返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class MonitorFloatMetric(AbstractModel):
"""监控数据(浮点型)
"""
def __init__(self):
r"""
:param Metric: 指标名称。
:type Metric: str
:param Unit: 指标单位。
:type Unit: str
:param Values: 指标值。
注意:此字段可能返回 null,表示取不到有效值。
:type Values: list of float
"""
self.Metric = None
self.Unit = None
self.Values = None
def _deserialize(self, params):
self.Metric = params.get("Metric")
self.Unit = params.get("Unit")
self.Values = params.get("Values")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorFloatMetricSeriesData(AbstractModel):
"""单位时间间隔内的监控指标数据(浮点型)
"""
def __init__(self):
r"""
:param Series: 监控指标。
:type Series: list of MonitorFloatMetric
:param Timestamp: 监控指标对应的时间戳。
:type Timestamp: list of int
"""
self.Series = None
self.Timestamp = None
def _deserialize(self, params):
if params.get("Series") is not None:
self.Series = []
for item in params.get("Series"):
obj = MonitorFloatMetric()
obj._deserialize(item)
self.Series.append(obj)
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorMetric(AbstractModel):
"""监控数据
"""
def __init__(self):
r"""
:param Metric: 指标名称。
:type Metric: str
:param Unit: 指标单位。
:type Unit: str
:param Values: 指标值。
注意:此字段可能返回 null,表示取不到有效值。
:type Values: list of float
"""
self.Metric = None
self.Unit = None
self.Values = None
def _deserialize(self, params):
self.Metric = params.get("Metric")
self.Unit = params.get("Unit")
self.Values = params.get("Values")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorMetricSeriesData(AbstractModel):
"""单位时间间隔内的监控指标数据
"""
def __init__(self):
r"""
:param Series: 监控指标。
:type Series: list of MonitorMetric
:param Timestamp: 监控指标对应的时间戳。
:type Timestamp: list of int
"""
self.Series = None
self.Timestamp = None
def _deserialize(self, params):
if params.get("Series") is not None:
self.Series = []
for item in params.get("Series"):
obj = MonitorMetric()
obj._deserialize(item)
self.Series.append(obj)
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MySqlProcess(AbstractModel):
"""关系型数据库线程
"""
def __init__(self):
r"""
:param ID: 线程ID。
:type ID: str
:param User: 线程的操作账号名。
:type User: str
:param Host: 线程的操作主机地址。
:type Host: str
:param DB: 线程的操作数据库。
:type DB: str
:param State: 线程的操作状态。
:type State: str
:param Command: 线程的执行类型。
:type Command: str
:param Time: 线程的操作时长,单位秒。
:type Time: str
:param Info: 线程的操作语句。
:type Info: str
"""
self.ID = None
self.User = None
self.Host = None
self.DB = None
self.State = None
self.Command = None
self.Time = None
self.Info = None
def _deserialize(self, params):
self.ID = params.get("ID")
self.User = params.get("User")
self.Host = params.get("Host")
self.DB = params.get("DB")
self.State = params.get("State")
self.Command = params.get("Command")
self.Time = params.get("Time")
self.Info = params.get("Info")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ProfileInfo(AbstractModel):
"""用户配置的信息
"""
def __init__(self):
r"""
:param Language: 语言, 如"zh"。
:type Language: str
:param MailConfiguration: 邮件模板的内容。
:type MailConfiguration: :class:`tencentcloud.dbbrain.v20210527.models.MailConfiguration`
"""
self.Language = None
self.MailConfiguration = None
def _deserialize(self, params):
self.Language = params.get("Language")
if params.get("MailConfiguration") is not None:
self.MailConfiguration = MailConfiguration()
self.MailConfiguration._deserialize(params.get("MailConfiguration"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SchemaItem(AbstractModel):
"""SchemaItem数组
"""
def __init__(self):
r"""
:param Schema: 数据库名称
:type Schema: str
"""
self.Schema = None
def _deserialize(self, params):
self.Schema = params.get("Schema")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SchemaSpaceData(AbstractModel):
"""库空间统计数据。
"""
def __init__(self):
r"""
:param TableSchema: 库名。
:type TableSchema: str
:param DataLength: 数据空间(MB)。
:type DataLength: float
:param IndexLength: 索引空间(MB)。
:type IndexLength: float
:param DataFree: 碎片空间(MB)。
:type DataFree: float
:param TotalLength: 总使用空间(MB)。
:type TotalLength: float
:param FragRatio: 碎片率(%)。
:type FragRatio: float
:param TableRows: 行数。
:type TableRows: int
:param PhysicalFileSize: 库中所有表对应的独立物理文件大小加和(MB)。
注意:此字段可能返回 null,表示取不到有效值。
:type PhysicalFileSize: float
"""
self.TableSchema = None
self.DataLength = None
self.IndexLength = None
self.DataFree = None
self.TotalLength = None
self.FragRatio = None
self.TableRows = None
self.PhysicalFileSize = None
def _deserialize(self, params):
self.TableSchema = params.get("TableSchema")
self.DataLength = params.get("DataLength")
self.IndexLength = params.get("IndexLength")
self.DataFree = params.get("DataFree")
self.TotalLength = params.get("TotalLength")
self.FragRatio = params.get("FragRatio")
self.TableRows = params.get("TableRows")
self.PhysicalFileSize = params.get("PhysicalFileSize")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SchemaSpaceTimeSeries(AbstractModel):
"""库空间时序数据
"""
def __init__(self):
r"""
:param TableSchema: 库名
:type TableSchema: str
:param SeriesData: 单位时间间隔内的空间指标数据。
:type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData`
"""
self.TableSchema = None
self.SeriesData = None
def _deserialize(self, params):
self.TableSchema = params.get("TableSchema")
if params.get("SeriesData") is not None:
self.SeriesData = MonitorMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ScoreDetail(AbstractModel):
"""扣分详情。
"""
def __init__(self):
r"""
:param IssueType: 扣分项分类,取值包括:可用性、可维护性、性能及可靠性。
:type IssueType: str
:param ScoreLost: 扣分总分。
:type ScoreLost: int
:param ScoreLostMax: 扣分总分上限。
:type ScoreLostMax: int
:param Items: 扣分项列表。
注意:此字段可能返回 null,表示取不到有效值。
:type Items: list of ScoreItem
"""
self.IssueType = None
self.ScoreLost = None
self.ScoreLostMax = None
self.Items = None
def _deserialize(self, params):
self.IssueType = params.get("IssueType")
self.ScoreLost = params.get("ScoreLost")
self.ScoreLostMax = params.get("ScoreLostMax")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = ScoreItem()
obj._deserialize(item)
self.Items.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ScoreItem(AbstractModel):
"""诊断扣分项。
"""
def __init__(self):
r"""
:param DiagItem: 异常诊断项名称。
:type DiagItem: str
:param IssueType: 诊断项分类,取值包括:可用性、可维护性、性能及可靠性。
:type IssueType: str
:param TopSeverity: 健康等级,取值包括:信息、提示、告警、严重、致命。
:type TopSeverity: str
:param Count: 该异常诊断项出现次数。
:type Count: int
:param ScoreLost: 扣分分数。
:type ScoreLost: int
"""
self.DiagItem = None
self.IssueType = None
self.TopSeverity = None
self.Count = None
self.ScoreLost = None
def _deserialize(self, params):
self.DiagItem = params.get("DiagItem")
self.IssueType = params.get("IssueType")
self.TopSeverity = params.get("TopSeverity")
self.Count = params.get("Count")
self.ScoreLost = params.get("ScoreLost")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SecLogExportTaskInfo(AbstractModel):
"""安全审计日志导出任务信息
"""
def __init__(self):
r"""
:param AsyncRequestId: 异步任务Id。
:type AsyncRequestId: int
:param StartTime: 任务开始时间。
注意:此字段可能返回 null,表示取不到有效值。
:type StartTime: str
:param EndTime: 任务结束时间。
注意:此字段可能返回 null,表示取不到有效值。
:type EndTime: str
:param CreateTime: 任务创建时间。
:type CreateTime: str
:param Status: 任务状态。
:type Status: str
:param Progress: 任务执行进度。
:type Progress: int
:param LogStartTime: 导出日志开始时间。
注意:此字段可能返回 null,表示取不到有效值。
:type LogStartTime: str
:param LogEndTime: 导出日志结束时间。
注意:此字段可能返回 null,表示取不到有效值。
:type LogEndTime: str
:param TotalSize: 日志文件总大小,单位KB。
注意:此字段可能返回 null,表示取不到有效值。
:type TotalSize: int
:param DangerLevels: 风险等级列表。0 无风险;1 低风险;2 中风险;3 高风险。
注意:此字段可能返回 null,表示取不到有效值。
:type DangerLevels: list of int non-negative
"""
self.AsyncRequestId = None
self.StartTime = None
self.EndTime = None
self.CreateTime = None
self.Status = None
self.Progress = None
self.LogStartTime = None
self.LogEndTime = None
self.TotalSize = None
self.DangerLevels = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.CreateTime = params.get("CreateTime")
self.Status = params.get("Status")
self.Progress = params.get("Progress")
self.LogStartTime = params.get("LogStartTime")
self.LogEndTime = params.get("LogEndTime")
self.TotalSize = params.get("TotalSize")
self.DangerLevels = params.get("DangerLevels")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SlowLogHost(AbstractModel):
"""慢日志来源地址详情。
"""
def __init__(self):
r"""
:param UserHost: 来源地址。
:type UserHost: str
:param Ratio: 该来源地址的慢日志数目占总数目的比例,单位%。
:type Ratio: float
:param Count: 该来源地址的慢日志数目。
:type Count: int
"""
self.UserHost = None
self.Ratio = None
self.Count = None
def _deserialize(self, params):
self.UserHost = params.get("UserHost")
self.Ratio = params.get("Ratio")
self.Count = params.get("Count")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SlowLogTopSqlItem(AbstractModel):
"""慢日志TopSql
"""
def __init__(self):
r"""
:param LockTime: sql总锁等待时间,单位秒
:type LockTime: float
:param LockTimeMax: 最大锁等待时间,单位秒
:type LockTimeMax: float
:param LockTimeMin: 最小锁等待时间,单位秒
:type LockTimeMin: float
:param RowsExamined: 总扫描行数
:type RowsExamined: int
:param RowsExaminedMax: 最大扫描行数
:type RowsExaminedMax: int
:param RowsExaminedMin: 最小扫描行数
:type RowsExaminedMin: int
:param QueryTime: 总耗时,单位秒
:type QueryTime: float
:param QueryTimeMax: 最大执行时间,单位秒
:type QueryTimeMax: float
:param QueryTimeMin: 最小执行时间,单位秒
:type QueryTimeMin: float
:param RowsSent: 总返回行数
:type RowsSent: int
:param RowsSentMax: 最大返回行数
:type RowsSentMax: int
:param RowsSentMin: 最小返回行数
:type RowsSentMin: int
:param ExecTimes: 执行次数
:type ExecTimes: int
:param SqlTemplate: sql模板
:type SqlTemplate: str
:param SqlText: 带参数SQL(随机)
:type SqlText: str
:param Schema: 数据库名
:type Schema: str
:param QueryTimeRatio: 总耗时占比,单位%
:type QueryTimeRatio: float
:param LockTimeRatio: sql总锁等待时间占比,单位%
:type LockTimeRatio: float
:param RowsExaminedRatio: 总扫描行数占比,单位%
:type RowsExaminedRatio: float
:param RowsSentRatio: 总返回行数占比,单位%
:type RowsSentRatio: float
:param QueryTimeAvg: 平均执行时间,单位秒
:type QueryTimeAvg: float
:param RowsSentAvg: 平均返回行数
:type RowsSentAvg: float
:param LockTimeAvg: 平均锁等待时间,单位秒
:type LockTimeAvg: float
:param RowsExaminedAvg: 平均扫描行数
:type RowsExaminedAvg: float
        :param Md5: SQL模板的MD5值
:type Md5: str
"""
self.LockTime = None
self.LockTimeMax = None
self.LockTimeMin = None
self.RowsExamined = None
self.RowsExaminedMax = None
self.RowsExaminedMin = None
self.QueryTime = None
self.QueryTimeMax = None
self.QueryTimeMin = None
self.RowsSent = None
self.RowsSentMax = None
self.RowsSentMin = None
self.ExecTimes = None
self.SqlTemplate = None
self.SqlText = None
self.Schema = None
self.QueryTimeRatio = None
self.LockTimeRatio = None
self.RowsExaminedRatio = None
self.RowsSentRatio = None
self.QueryTimeAvg = None
self.RowsSentAvg = None
self.LockTimeAvg = None
self.RowsExaminedAvg = None
self.Md5 = None
def _deserialize(self, params):
self.LockTime = params.get("LockTime")
self.LockTimeMax = params.get("LockTimeMax")
self.LockTimeMin = params.get("LockTimeMin")
self.RowsExamined = params.get("RowsExamined")
self.RowsExaminedMax = params.get("RowsExaminedMax")
self.RowsExaminedMin = params.get("RowsExaminedMin")
self.QueryTime = params.get("QueryTime")
self.QueryTimeMax = params.get("QueryTimeMax")
self.QueryTimeMin = params.get("QueryTimeMin")
self.RowsSent = params.get("RowsSent")
self.RowsSentMax = params.get("RowsSentMax")
self.RowsSentMin = params.get("RowsSentMin")
self.ExecTimes = params.get("ExecTimes")
self.SqlTemplate = params.get("SqlTemplate")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
self.QueryTimeRatio = params.get("QueryTimeRatio")
self.LockTimeRatio = params.get("LockTimeRatio")
self.RowsExaminedRatio = params.get("RowsExaminedRatio")
self.RowsSentRatio = params.get("RowsSentRatio")
self.QueryTimeAvg = params.get("QueryTimeAvg")
self.RowsSentAvg = params.get("RowsSentAvg")
self.LockTimeAvg = params.get("LockTimeAvg")
self.RowsExaminedAvg = params.get("RowsExaminedAvg")
self.Md5 = params.get("Md5")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TableSpaceData(AbstractModel):
"""库表空间统计数据。
"""
def __init__(self):
r"""
:param TableName: 表名。
:type TableName: str
:param TableSchema: 库名。
:type TableSchema: str
:param Engine: 库表的存储引擎。
:type Engine: str
:param DataLength: 数据空间(MB)。
:type DataLength: float
:param IndexLength: 索引空间(MB)。
:type IndexLength: float
:param DataFree: 碎片空间(MB)。
:type DataFree: float
:param TotalLength: 总使用空间(MB)。
:type TotalLength: float
:param FragRatio: 碎片率(%)。
:type FragRatio: float
:param TableRows: 行数。
:type TableRows: int
:param PhysicalFileSize: 表对应的独立物理文件大小(MB)。
:type PhysicalFileSize: float
"""
self.TableName = None
self.TableSchema = None
self.Engine = None
self.DataLength = None
self.IndexLength = None
self.DataFree = None
self.TotalLength = None
self.FragRatio = None
self.TableRows = None
self.PhysicalFileSize = None
def _deserialize(self, params):
self.TableName = params.get("TableName")
self.TableSchema = params.get("TableSchema")
self.Engine = params.get("Engine")
self.DataLength = params.get("DataLength")
self.IndexLength = params.get("IndexLength")
self.DataFree = params.get("DataFree")
self.TotalLength = params.get("TotalLength")
self.FragRatio = params.get("FragRatio")
self.TableRows = params.get("TableRows")
self.PhysicalFileSize = params.get("PhysicalFileSize")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TableSpaceTimeSeries(AbstractModel):
"""库表空间时序数据
"""
def __init__(self):
r"""
:param TableName: 表名。
:type TableName: str
:param TableSchema: 库名。
:type TableSchema: str
:param Engine: 库表的存储引擎。
:type Engine: str
:param SeriesData: 单位时间间隔内的空间指标数据。
:type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorFloatMetricSeriesData`
"""
self.TableName = None
self.TableSchema = None
self.Engine = None
self.SeriesData = None
def _deserialize(self, params):
self.TableName = params.get("TableName")
self.TableSchema = params.get("TableSchema")
self.Engine = params.get("Engine")
if params.get("SeriesData") is not None:
self.SeriesData = MonitorFloatMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TimeSlice(AbstractModel):
"""单位时间间隔内的慢日志统计
"""
def __init__(self):
r"""
:param Count: 总数
:type Count: int
:param Timestamp: 统计开始时间
:type Timestamp: int
"""
self.Count = None
self.Timestamp = None
def _deserialize(self, params):
self.Count = params.get("Count")
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UserProfile(AbstractModel):
"""用户配置的相关信息,包括邮件配置。
"""
def __init__(self):
r"""
:param ProfileId: 配置的id。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileId: str
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileType: str
:param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileLevel: str
:param ProfileName: 配置名称。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileName: str
:param ProfileInfo: 配置详情。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
"""
self.ProfileId = None
self.ProfileType = None
self.ProfileLevel = None
self.ProfileName = None
self.ProfileInfo = None
def _deserialize(self, params):
self.ProfileId = params.get("ProfileId")
self.ProfileType = params.get("ProfileType")
self.ProfileLevel = params.get("ProfileLevel")
self.ProfileName = params.get("ProfileName")
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
| zh | 0.728044 |
实例概览开关,Yes/No。 :type OverviewDisplay: str 查询实例列表,返回实例的相关信息的对象。 :param InstanceId: 实例ID。 :type InstanceId: str :param InstanceName: 实例名称。 :type InstanceName: str :param Region: 实例所属地域。 :type Region: str :param HealthScore: 健康得分。 :type HealthScore: int :param Product: 所属产品。 :type Product: str :param EventCount: 异常事件数量。 :type EventCount: int :param InstanceType: 实例类型:1:MASTER;2:DR,3:RO,4:SDR。 :type InstanceType: int :param Cpu: 核心数。 :type Cpu: int :param Memory: 内存,单位MB。 :type Memory: int :param Volume: 硬盘存储,单位GB。 :type Volume: int :param EngineVersion: 数据库版本。 :type EngineVersion: str :param Vip: 内网地址。 :type Vip: str :param Vport: 内网端口。 :type Vport: int :param Source: 接入来源。 :type Source: str :param GroupId: 分组ID。 :type GroupId: str :param GroupName: 分组组名。 :type GroupName: str :param Status: 实例状态:0:发货中;1:运行正常;4:销毁中;5:隔离中。 :type Status: int :param UniqSubnetId: 子网统一ID。 :type UniqSubnetId: str :param DeployMode: cdb类型。 :type DeployMode: str :param InitFlag: cdb实例初始化标志:0:未初始化;1:已初始化。 :type InitFlag: int :param TaskStatus: 任务状态。 :type TaskStatus: int :param UniqVpcId: 私有网络统一ID。 :type UniqVpcId: str :param InstanceConf: 实例巡检/概览的状态。 :type InstanceConf: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs` :param DeadlineTime: 资源到期时间。 :type DeadlineTime: str :param IsSupported: 是否是DBbrain支持的实例。 :type IsSupported: bool :param SecAuditStatus: 实例安全审计日志开启状态:ON: 安全审计开启;OFF: 未开启安全审计。 :type SecAuditStatus: str :param AuditPolicyStatus: 实例审计日志开启状态,ALL_AUDIT: 开启全审计;RULE_AUDIT: 开启规则审计;UNBOUND: 未开启审计。 :type AuditPolicyStatus: str :param AuditRunningStatus: 实例审计日志运行状态:normal: 运行中; paused: 欠费暂停。 :type AuditRunningStatus: str 指标信息。 :param IssueType: 指标分类:AVAILABILITY:可用性,MAINTAINABILITY:可维护性,PERFORMANCE,性能,RELIABILITY可靠性。 :type IssueType: str :param Events: 异常事件。 :type Events: list of EventInfo :param TotalCount: 异常事件总数。 :type TotalCount: int KillMySqlThreads请求参数结构体 :param InstanceId: 实例ID。 :type InstanceId: str :param Stage: kill会话任务的阶段,取值包括:"Prepare"-准备阶段,"Commit"-提交阶段。 :type Stage: str :param Threads: 需要kill的sql会话ID列表,此参数用于Prepare阶段。 :type Threads: list of int :param SqlExecId: 执行ID,此参数用于Commit阶段。 :type SqlExecId: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str KillMySqlThreads返回参数结构体 :param Threads: kill完成的sql会话ID列表。 :type Threads: list of int :param SqlExecId: 执行ID, Prepare阶段的任务输出,用于Commit阶段中指定执行kill操作的会话ID。 注意:此字段可能返回 null,表示取不到有效值。 :type SqlExecId: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str 邮件发送配置 :param SendMail: 是否开启邮件发送: 0, 否; 1, 是。 :type SendMail: int :param Region: 地域配置, 如["ap-guangzhou", "ap-shanghai"]。巡检的邮件发送模版,配置需要发送巡检邮件的地域;订阅的邮件发送模版,配置当前订阅实例的所属地域。 :type Region: list of str :param HealthStatus: 发送指定的健康等级的报告, 如["HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"]。 :type HealthStatus: list of str :param ContactPerson: 联系人id, 联系人/联系组不能都为空。 :type ContactPerson: list of int :param ContactGroup: 联系组id, 联系人/联系组不能都为空。 :type ContactGroup: list of int ModifyDiagDBInstanceConf请求参数结构体 :param InstanceConfs: 实例配置,包括巡检、概览开关等。 :type InstanceConfs: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs` :param Regions: 生效实例地域,取值为"All",代表全地域。 :type Regions: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。 :type Product: str :param InstanceIds: 指定更改巡检状态的实例ID。 :type InstanceIds: list of str ModifyDiagDBInstanceConf返回参数结构体 :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str 监控数据(浮点型) :param Metric: 指标名称。 :type Metric: str :param 
Unit: 指标单位。 :type Unit: str :param Values: 指标值。 注意:此字段可能返回 null,表示取不到有效值。 :type Values: list of float 单位时间间隔内的监控指标数据(浮点型) :param Series: 监控指标。 :type Series: list of MonitorFloatMetric :param Timestamp: 监控指标对应的时间戳。 :type Timestamp: list of int 监控数据 :param Metric: 指标名称。 :type Metric: str :param Unit: 指标单位。 :type Unit: str :param Values: 指标值。 注意:此字段可能返回 null,表示取不到有效值。 :type Values: list of float 单位时间间隔内的监控指标数据 :param Series: 监控指标。 :type Series: list of MonitorMetric :param Timestamp: 监控指标对应的时间戳。 :type Timestamp: list of int 关系型数据库线程 :param ID: 线程ID。 :type ID: str :param User: 线程的操作账号名。 :type User: str :param Host: 线程的操作主机地址。 :type Host: str :param DB: 线程的操作数据库。 :type DB: str :param State: 线程的操作状态。 :type State: str :param Command: 线程的执行类型。 :type Command: str :param Time: 线程的操作时长,单位秒。 :type Time: str :param Info: 线程的操作语句。 :type Info: str 用户配置的信息 :param Language: 语言, 如"zh"。 :type Language: str :param MailConfiguration: 邮件模板的内容。 :type MailConfiguration: :class:`tencentcloud.dbbrain.v20210527.models.MailConfiguration` SchemaItem数组 :param Schema: 数据库名称 :type Schema: str 库空间统计数据。 :param TableSchema: 库名。 :type TableSchema: str :param DataLength: 数据空间(MB)。 :type DataLength: float :param IndexLength: 索引空间(MB)。 :type IndexLength: float :param DataFree: 碎片空间(MB)。 :type DataFree: float :param TotalLength: 总使用空间(MB)。 :type TotalLength: float :param FragRatio: 碎片率(%)。 :type FragRatio: float :param TableRows: 行数。 :type TableRows: int :param PhysicalFileSize: 库中所有表对应的独立物理文件大小加和(MB)。 注意:此字段可能返回 null,表示取不到有效值。 :type PhysicalFileSize: float 库空间时序数据 :param TableSchema: 库名 :type TableSchema: str :param SeriesData: 单位时间间隔内的空间指标数据。 :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData` 扣分详情。 :param IssueType: 扣分项分类,取值包括:可用性、可维护性、性能及可靠性。 :type IssueType: str :param ScoreLost: 扣分总分。 :type ScoreLost: int :param ScoreLostMax: 扣分总分上限。 :type ScoreLostMax: int :param Items: 扣分项列表。 注意:此字段可能返回 null,表示取不到有效值。 :type Items: list of ScoreItem 诊断扣分项。 :param DiagItem: 异常诊断项名称。 :type DiagItem: str :param IssueType: 诊断项分类,取值包括:可用性、可维护性、性能及可靠性。 :type IssueType: str :param TopSeverity: 健康等级,取值包括:信息、提示、告警、严重、致命。 :type TopSeverity: str :param Count: 该异常诊断项出现次数。 :type Count: int :param ScoreLost: 扣分分数。 :type ScoreLost: int 安全审计日志导出任务信息 :param AsyncRequestId: 异步任务Id。 :type AsyncRequestId: int :param StartTime: 任务开始时间。 注意:此字段可能返回 null,表示取不到有效值。 :type StartTime: str :param EndTime: 任务结束时间。 注意:此字段可能返回 null,表示取不到有效值。 :type EndTime: str :param CreateTime: 任务创建时间。 :type CreateTime: str :param Status: 任务状态。 :type Status: str :param Progress: 任务执行进度。 :type Progress: int :param LogStartTime: 导出日志开始时间。 注意:此字段可能返回 null,表示取不到有效值。 :type LogStartTime: str :param LogEndTime: 导出日志结束时间。 注意:此字段可能返回 null,表示取不到有效值。 :type LogEndTime: str :param TotalSize: 日志文件总大小,单位KB。 注意:此字段可能返回 null,表示取不到有效值。 :type TotalSize: int :param DangerLevels: 风险等级列表。0 无风险;1 低风险;2 中风险;3 高风险。 注意:此字段可能返回 null,表示取不到有效值。 :type DangerLevels: list of int non-negative 慢日志来源地址详情。 :param UserHost: 来源地址。 :type UserHost: str :param Ratio: 该来源地址的慢日志数目占总数目的比例,单位%。 :type Ratio: float :param Count: 该来源地址的慢日志数目。 :type Count: int 慢日志TopSql :param LockTime: sql总锁等待时间,单位秒 :type LockTime: float :param LockTimeMax: 最大锁等待时间,单位秒 :type LockTimeMax: float :param LockTimeMin: 最小锁等待时间,单位秒 :type LockTimeMin: float :param RowsExamined: 总扫描行数 :type RowsExamined: int :param RowsExaminedMax: 最大扫描行数 :type RowsExaminedMax: int :param RowsExaminedMin: 最小扫描行数 :type RowsExaminedMin: int :param QueryTime: 总耗时,单位秒 :type QueryTime: float :param QueryTimeMax: 最大执行时间,单位秒 :type QueryTimeMax: 
float :param QueryTimeMin: 最小执行时间,单位秒 :type QueryTimeMin: float :param RowsSent: 总返回行数 :type RowsSent: int :param RowsSentMax: 最大返回行数 :type RowsSentMax: int :param RowsSentMin: 最小返回行数 :type RowsSentMin: int :param ExecTimes: 执行次数 :type ExecTimes: int :param SqlTemplate: sql模板 :type SqlTemplate: str :param SqlText: 带参数SQL(随机) :type SqlText: str :param Schema: 数据库名 :type Schema: str :param QueryTimeRatio: 总耗时占比,单位% :type QueryTimeRatio: float :param LockTimeRatio: sql总锁等待时间占比,单位% :type LockTimeRatio: float :param RowsExaminedRatio: 总扫描行数占比,单位% :type RowsExaminedRatio: float :param RowsSentRatio: 总返回行数占比,单位% :type RowsSentRatio: float :param QueryTimeAvg: 平均执行时间,单位秒 :type QueryTimeAvg: float :param RowsSentAvg: 平均返回行数 :type RowsSentAvg: float :param LockTimeAvg: 平均锁等待时间,单位秒 :type LockTimeAvg: float :param RowsExaminedAvg: 平均扫描行数 :type RowsExaminedAvg: float :param Md5: SOL模板的MD5值 :type Md5: str 库表空间统计数据。 :param TableName: 表名。 :type TableName: str :param TableSchema: 库名。 :type TableSchema: str :param Engine: 库表的存储引擎。 :type Engine: str :param DataLength: 数据空间(MB)。 :type DataLength: float :param IndexLength: 索引空间(MB)。 :type IndexLength: float :param DataFree: 碎片空间(MB)。 :type DataFree: float :param TotalLength: 总使用空间(MB)。 :type TotalLength: float :param FragRatio: 碎片率(%)。 :type FragRatio: float :param TableRows: 行数。 :type TableRows: int :param PhysicalFileSize: 表对应的独立物理文件大小(MB)。 :type PhysicalFileSize: float 库表空间时序数据 :param TableName: 表名。 :type TableName: str :param TableSchema: 库名。 :type TableSchema: str :param Engine: 库表的存储引擎。 :type Engine: str :param SeriesData: 单位时间间隔内的空间指标数据。 :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorFloatMetricSeriesData` 单位时间间隔内的慢日志统计 :param Count: 总数 :type Count: int :param Timestamp: 统计开始时间 :type Timestamp: int 用户配置的相关信息,包括邮件配置。 :param ProfileId: 配置的id。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileId: str :param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileType: str :param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileLevel: str :param ProfileName: 配置名称。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileName: str :param ProfileInfo: 配置详情。 :type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo` | 2.344346 | 2 |
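The flattened parameter documentation above describes DBbrain request/response structures such as DescribeSlowLogTopSqls. A minimal usage sketch follows; it assumes the standard tencentcloud-sdk-python calling convention, and the credentials, region and instance ID are placeholders rather than values taken from this record.

# Hypothetical sketch: calling DescribeSlowLogTopSqls with tencentcloud-sdk-python.
# Credentials, region and InstanceId are placeholders; the request fields mirror the
# parameters documented above (StartTime/EndTime within 7 days, SortBy, OrderBy, Product).
import json
from tencentcloud.common import credential
from tencentcloud.dbbrain.v20210527 import dbbrain_client, models

cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")  # placeholder keys
client = dbbrain_client.DbbrainClient(cred, "ap-guangzhou")        # assumed region

req = models.DescribeSlowLogTopSqlsRequest()
req.from_json_string(json.dumps({
    "InstanceId": "cdb-xxxxxxxx",        # placeholder instance ID
    "StartTime": "2019-09-10 12:13:14",
    "EndTime": "2019-09-11 10:13:14",
    "SortBy": "QueryTime",               # one of the documented sort keys
    "OrderBy": "DESC",
    "Product": "mysql",
}))
resp = client.DescribeSlowLogTopSqls(req)
print(resp.TotalCount)                   # total number of matching records
for row in resp.Rows:                    # list of SlowLogTopSqlItem
    print(row.SqlTemplate, row.QueryTime)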
oauth/provider.py | giuseppe/quay | 2,027 | 795 | <filename>oauth/provider.py<gh_stars>1000+
# Ported to Python 3
# Originally from https://github.com/DeprecatedCode/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/provider.py
import json
import logging
from requests import Response
from io import StringIO
try:
from werkzeug.exceptions import Unauthorized
except ImportError:
Unauthorized = Exception
from oauth import utils
class Provider(object):
"""Base provider class for different types of OAuth 2.0 providers."""
def _handle_exception(self, exc):
"""Handle an internal exception that was caught and suppressed.
:param exc: Exception to process.
:type exc: Exception
"""
logger = logging.getLogger(__name__)
logger.exception(exc)
def _make_response(self, body="", headers=None, status_code=200):
"""Return a response object from the given parameters.
:param body: Buffer/string containing the response body.
:type body: str
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
res = Response()
res.status_code = status_code
if headers is not None:
res.headers.update(headers)
res.raw = StringIO(body)
return res
def _make_redirect_error_response(self, redirect_uri, err):
"""Return a HTTP 302 redirect response object containing the error.
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
params = {"error": err, "response_type": None, "client_id": None, "redirect_uri": None}
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={"Location": redirect}, status_code=302)
def _make_json_response(self, data, headers=None, status_code=200):
"""Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
response_headers = {}
if headers is not None:
response_headers.update(headers)
response_headers["Content-Type"] = "application/json;charset=UTF-8"
response_headers["Cache-Control"] = "no-store"
response_headers["Pragma"] = "no-cache"
return self._make_response(json.dumps(data), response_headers, status_code)
def _make_json_error_response(self, err):
"""Return a JSON-encoded response object representing the error.
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
return self._make_json_response({"error": err}, status_code=400)
def _invalid_redirect_uri_response(self):
"""What to return when the redirect_uri parameter is missing.
:rtype: requests.Response
"""
return self._make_json_error_response("invalid_request")
class AuthorizationProvider(Provider):
"""OAuth 2.0 authorization provider. This class manages authorization
codes and access tokens. Certain methods MUST be overridden in a
subclass, thus this class cannot be directly used as a provider.
These are the methods that must be implemented in a subclass:
validate_client_id(self, client_id)
# Return True or False
validate_client_secret(self, client_id, client_secret)
# Return True or False
validate_scope(self, client_id, scope)
# Return True or False
validate_redirect_uri(self, client_id, redirect_uri)
# Return True or False
validate_access(self) # Use this to validate your app session user
# Return True or False
from_authorization_code(self, client_id, code, scope)
# Return mixed data or None on invalid
from_refresh_token(self, client_id, refresh_token, scope)
# Return mixed data or None on invalid
persist_authorization_code(self, client_id, code, scope)
# Return value ignored
persist_token_information(self, client_id, scope, access_token,
token_type, expires_in, refresh_token,
data)
# Return value ignored
discard_authorization_code(self, client_id, code)
# Return value ignored
discard_refresh_token(self, client_id, refresh_token)
# Return value ignored
Optionally, the following may be overridden to achieve desired behavior:
@property
token_length(self)
@property
token_type(self)
@property
token_expires_in(self)
generate_authorization_code(self)
generate_access_token(self)
generate_refresh_token(self)
"""
@property
def token_length(self):
"""Property method to get the length used to generate tokens.
:rtype: int
"""
return 40
@property
def token_type(self):
"""Property method to get the access token type.
:rtype: str
"""
return "Bearer"
@property
def token_expires_in(self):
"""Property method to get the token expiration time in seconds.
:rtype: int
"""
return 3600
def generate_authorization_code(self):
"""Generate a random authorization code.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_access_token(self):
"""Generate a random access token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_refresh_token(self):
"""Generate a random refresh token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def get_authorization_code(self, response_type, client_id, redirect_uri, **params):
"""Generate authorization code HTTP response.
:param response_type: Desired response type. Must be exactly "code".
:type response_type: str
:param client_id: Client ID.
:type client_id: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:rtype: requests.Response
"""
# Ensure proper response_type
if response_type != "code":
err = "unsupported_response_type"
return self._make_redirect_error_response(redirect_uri, err)
# Check redirect URI
is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
if not is_valid_redirect_uri:
return self._invalid_redirect_uri_response()
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_access = self.validate_access()
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
# Return proper error responses on invalid conditions
if not is_valid_client_id:
err = "unauthorized_client"
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_access:
err = "access_denied"
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_scope:
err = "invalid_scope"
return self._make_redirect_error_response(redirect_uri, err)
# Generate authorization code
code = self.generate_authorization_code()
# Save information to be used to validate later requests
self.persist_authorization_code(client_id=client_id, code=code, scope=scope)
# Return redirection response
params.update(
{"code": code, "response_type": None, "client_id": None, "redirect_uri": None}
)
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={"Location": redirect}, status_code=302)
def refresh_token(self, grant_type, client_id, client_secret, refresh_token, **params):
"""Generate access token HTTP response from a refresh token.
:param grant_type: Desired grant type. Must be "refresh_token".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param refresh_token: Refresh token.
:type refresh_token: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != "refresh_token":
return self._make_json_error_response("unsupported_grant_type")
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id, client_secret)
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_refresh_token(client_id, refresh_token, scope)
is_valid_refresh_token = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response("invalid_client")
if not is_valid_scope:
return self._make_json_error_response("invalid_scope")
if not is_valid_refresh_token:
return self._make_json_error_response("invalid_grant")
# Discard original refresh token
self.discard_refresh_token(client_id, refresh_token)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(
client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data,
)
# Return json response
return self._make_json_response(
{
"access_token": access_token,
"token_type": token_type,
"expires_in": expires_in,
"refresh_token": refresh_token,
}
)
def get_token(self, grant_type, client_id, client_secret, redirect_uri, code, **params):
"""Generate access token HTTP response.
:param grant_type: Desired grant type. Must be "authorization_code".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param code: Authorization code.
:type code: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != "authorization_code":
return self._make_json_error_response("unsupported_grant_type")
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id, client_secret)
is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_authorization_code(client_id, code, scope)
is_valid_grant = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response("invalid_client")
if not is_valid_grant or not is_valid_redirect_uri:
return self._make_json_error_response("invalid_grant")
if not is_valid_scope:
return self._make_json_error_response("invalid_scope")
# Discard original authorization code
self.discard_authorization_code(client_id, code)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(
client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data,
)
# Return json response
return self._make_json_response(
{
"access_token": access_token,
"token_type": token_type,
"expires_in": expires_in,
"refresh_token": refresh_token,
}
)
def get_authorization_code_from_uri(self, uri):
"""Get authorization code response from a URI. This method will
ignore the domain and path of the request, instead
automatically parsing the query string parameters.
:param uri: URI to parse for authorization information.
:type uri: str
:rtype: requests.Response
"""
params = utils.url_query_params(uri)
try:
if "response_type" not in params:
raise TypeError("Missing parameter response_type in URL query")
if "client_id" not in params:
raise TypeError("Missing parameter client_id in URL query")
if "redirect_uri" not in params:
raise TypeError("Missing parameter redirect_uri in URL query")
return self.get_authorization_code(**params)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
err = "invalid_request"
if "redirect_uri" in params:
u = params["redirect_uri"]
return self._make_redirect_error_response(u, err)
else:
return self._invalid_redirect_uri_response()
except Exception as exc:
self._handle_exception(exc)
# Catch all other server errors
err = "server_error"
u = params["redirect_uri"]
return self._make_redirect_error_response(u, err)
def get_token_from_post_data(self, data):
"""Get a token response from POST data.
:param data: POST data containing authorization information.
:type data: dict
:rtype: requests.Response
"""
try:
# Verify OAuth 2.0 Parameters
for x in ["grant_type", "client_id", "client_secret"]:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
# Handle get token from refresh_token
if "refresh_token" in data:
return self.refresh_token(**data)
# Handle get token from authorization code
for x in ["redirect_uri", "code"]:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
return self.get_token(**data)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
return self._make_json_error_response("invalid_request")
except Exception as exc:
self._handle_exception(exc)
# Catch all other server errors
return self._make_json_error_response("server_error")
def validate_client_id(self, client_id):
raise NotImplementedError("Subclasses must implement " "validate_client_id.")
def validate_client_secret(self, client_id, client_secret):
raise NotImplementedError("Subclasses must implement " "validate_client_secret.")
def validate_redirect_uri(self, client_id, redirect_uri):
raise NotImplementedError("Subclasses must implement " "validate_redirect_uri.")
def validate_scope(self, client_id, scope):
raise NotImplementedError("Subclasses must implement " "validate_scope.")
def validate_access(self):
raise NotImplementedError("Subclasses must implement " "validate_access.")
def from_authorization_code(self, client_id, code, scope):
raise NotImplementedError("Subclasses must implement " "from_authorization_code.")
def from_refresh_token(self, client_id, refresh_token, scope):
raise NotImplementedError("Subclasses must implement " "from_refresh_token.")
def persist_authorization_code(self, client_id, code, scope):
raise NotImplementedError("Subclasses must implement " "persist_authorization_code.")
def persist_token_information(
self, client_id, scope, access_token, token_type, expires_in, refresh_token, data
):
raise NotImplementedError("Subclasses must implement " "persist_token_information.")
def discard_authorization_code(self, client_id, code):
raise NotImplementedError("Subclasses must implement " "discard_authorization_code.")
def discard_refresh_token(self, client_id, refresh_token):
raise NotImplementedError("Subclasses must implement " "discard_refresh_token.")
class OAuthError(Unauthorized):
"""OAuth error, including the OAuth error reason."""
def __init__(self, reason, *args, **kwargs):
self.reason = reason
super(OAuthError, self).__init__(*args, **kwargs)
class ResourceAuthorization(object):
"""A class containing an OAuth 2.0 authorization."""
is_oauth = False
is_valid = None
token = None
client_id = None
expires_in = None
error = None
def raise_error_if_invalid(self):
if not self.is_valid:
raise OAuthError(self.error, "OAuth authorization error")
class ResourceProvider(Provider):
"""OAuth 2.0 resource provider. This class provides an interface
to validate an incoming request and authenticate resource access.
Certain methods MUST be overridden in a subclass, thus this
class cannot be directly used as a resource provider.
These are the methods that must be implemented in a subclass:
get_authorization_header(self)
# Return header string for key "Authorization" or None
validate_access_token(self, access_token, authorization)
# Set is_valid=True, client_id, and expires_in attributes
# on authorization if authorization was successful.
# Return value is ignored
"""
@property
def authorization_class(self):
return ResourceAuthorization
def get_authorization(self):
"""Get authorization object representing status of authentication."""
auth = self.authorization_class()
header = self.get_authorization_header()
if not header or not header.split():
return auth
header = header.split()
if len(header) > 1 and header[0] == "Bearer":
auth.is_oauth = True
access_token = header[1]
self.validate_access_token(access_token, auth)
if not auth.is_valid:
auth.error = "access_denied"
return auth
def get_authorization_header(self):
raise NotImplementedError("Subclasses must implement " "get_authorization_header.")
def validate_access_token(self, access_token, authorization):
raise NotImplementedError("Subclasses must implement " "validate_access_token.")
| <filename>oauth/provider.py<gh_stars>1000+
# Ported to Python 3
# Originally from https://github.com/DeprecatedCode/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/provider.py
import json
import logging
from requests import Response
from io import StringIO
try:
from werkzeug.exceptions import Unauthorized
except ImportError:
Unauthorized = Exception
from oauth import utils
class Provider(object):
"""Base provider class for different types of OAuth 2.0 providers."""
def _handle_exception(self, exc):
"""Handle an internal exception that was caught and suppressed.
:param exc: Exception to process.
:type exc: Exception
"""
logger = logging.getLogger(__name__)
logger.exception(exc)
def _make_response(self, body="", headers=None, status_code=200):
"""Return a response object from the given parameters.
:param body: Buffer/string containing the response body.
:type body: str
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
res = Response()
res.status_code = status_code
if headers is not None:
res.headers.update(headers)
res.raw = StringIO(body)
return res
def _make_redirect_error_response(self, redirect_uri, err):
"""Return a HTTP 302 redirect response object containing the error.
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
params = {"error": err, "response_type": None, "client_id": None, "redirect_uri": None}
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={"Location": redirect}, status_code=302)
def _make_json_response(self, data, headers=None, status_code=200):
"""Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
response_headers = {}
if headers is not None:
response_headers.update(headers)
response_headers["Content-Type"] = "application/json;charset=UTF-8"
response_headers["Cache-Control"] = "no-store"
response_headers["Pragma"] = "no-cache"
return self._make_response(json.dumps(data), response_headers, status_code)
def _make_json_error_response(self, err):
"""Return a JSON-encoded response object representing the error.
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
return self._make_json_response({"error": err}, status_code=400)
def _invalid_redirect_uri_response(self):
"""What to return when the redirect_uri parameter is missing.
:rtype: requests.Response
"""
return self._make_json_error_response("invalid_request")
class AuthorizationProvider(Provider):
"""OAuth 2.0 authorization provider. This class manages authorization
codes and access tokens. Certain methods MUST be overridden in a
subclass, thus this class cannot be directly used as a provider.
These are the methods that must be implemented in a subclass:
validate_client_id(self, client_id)
# Return True or False
validate_client_secret(self, client_id, client_secret)
# Return True or False
validate_scope(self, client_id, scope)
# Return True or False
validate_redirect_uri(self, client_id, redirect_uri)
# Return True or False
validate_access(self) # Use this to validate your app session user
# Return True or False
from_authorization_code(self, client_id, code, scope)
# Return mixed data or None on invalid
from_refresh_token(self, client_id, refresh_token, scope)
# Return mixed data or None on invalid
persist_authorization_code(self, client_id, code, scope)
# Return value ignored
persist_token_information(self, client_id, scope, access_token,
token_type, expires_in, refresh_token,
data)
# Return value ignored
discard_authorization_code(self, client_id, code)
# Return value ignored
discard_refresh_token(self, client_id, refresh_token)
# Return value ignored
Optionally, the following may be overridden to achieve desired behavior:
@property
token_length(self)
@property
token_type(self)
@property
token_expires_in(self)
generate_authorization_code(self)
generate_access_token(self)
generate_refresh_token(self)
"""
@property
def token_length(self):
"""Property method to get the length used to generate tokens.
:rtype: int
"""
return 40
@property
def token_type(self):
"""Property method to get the access token type.
:rtype: str
"""
return "Bearer"
@property
def token_expires_in(self):
"""Property method to get the token expiration time in seconds.
:rtype: int
"""
return 3600
def generate_authorization_code(self):
"""Generate a random authorization code.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_access_token(self):
"""Generate a random access token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_refresh_token(self):
"""Generate a random refresh token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def get_authorization_code(self, response_type, client_id, redirect_uri, **params):
"""Generate authorization code HTTP response.
:param response_type: Desired response type. Must be exactly "code".
:type response_type: str
:param client_id: Client ID.
:type client_id: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:rtype: requests.Response
"""
# Ensure proper response_type
if response_type != "code":
err = "unsupported_response_type"
return self._make_redirect_error_response(redirect_uri, err)
# Check redirect URI
is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
if not is_valid_redirect_uri:
return self._invalid_redirect_uri_response()
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_access = self.validate_access()
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
# Return proper error responses on invalid conditions
if not is_valid_client_id:
err = "unauthorized_client"
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_access:
err = "access_denied"
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_scope:
err = "invalid_scope"
return self._make_redirect_error_response(redirect_uri, err)
# Generate authorization code
code = self.generate_authorization_code()
# Save information to be used to validate later requests
self.persist_authorization_code(client_id=client_id, code=code, scope=scope)
# Return redirection response
params.update(
{"code": code, "response_type": None, "client_id": None, "redirect_uri": None}
)
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={"Location": redirect}, status_code=302)
def refresh_token(self, grant_type, client_id, client_secret, refresh_token, **params):
"""Generate access token HTTP response from a refresh token.
:param grant_type: Desired grant type. Must be "refresh_token".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param refresh_token: Refresh token.
:type refresh_token: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != "refresh_token":
return self._make_json_error_response("unsupported_grant_type")
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id, client_secret)
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_refresh_token(client_id, refresh_token, scope)
is_valid_refresh_token = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response("invalid_client")
if not is_valid_scope:
return self._make_json_error_response("invalid_scope")
if not is_valid_refresh_token:
return self._make_json_error_response("invalid_grant")
# Discard original refresh token
self.discard_refresh_token(client_id, refresh_token)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(
client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data,
)
# Return json response
return self._make_json_response(
{
"access_token": access_token,
"token_type": token_type,
"expires_in": expires_in,
"refresh_token": refresh_token,
}
)
def get_token(self, grant_type, client_id, client_secret, redirect_uri, code, **params):
"""Generate access token HTTP response.
:param grant_type: Desired grant type. Must be "authorization_code".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param code: Authorization code.
:type code: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != "authorization_code":
return self._make_json_error_response("unsupported_grant_type")
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id, client_secret)
is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_authorization_code(client_id, code, scope)
is_valid_grant = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response("invalid_client")
if not is_valid_grant or not is_valid_redirect_uri:
return self._make_json_error_response("invalid_grant")
if not is_valid_scope:
return self._make_json_error_response("invalid_scope")
# Discard original authorization code
self.discard_authorization_code(client_id, code)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(
client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data,
)
# Return json response
return self._make_json_response(
{
"access_token": access_token,
"token_type": token_type,
"expires_in": expires_in,
"refresh_token": refresh_token,
}
)
def get_authorization_code_from_uri(self, uri):
"""Get authorization code response from a URI. This method will
ignore the domain and path of the request, instead
automatically parsing the query string parameters.
:param uri: URI to parse for authorization information.
:type uri: str
:rtype: requests.Response
"""
params = utils.url_query_params(uri)
try:
if "response_type" not in params:
raise TypeError("Missing parameter response_type in URL query")
if "client_id" not in params:
raise TypeError("Missing parameter client_id in URL query")
if "redirect_uri" not in params:
raise TypeError("Missing parameter redirect_uri in URL query")
return self.get_authorization_code(**params)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
err = "invalid_request"
if "redirect_uri" in params:
u = params["redirect_uri"]
return self._make_redirect_error_response(u, err)
else:
return self._invalid_redirect_uri_response()
except Exception as exc:
self._handle_exception(exc)
# Catch all other server errors
err = "server_error"
u = params["redirect_uri"]
return self._make_redirect_error_response(u, err)
def get_token_from_post_data(self, data):
"""Get a token response from POST data.
:param data: POST data containing authorization information.
:type data: dict
:rtype: requests.Response
"""
try:
# Verify OAuth 2.0 Parameters
for x in ["grant_type", "client_id", "client_secret"]:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
# Handle get token from refresh_token
if "refresh_token" in data:
return self.refresh_token(**data)
# Handle get token from authorization code
for x in ["redirect_uri", "code"]:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
return self.get_token(**data)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
return self._make_json_error_response("invalid_request")
except Exception as exc:
self._handle_exception(exc)
# Catch all other server errors
return self._make_json_error_response("server_error")
def validate_client_id(self, client_id):
raise NotImplementedError("Subclasses must implement " "validate_client_id.")
def validate_client_secret(self, client_id, client_secret):
raise NotImplementedError("Subclasses must implement " "validate_client_secret.")
def validate_redirect_uri(self, client_id, redirect_uri):
raise NotImplementedError("Subclasses must implement " "validate_redirect_uri.")
def validate_scope(self, client_id, scope):
raise NotImplementedError("Subclasses must implement " "validate_scope.")
def validate_access(self):
raise NotImplementedError("Subclasses must implement " "validate_access.")
def from_authorization_code(self, client_id, code, scope):
raise NotImplementedError("Subclasses must implement " "from_authorization_code.")
def from_refresh_token(self, client_id, refresh_token, scope):
raise NotImplementedError("Subclasses must implement " "from_refresh_token.")
def persist_authorization_code(self, client_id, code, scope):
raise NotImplementedError("Subclasses must implement " "persist_authorization_code.")
def persist_token_information(
self, client_id, scope, access_token, token_type, expires_in, refresh_token, data
):
raise NotImplementedError("Subclasses must implement " "persist_token_information.")
def discard_authorization_code(self, client_id, code):
raise NotImplementedError("Subclasses must implement " "discard_authorization_code.")
def discard_refresh_token(self, client_id, refresh_token):
raise NotImplementedError("Subclasses must implement " "discard_refresh_token.")
class OAuthError(Unauthorized):
"""OAuth error, including the OAuth error reason."""
def __init__(self, reason, *args, **kwargs):
self.reason = reason
super(OAuthError, self).__init__(*args, **kwargs)
class ResourceAuthorization(object):
"""A class containing an OAuth 2.0 authorization."""
is_oauth = False
is_valid = None
token = None
client_id = None
expires_in = None
error = None
def raise_error_if_invalid(self):
if not self.is_valid:
raise OAuthError(self.error, "OAuth authorization error")
class ResourceProvider(Provider):
"""OAuth 2.0 resource provider. This class provides an interface
to validate an incoming request and authenticate resource access.
Certain methods MUST be overridden in a subclass, thus this
class cannot be directly used as a resource provider.
These are the methods that must be implemented in a subclass:
get_authorization_header(self)
# Return header string for key "Authorization" or None
validate_access_token(self, access_token, authorization)
# Set is_valid=True, client_id, and expires_in attributes
# on authorization if authorization was successful.
# Return value is ignored
"""
@property
def authorization_class(self):
return ResourceAuthorization
def get_authorization(self):
"""Get authorization object representing status of authentication."""
auth = self.authorization_class()
header = self.get_authorization_header()
if not header or not header.split():
return auth
header = header.split()
if len(header) > 1 and header[0] == "Bearer":
auth.is_oauth = True
access_token = header[1]
self.validate_access_token(access_token, auth)
if not auth.is_valid:
auth.error = "access_denied"
return auth
def get_authorization_header(self):
raise NotImplementedError("Subclasses must implement " "get_authorization_header.")
def validate_access_token(self, access_token, authorization):
raise NotImplementedError("Subclasses must implement " "validate_access_token.")
| en | 0.568397 | # Ported to Python 3 # Originally from https://github.com/DeprecatedCode/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/provider.py Base provider class for different types of OAuth 2.0 providers. Handle an internal exception that was caught and suppressed. :param exc: Exception to process. :type exc: Exception Return a response object from the given parameters. :param body: Buffer/string containing the response body. :type body: str :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response Return a HTTP 302 redirect response object containing the error. :param redirect_uri: Client redirect URI. :type redirect_uri: str :param err: OAuth error message. :type err: str :rtype: requests.Response Return a response object from the given JSON data. :param data: Data to JSON-encode. :type data: mixed :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response Return a JSON-encoded response object representing the error. :param err: OAuth error message. :type err: str :rtype: requests.Response What to return when the redirect_uri parameter is missing. :rtype: requests.Response OAuth 2.0 authorization provider. This class manages authorization codes and access tokens. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a provider. These are the methods that must be implemented in a subclass: validate_client_id(self, client_id) # Return True or False validate_client_secret(self, client_id, client_secret) # Return True or False validate_scope(self, client_id, scope) # Return True or False validate_redirect_uri(self, client_id, redirect_uri) # Return True or False validate_access(self) # Use this to validate your app session user # Return True or False from_authorization_code(self, client_id, code, scope) # Return mixed data or None on invalid from_refresh_token(self, client_id, refresh_token, scope) # Return mixed data or None on invalid persist_authorization_code(self, client_id, code, scope) # Return value ignored persist_token_information(self, client_id, scope, access_token, token_type, expires_in, refresh_token, data) # Return value ignored discard_authorization_code(self, client_id, code) # Return value ignored discard_refresh_token(self, client_id, refresh_token) # Return value ignored Optionally, the following may be overridden to acheive desired behavior: @property token_length(self) @property token_type(self) @property token_expires_in(self) generate_authorization_code(self) generate_access_token(self) generate_refresh_token(self) Property method to get the length used to generate tokens. :rtype: int Property method to get the access token type. :rtype: str Property method to get the token expiration time in seconds. :rtype: int Generate a random authorization code. :rtype: str Generate a random access token. :rtype: str Generate a random refresh token. :rtype: str Generate authorization code HTTP response. :param response_type: Desired response type. Must be exactly "code". :type response_type: str :param client_id: Client ID. :type client_id: str :param redirect_uri: Client redirect URI. 
:type redirect_uri: str :rtype: requests.Response # Ensure proper response_type # Check redirect URI # Check conditions # Return proper error responses on invalid conditions # Generate authorization code # Save information to be used to validate later requests # Return redirection response Generate access token HTTP response from a refresh token. :param grant_type: Desired grant type. Must be "refresh_token". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param refresh_token: Refresh token. :type refresh_token: str :rtype: requests.Response # Ensure proper grant_type # Check conditions # Return proper error responses on invalid conditions # Discard original refresh token # Generate access tokens once all conditions have been met # Save information to be used to validate later requests # Return json response Generate access token HTTP response. :param grant_type: Desired grant type. Must be "authorization_code". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param redirect_uri: Client redirect URI. :type redirect_uri: str :param code: Authorization code. :type code: str :rtype: requests.Response # Ensure proper grant_type # Check conditions # Return proper error responses on invalid conditions # Discard original authorization code # Generate access tokens once all conditions have been met # Save information to be used to validate later requests # Return json response Get authorization code response from a URI. This method will ignore the domain and path of the request, instead automatically parsing the query string parameters. :param uri: URI to parse for authorization information. :type uri: str :rtype: requests.Response # Catch missing parameters in request # Catch all other server errors Get a token response from POST data. :param data: POST data containing authorization information. :type data: dict :rtype: requests.Response # Verify OAuth 2.0 Parameters # Handle get token from refresh_token # Handle get token from authorization code # Catch missing parameters in request # Catch all other server errors OAuth error, including the OAuth error reason. A class containing an OAuth 2.0 authorization. OAuth 2.0 resource provider. This class provides an interface to validate an incoming request and authenticate resource access. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a resource provider. These are the methods that must be implemented in a subclass: get_authorization_header(self) # Return header string for key "Authorization" or None validate_access_token(self, access_token, authorization) # Set is_valid=True, client_id, and expires_in attributes # on authorization if authorization was successful. # Return value is ignored Get authorization object representing status of authentication. | 2.98269 | 3 |
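The provider classes in the oauth/provider.py record above are abstract: as the docstrings state, the validation hooks must be supplied by a subclass. Below is a minimal in-memory sketch of a ResourceProvider subclass; the token store and dict-based header lookup are illustrative assumptions, not part of the quay code.

# Illustrative sketch only: a minimal in-memory ResourceProvider subclass.
from oauth.provider import ResourceProvider

FAKE_TOKENS = {"abc123": {"client_id": "demo-client", "expires_in": 3600}}  # hypothetical store

class InMemoryResourceProvider(ResourceProvider):
    def __init__(self, headers):
        self._headers = headers  # e.g. incoming request headers as a plain dict

    def get_authorization_header(self):
        # Return the raw "Authorization" header value, or None if absent.
        return self._headers.get("Authorization")

    def validate_access_token(self, access_token, authorization):
        # Mark the authorization object valid if the token is known.
        record = FAKE_TOKENS.get(access_token)
        if record is not None:
            authorization.is_valid = True
            authorization.client_id = record["client_id"]
            authorization.expires_in = record["expires_in"]

provider = InMemoryResourceProvider({"Authorization": "Bearer abc123"})
auth = provider.get_authorization()
auth.raise_error_if_invalid()  # passes for the known token, raises OAuthError otherwise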
main.py | TomHacker/ImageCluster | 10 | 796 | <reponame>TomHacker/ImageCluster
from model import ImageCluster
m=ImageCluster(
base_model='vgg16',#your feature map extractor model
resorted_img_folder='resorted_data',#the folder for clustered images
cluster_algo='kmeans',#cluster algorithm
base_img_folder='data',
maxK=150,#the max k num is 150, which means ImageCluster calculates every k in range(2,150+1)
)
# calculate the feature maps
# m.get_feature_map(
# resize_shape=(224,224) # (w,h) a tuple for resizing the input images to the same shape
# )
# #clustering for feature maps
# m.imagecluster()
#As we can see, 21 may be the best cluster number for this dataset.
#So,we can call the resorted_img function to label the images under different folders
m.resorted_img(
selected_k_num=100# an int number in range[2,maxK]
)
| from model import ImageCluster
m=ImageCluster(
base_model='vgg16',#your feature map extractor model
resorted_img_folder='resorted_data',#the folder for clustered images
cluster_algo='kmeans',#cluster algorithm
base_img_folder='data',
maxK=150,#the max k num is 150, which means ImageCluster calculates every k in range(2,150+1)
)
# calculate the feature maps
# m.get_feature_map(
# resize_shape=(224,224) # (w,h) a tuple for resizing the input images to the same shape
# )
# #clustering for feature maps
# m.imagecluster()
#As we can see, 21 may be the best cluster number for this dataset.
#So,we can call the resorted_img function to label the images under different folders
m.resorted_img(
selected_k_num=100# an int number in range [2, maxK]
) | en | 0.702829 | #your feature map extractor model #the folder for clustered images #cluster algorithm #the max k num; ImageCluster calculates every k in range(2,maxK+1) # calculate the feature maps # m.get_feature_map( # resize_shape=(224,224) # (w,h) a tuple for resizing the input images to the same shape # ) # #clustering for feature maps # m.imagecluster() #As we can see, 21 may be the best cluster number for this dataset. #So,we can call the resorted_img function to label the images under different folders # an int number in range [2, maxK] | 2.914708 | 3 |
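The snippet above drives the whole pipeline through the ImageCluster wrapper: extract VGG16 feature maps, cluster them with KMeans for every k up to maxK, then relabel the images under per-cluster folders. As a rough sketch of the underlying technique only (CNN feature extraction plus a k sweep scored by silhouette), not of ImageCluster's actual internals, one could write:

```python
# Illustrative sketch; ImageCluster's real implementation is not shown in this row.
import numpy as np
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.preprocessing import image
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

model = VGG16(weights="imagenet", include_top=False, pooling="avg")

def feature_vector(path, size=(224, 224)):
    """Resize one image and return its pooled VGG16 feature vector."""
    img = image.load_img(path, target_size=size)
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    return model.predict(x, verbose=0).ravel()

def best_k(features, max_k=10):
    """Pick k in range(2, max_k + 1) by silhouette score.

    `features` is a 2D array, e.g. np.stack([feature_vector(p) for p in paths]).
    """
    scores = {}
    for k in range(2, max_k + 1):
        labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(features)
        scores[k] = silhouette_score(features, labels)
    return max(scores, key=scores.get)
```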
tests/dummies.py | arvindmuralie77/gradsflow | 253 | 797 | # Copyright (c) 2021 GradsFlow. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from gradsflow.models import Model
class DummyModel(Model):
def __init__(self):
learner = torch.nn.Linear(1, 4)
super().__init__(learner)
def backward(self, loss: torch.Tensor):
return None
def train_step(self, batch):
return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}}
def val_step(self, batch):
return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}}
| # Copyright (c) 2021 GradsFlow. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from gradsflow.models import Model
class DummyModel(Model):
def __init__(self):
learner = torch.nn.Linear(1, 4)
super().__init__(learner)
def backward(self, loss: torch.Tensor):
return None
def train_step(self, batch):
return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}}
def val_step(self, batch):
return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}}
| en | 0.86047 | # Copyright (c) 2021 GradsFlow. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.32092 | 2 |
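DummyModel above is a test stub: backward is a no-op and both step methods return a fixed loss/metrics dictionary, so trainer plumbing can be exercised without real gradients. A minimal pytest-style check against it (a hypothetical test, not taken from the repository) might look like:

```python
# Hypothetical test sketch that uses only what DummyModel itself defines.
from tests.dummies import DummyModel

def test_dummy_model_steps():
    model = DummyModel()
    out = model.train_step(batch=None)           # the batch is ignored by the stub
    assert out["loss"].item() == 1
    assert out["metrics"]["accuracy"] == 1
    assert model.backward(out["loss"]) is None   # backward is a no-op
```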
JumpscaleCore/tools/executor/ExecutorSerial.py | gneumann333/jumpscaleX_core | 1 | 798 | <reponame>gneumann333/jumpscaleX_core
from Jumpscale import j
JSBASE = j.baseclasses.object
from .ExecutorBase import *
import serial
class ExecutorSerial(ExecutorBase):
"""
This executor is primarily made to communicate with devices (routers, switches, ...) over a
console cable, but you can use the underlying methods to communicate with any serial device.
Please note that the default mode attempts to recognize a device with Cisco-like commands.
"""
def __init__(self, device, baudrate=9600, type="serial", parity="N", stopbits=1, bytesize=8, timeout=1):
ExecutorBase.__init__(self, checkok=False)
self.device = device
self.baudrate = baudrate
self.type = type
self.parity = parity
self.stopbits = stopbits
self.bytesize = bytesize
self.timeout = timeout
self._id = None
self._log_info("Initialized")
self.reconnect()
self.fetch()
def reconnect(self):
self.console = serial.Serial(
port=self.device,
baudrate=self.baudrate,
parity=self.parity,
stopbits=self.stopbits,
bytesize=self.bytesize,
timeout=self.timeout,
)
return True
@property
def id(self):
if self._id is None:
self._id = "serial.%s" % (self.device)
return self._id
def execute(self, cmds, die=True, checkok=None, showout=True, timeout=0, env={}):
self._log_debug("Serial command: %s" % cmds)
if not cmds.endswith("\n"):
cmds += "\n"
self.send(cmds)
return 0, "", ""
def send(self, data):
self.console.write(data.encode("utf-8"))
def fetch(self):
input = self.console.read_all()
return input.decode("utf-8")
def enter(self, command):
self.send(command)
self.send("\n")
def _execute_script(self, content="", die=True, showout=True, checkok=None):
raise j.exceptions.NotImplemented()
def upload(self, source, dest, dest_prefix="", recursive=True, createdir=True):
raise j.exceptions.NotImplemented()
def download(self, source, dest, source_prefix="", recursive=True):
raise j.exceptions.NotImplemented()
def __repr__(self):
return "Executor serial: %s" % (self.device)
__str__ = __repr__
| from Jumpscale import j
JSBASE = j.baseclasses.object
from .ExecutorBase import *
import serial
class ExecutorSerial(ExecutorBase):
"""
This executor is primarily made to communicate with devices (routers, switches, ...) over a
console cable, but you can use the underlying methods to communicate with any serial device.
Please note that the default mode attempts to recognize a device with Cisco-like commands.
"""
def __init__(self, device, baudrate=9600, type="serial", parity="N", stopbits=1, bytesize=8, timeout=1):
ExecutorBase.__init__(self, checkok=False)
self.device = device
self.baudrate = baudrate
self.type = type
self.parity = parity
self.stopbits = stopbits
self.bytesize = bytesize
self.timeout = timeout
self._id = None
self._log_info("Initialized")
self.reconnect()
self.fetch()
def reconnect(self):
self.console = serial.Serial(
port=self.device,
baudrate=self.baudrate,
parity=self.parity,
stopbits=self.stopbits,
bytesize=self.bytesize,
timeout=self.timeout,
)
return True
@property
def id(self):
if self._id is None:
self._id = "serial.%s" % (self.device)
return self._id
def execute(self, cmds, die=True, checkok=None, showout=True, timeout=0, env={}):
self._log_debug("Serial command: %s" % cmds)
if not cmds.endswith("\n"):
cmds += "\n"
self.send(cmds)
return 0, "", ""
def send(self, data):
self.console.write(data.encode("utf-8"))
def fetch(self):
input = self.console.read_all()
return input.decode("utf-8")
def enter(self, command):
self.send(command)
self.send("\n")
def _execute_script(self, content="", die=True, showout=True, checkok=None):
raise j.exceptions.NotImplemented()
def upload(self, source, dest, dest_prefix="", recursive=True, createdir=True):
raise j.exceptions.NotImplemented()
def download(self, source, dest, source_prefix="", recursive=True):
raise j.exceptions.NotImplemented()
def __repr__(self):
return "Executor serial: %s" % (self.device)
__str__ = __repr__ | en | 0.942915 | This executor is primarily made to communicate with devices (routers, switches, ...) over a console cable, but you can use the underlying methods to communicate with any serial device. Please note that the default mode attempts to recognize a device with Cisco-like commands. | 3.007535 | 3 |
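ExecutorSerial wraps pyserial: reconnect() opens the port, send()/enter() write to it, and fetch() drains whatever the device has echoed back, while execute() always reports success with empty output. A hedged usage sketch, assuming a device is attached at a placeholder path such as /dev/ttyUSB0:

```python
# Illustrative usage only; the device path and command are placeholders and
# real serial hardware must be attached for the constructor to succeed.
import time

ex = ExecutorSerial("/dev/ttyUSB0", baudrate=9600, timeout=1)
ex.enter("show version")       # writes the command followed by a newline
time.sleep(1)                  # give the device a moment to respond
print(ex.fetch())              # read back whatever is waiting in the buffer
rc, out, err = ex.execute("terminal length 0")  # always returns (0, "", "")
```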
melisa/utils/snowflake.py | MelisaDev/melisa | 5 | 799 | # Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
from __future__ import annotations
class Snowflake(int):
"""
Discord utilizes Twitter's snowflake format for uniquely identifiable descriptors (IDs).
These IDs are guaranteed to be unique across all of Discord,
except in some unique scenarios in which child objects share their parent's ID.
Because Snowflake IDs are up to 64 bits in size (e.g. a uint64),
they are always returned as strings in the HTTP API
to prevent integer overflows in some languages.
See Gateway ETF/JSON for more information regarding Gateway encoding.
Read more here: https://discord.com/developers/docs/reference#snowflakes
"""
_MAX_VALUE: int = 9223372036854775807
_MIN_VALUE: int = 0
def __init__(self, _):
super().__init__()
if self < self._MIN_VALUE:
raise ValueError("snowflake value should be greater than or equal to 0.")
if self > self._MAX_VALUE:
raise ValueError(
"snowflake value should be less than or equal to 9223372036854775807."
)
@classmethod
def __factory__(cls, string: str) -> Snowflake:
return cls.from_string(string)
@classmethod
def from_string(cls, string: str):
"""Initialize a new Snowflake from a string.
Parameters
----------
string: :class:`str`
The snowflake as a string.
"""
return Snowflake(int(string))
@property
def timestamp(self) -> int:
"""
Milliseconds since Discord Epoch, the first second of 2015 or 1420070400000.
"""
return self >> 22
@property
def worker_id(self) -> int:
"""Internal worker ID"""
return (self >> 17) % 16
@property
def process_id(self) -> int:
"""Internal process ID"""
return (self >> 12) % 16
@property
def increment(self) -> int:
"""For every ID that is generated on that process, this number is incremented"""
return self % 2048
@property
def unix(self) -> int:
return self.timestamp + 1420070400000
| # Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
from __future__ import annotations
class Snowflake(int):
"""
Discord utilizes Twitter's snowflake format for uniquely identifiable descriptors (IDs).
These IDs are guaranteed to be unique across all of Discord,
except in some unique scenarios in which child objects share their parent's ID.
Because Snowflake IDs are up to 64 bits in size (e.g. a uint64),
they are always returned as strings in the HTTP API
to prevent integer overflows in some languages.
See Gateway ETF/JSON for more information regarding Gateway encoding.
Read more here: https://discord.com/developers/docs/reference#snowflakes
"""
_MAX_VALUE: int = 9223372036854775807
_MIN_VALUE: int = 0
def __init__(self, _):
super().__init__()
if self < self._MIN_VALUE:
raise ValueError("snowflake value should be greater than or equal to 0.")
if self > self._MAX_VALUE:
raise ValueError(
"snowflake value should be less than or equal to 9223372036854775807."
)
@classmethod
def __factory__(cls, string: str) -> Snowflake:
return cls.from_string(string)
@classmethod
def from_string(cls, string: str):
"""Initialize a new Snowflake from a string.
Parameters
----------
string: :class:`str`
The snowflake as a string.
"""
return Snowflake(int(string))
@property
def timestamp(self) -> int:
"""
Milliseconds since Discord Epoch, the first second of 2015 or 1420070400000.
"""
return self >> 22
@property
def worker_id(self) -> int:
"""Internal worker ID"""
return (self >> 17) % 16
@property
def process_id(self) -> int:
"""Internal process ID"""
return (self >> 12) % 16
@property
def increment(self) -> int:
"""For every ID that is generated on that process, this number is incremented"""
return self % 2048
@property
def unix(self) -> int:
return self.timestamp + 1420070400000
| en | 0.835261 | # Copyright MelisaDev 2022 - Present # Full MIT License can be found in `LICENSE.txt` at the project root. Discord utilizes Twitter's snowflake format for uniquely identifiable descriptors (IDs). These IDs are guaranteed to be unique across all of Discord, except in some unique scenarios in which child objects share their parent's ID. Because Snowflake IDs are up to 64 bits in size (e.g. a uint64), they are always returned as strings in the HTTP API to prevent integer overflows in some languages. See Gateway ETF/JSON for more information regarding Gateway encoding. Read more here: https://discord.com/developers/docs/reference#snowflakes Initialize a new Snowflake from a string. Parameters ---------- string: :class:`str` The snowflake as a string. Milliseconds since Discord Epoch, the first second of 2015 or 1420070400000. Internal worker ID Internal process ID For every ID that is generated on that process, this number is incremented | 2.420337 | 2 |
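The Snowflake class above unpacks a 64-bit Discord ID: the bits above position 22 hold milliseconds since the Discord epoch (2015-01-01), and the lower bits carry the worker ID, process ID, and per-process increment. A short decoding sketch using only the properties defined above; the example ID is the one used in Discord's own documentation, so treat the printed values as illustrative:

```python
# Decode a snowflake into its components via the properties defined above.
from datetime import datetime, timezone

sf = Snowflake.from_string("175928847299117063")  # example ID from Discord's docs
created_at = datetime.fromtimestamp(sf.unix / 1000, tz=timezone.utc)

print(created_at)      # creation time derived from the embedded timestamp
print(sf.worker_id)    # internal worker ID
print(sf.process_id)   # internal process ID
print(sf.increment)    # per-process increment
```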