repo | path | func_name | original_string | language | sha | url | partition |
---|---|---|---|---|---|---|---|
janpipek/physt | physt/binnings.py | human_binning | def human_binning(data=None, bin_count: Optional[int] = None, *, range=None, **kwargs) -> FixedWidthBinning:
    """Construct fixed-width binning schema with bins automatically optimized to human-friendly widths.

    Typical widths are: 1.0, 25.0, 0.02, 500, 2.5e-7, ...

    Parameters
    ----------
    bin_count: Number of bins
    range: Optional[tuple]
        (min, max)
    """
    subscales = np.array([0.5, 1, 2, 2.5, 5, 10])
    # TODO: remove colliding kwargs
    if data is None and range is None:
        raise RuntimeError("Cannot guess optimum bin width without data.")
    if bin_count is None:
        bin_count = ideal_bin_count(data)
    min_ = range[0] if range else data.min()
    max_ = range[1] if range else data.max()
    bw = (max_ - min_) / bin_count
    power = np.floor(np.log10(bw)).astype(int)
    best_index = np.argmin(np.abs(np.log(subscales * (10.0 ** power) / bw)))
    bin_width = (10.0 ** power) * subscales[best_index]
    return fixed_width_binning(bin_width=bin_width, data=data, range=range, **kwargs) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L628-L653 | train |
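A minimal usage sketch for the function above (the import path is inferred from the record's path column; the `bin_width` attribute is assumed from the `FixedWidthBinning` return type):

```python
import numpy as np
from physt.binnings import human_binning  # import path assumed from the record metadata

rng = np.random.default_rng(42)
data = rng.normal(loc=0.0, scale=1.3, size=1_000)

# Ask for about 10 bins; the raw width is snapped to the nearest
# "human-friendly" value (0.5, 1, 2, 2.5, 5 or 10 times a power of ten),
# nearest being measured in log-space, as in the function body above.
binning = human_binning(data, bin_count=10)
print(binning.bin_width)  # e.g. 0.5 instead of an awkward 0.4713...
```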
janpipek/physt | physt/binnings.py | quantile_binning | def quantile_binning(data=None, bins=10, *, qrange=(0.0, 1.0), **kwargs) -> StaticBinning:
    """Binning schema based on quantile ranges.

    This binning finds equally spaced quantiles. This should lead to
    all bins having roughly the same frequencies.

    Note: weights are not (yet) taken into account for calculating
    quantiles.

    Parameters
    ----------
    bins: sequence or Optional[int]
        Number of bins
    qrange: Optional[tuple]
        Two floats as minimum and maximum quantile (default: 0.0, 1.0)

    Returns
    -------
    StaticBinning
    """
    if np.isscalar(bins):
        bins = np.linspace(qrange[0] * 100, qrange[1] * 100, bins + 1)
    bins = np.percentile(data, bins)
    return static_binning(bins=make_bin_array(bins), includes_right_edge=True) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L656-L680 | train |
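A sketch of the equal-frequency behaviour (assumed import path; `numpy_bins` is the edges property documented further down in this file):

```python
import numpy as np
from physt.binnings import quantile_binning  # assumed import path

rng = np.random.default_rng(0)
data = rng.exponential(scale=2.0, size=10_000)

binning = quantile_binning(data, bins=5)
counts, _ = np.histogram(data, bins=binning.numpy_bins)
print(counts)  # roughly [2000, 2000, 2000, 2000, 2000]
```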
janpipek/physt | physt/binnings.py | static_binning | def static_binning(data=None, bins=None, **kwargs) -> StaticBinning:
    """Construct static binning with the given (arbitrary) bins."""
    return StaticBinning(bins=make_bin_array(bins), **kwargs) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L683-L685 | train |
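A short sketch with hand-picked, irregular edges (assumed import path; per `make_bin_array`, a flat sequence of edges becomes pairs):

```python
from physt.binnings import static_binning  # assumed import path

# The data argument is ignored here; the bins are fully explicit.
binning = static_binning(bins=[0, 1, 2, 5, 10, 100])
print(binning.bins)  # edge pairs: [[0, 1], [1, 2], [2, 5], [5, 10], [10, 100]]
```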
janpipek/physt | physt/binnings.py | integer_binning | def integer_binning(data=None, **kwargs) -> FixedWidthBinning:
    """Construct fixed-width binning schema with bins centered around integers.

    Parameters
    ----------
    range: Optional[Tuple[int]]
        min (included) and max integer (excluded) bin
    bin_width: Optional[int]
        group "bin_width" integers into one bin (not recommended)
    """
    if "range" in kwargs:
        kwargs["range"] = tuple(r - 0.5 for r in kwargs["range"])
    return fixed_width_binning(data=data, bin_width=kwargs.pop("bin_width", 1),
                               align=True, bin_shift=0.5, **kwargs) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L688-L701 | train |
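A sketch for discrete data (assumed import path); the half-unit shift puts each integer at a bin center:

```python
import numpy as np
from physt.binnings import integer_binning  # assumed import path

rolls = np.random.default_rng(1).integers(1, 7, size=600)  # simulated dice
binning = integer_binning(rolls, range=(1, 7))
print(binning.numpy_bins)  # half-integer edges: [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5]
```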
janpipek/physt | physt/binnings.py | fixed_width_binning | def fixed_width_binning(data=None, bin_width: Union[float, int] = 1, *, range=None, includes_right_edge=False, **kwargs) -> FixedWidthBinning:
    """Construct fixed-width binning schema.

    Parameters
    ----------
    bin_width: float
    range: Optional[tuple]
        (min, max)
    align: Optional[float]
        Must be multiple of bin_width
    """
    result = FixedWidthBinning(bin_width=bin_width, includes_right_edge=includes_right_edge,
                               **kwargs)
    if range:
        result._force_bin_existence(range[0])
        result._force_bin_existence(range[1], includes_right_edge=True)
        if not kwargs.get("adaptive"):
            return result  # Otherwise we want to adapt to data
    if data is not None and data.shape[0]:
        result._force_bin_existence([np.min(data), np.max(data)],
                                    includes_right_edge=includes_right_edge)
    return result | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L704-L726 | train |
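A non-adaptive sketch (assumed import path); with an explicit range the function returns right after forcing the range bins into existence:

```python
import numpy as np
from physt.binnings import fixed_width_binning  # assumed import path

data = np.array([0.1, 0.7, 1.9, 3.2])
binning = fixed_width_binning(data, bin_width=1.0, range=(0, 4))
print(binning.numpy_bins)  # [0., 1., 2., 3., 4.]
```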
janpipek/physt | physt/binnings.py | exponential_binning | def exponential_binning(data=None, bin_count: Optional[int] = None, *, range=None, **kwargs) -> ExponentialBinning:
    """Construct exponential binning schema.

    Parameters
    ----------
    bin_count: Optional[int]
        Number of bins
    range: Optional[tuple]
        (min, max)

    See also
    --------
    numpy.logspace - note that our range semantics is different
    """
    if bin_count is None:
        bin_count = ideal_bin_count(data)
    if range:
        range = (np.log10(range[0]), np.log10(range[1]))
    else:
        range = (np.log10(data.min()), np.log10(data.max()))
    log_width = (range[1] - range[0]) / bin_count
    return ExponentialBinning(log_min=range[0], log_width=log_width, bin_count=bin_count, **kwargs) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L729-L751 | train |
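A sketch of the log-spaced edges (assumed import path); note that `range` is given in data coordinates and converted to log10 internally:

```python
from physt.binnings import exponential_binning  # assumed import path

# Three bins spanning three decades: edges at 1, 10, 100, 1000.
binning = exponential_binning(bin_count=3, range=(1, 1000))
print(binning.numpy_bins)  # approximately [1., 10., 100., 1000.]
```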
janpipek/physt | physt/binnings.py | calculate_bins | def calculate_bins(array, _=None, *args, **kwargs) -> BinningBase:
    """Find optimal binning from arguments.

    Parameters
    ----------
    array: arraylike
        Data from which the bins should be decided (sometimes used, sometimes not)
    _: int or str or Callable or arraylike or Iterable or BinningBase
        To-be-guessed parameter that specifies what kind of binning should be done
    check_nan: bool
        Check for the presence of nan's in array? Default: True
    range: tuple
        Limit values to a range. Some of the binning methods also (subsequently)
        use this parameter for the bin shape.

    Returns
    -------
    BinningBase
        A binning object; its bins form a two-dimensional array with pairs of
        bin edges (not necessarily consecutive).
    """
    if array is not None:
        if kwargs.pop("check_nan", True):
            if np.any(np.isnan(array)):
                raise RuntimeError("Cannot calculate bins in presence of NaN's.")
        if kwargs.get("range", None):  # TODO: re-consider the usage of this parameter
            array = array[(array >= kwargs["range"][0]) & (array <= kwargs["range"][1])]
    if _ is None:
        bin_count = 10  # kwargs.pop("bins", ideal_bin_count(data=array)) - same as numpy
        binning = numpy_binning(array, bin_count, *args, **kwargs)
    elif isinstance(_, BinningBase):
        binning = _
    elif isinstance(_, int):
        binning = numpy_binning(array, _, *args, **kwargs)
    elif isinstance(_, str):
        # What about the ranges???
        if _ in bincount_methods:
            bin_count = ideal_bin_count(array, method=_)
            binning = numpy_binning(array, bin_count, *args, **kwargs)
        elif _ in binning_methods:
            method = binning_methods[_]
            binning = method(array, *args, **kwargs)
        else:
            raise RuntimeError("No binning method {0} available.".format(_))
    elif callable(_):
        binning = _(array, *args, **kwargs)
    elif np.iterable(_):
        binning = static_binning(array, _, *args, **kwargs)
    else:
        raise RuntimeError("Binning {0} not understood.".format(_))
    return binning | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L754-L804 | train |
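A sketch of the dispatch on the second argument (assumed import path; the string variant assumes "quantile" is a registered key in `binning_methods`):

```python
import numpy as np
from physt.binnings import calculate_bins  # assumed import path

data = np.random.default_rng(7).normal(size=500)

b1 = calculate_bins(data, 20)                    # int -> numpy-style binning
b2 = calculate_bins(data, "quantile", bins=4)    # str -> named binning method (assumed key)
b3 = calculate_bins(data, [0.0, 1.0, 2.0, 3.0])  # iterable -> static binning
```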
janpipek/physt | physt/binnings.py | ideal_bin_count | def ideal_bin_count(data, method: str = "default") -> int:
    """A theoretically ideal bin count.

    Parameters
    ----------
    data: array_like
        Data to work on. Most methods don't use this.
    method: str
        Name of the method to apply, available values:
        - default (~sturges)
        - sqrt
        - sturges
        - doane
        - rice
        See https://en.wikipedia.org/wiki/Histogram for the description
    """
    n = data.size
    if n < 1:
        return 1
    if method == "default":
        if n <= 32:
            return 7
        else:
            return ideal_bin_count(data, "sturges")
    elif method == "sqrt":
        return int(np.ceil(np.sqrt(n)))
    elif method == "sturges":
        return int(np.ceil(np.log2(n)) + 1)
    elif method == "doane":
        if n < 3:
            return 1
        from scipy.stats import skew
        # Parentheses added so that this matches Doane's formula,
        # sigma_g1 = sqrt(6 (n - 2) / ((n + 1) (n + 3))).
        sigma = np.sqrt(6 * (n - 2) / ((n + 1) * (n + 3)))
        return int(np.ceil(1 + np.log2(n) + np.log2(1 + np.abs(skew(data)) / sigma)))
    elif method == "rice":
        return int(np.ceil(2 * np.power(n, 1 / 3))) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L972-L1007 | train |
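A quick check of the named rules, as a sketch (assumed import path). For n = 1000: sqrt gives ceil(31.6) = 32, sturges gives ceil(log2 1000) + 1 = 11, rice gives ceil(2 * 10) = 20:

```python
import numpy as np
from physt.binnings import ideal_bin_count  # assumed import path

data = np.arange(1000)
for method in ("sqrt", "sturges", "rice"):
    print(method, ideal_bin_count(data, method=method))  # 32, 11, 20
```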
janpipek/physt | physt/binnings.py | as_binning | def as_binning(obj, copy: bool = False) -> BinningBase:
    """Ensure that an object is a binning.

    Parameters
    ----------
    obj : BinningBase or array_like
        Can be a binning, numpy-like bins or full physt bins
    copy : If true, ensure that the returned object is independent
    """
    if isinstance(obj, BinningBase):
        if copy:
            return obj.copy()
        else:
            return obj
    else:
        bins = make_bin_array(obj)
        return StaticBinning(bins) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L1013-L1029 | train |
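A sketch of the two code paths (assumed import path): an existing binning passes through, anything array-like is wrapped in a StaticBinning:

```python
from physt.binnings import as_binning  # assumed import path

b = as_binning([0, 1, 2, 3])       # numpy-style edges -> StaticBinning
same = as_binning(b)               # already a binning -> returned as-is
copied = as_binning(b, copy=True)  # independent copy
assert same is b and copied is not b
```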
janpipek/physt | physt/binnings.py | BinningBase.to_dict | def to_dict(self) -> OrderedDict:
    """Dictionary representation of the binning schema.

    This serves as a template method; subclasses should implement _update_dict.
    """
    result = OrderedDict()
    result["adaptive"] = self._adaptive
    result["binning_type"] = type(self).__name__
    self._update_dict(result)
    return result | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L77-L86 | train |
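A minimal, self-contained sketch of the template-method pattern the docstring describes (stand-in class names, not physt's own):

```python
from collections import OrderedDict

class BaseLike:
    def to_dict(self) -> OrderedDict:
        result = OrderedDict(binning_type=type(self).__name__)
        self._update_dict(result)  # the subclass hook
        return result

class FixedWidthLike(BaseLike):
    def _update_dict(self, a_dict: OrderedDict) -> None:
        a_dict["bin_width"] = 0.5  # only subclass-specific fields go here

print(FixedWidthLike().to_dict())
# OrderedDict([('binning_type', 'FixedWidthLike'), ('bin_width', 0.5)])
```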
janpipek/physt | physt/binnings.py | BinningBase.is_regular | def is_regular(self, rtol: float = 1.e-5, atol: float = 1.e-8) -> bool:
    """Whether all bins have the same width.

    Parameters
    ----------
    rtol, atol : numpy tolerance parameters
    """
    return np.allclose(np.diff(self.bins[1] - self.bins[0]), 0.0, rtol=rtol, atol=atol) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L97-L104 | train |
janpipek/physt | physt/binnings.py | BinningBase.is_consecutive | def is_consecutive(self, rtol: float = 1.e-5, atol: float = 1.e-8) -> bool:
    """Whether all bins are in a growing order.

    Parameters
    ----------
    rtol, atol : numpy tolerance parameters
    """
    if self.inconsecutive_allowed:
        if self._consecutive is None:
            if self._numpy_bins is not None:
                self._consecutive = True
            else:
                self._consecutive = is_consecutive(self.bins, rtol, atol)
        return self._consecutive
    else:
        return True | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L106-L120 | train |
janpipek/physt | physt/binnings.py | BinningBase.adapt | def adapt(self, other: 'BinningBase'):
    """Adapt this binning so that it contains all bins of another binning.

    Parameters
    ----------
    other: BinningBase
    """
    # TODO: in-place arg
    if np.array_equal(self.bins, other.bins):
        return None, None
    elif not self.is_adaptive():
        raise RuntimeError("Cannot adapt non-adaptive binning.")
    else:
        return self._adapt(other) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L154-L167 | train |
janpipek/physt | physt/binnings.py | BinningBase.numpy_bins | def numpy_bins(self) -> np.ndarray:
    """Bins in the numpy format.

    This might not be available for inconsecutive binnings.

    Returns
    -------
    edges: np.ndarray
        shape=(bin_count+1,)
    """
    if self._numpy_bins is None:
        self._numpy_bins = to_numpy_bins(self.bins)
    return self._numpy_bins | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L200-L212 | train |
janpipek/physt | physt/binnings.py | BinningBase.numpy_bins_with_mask | def numpy_bins_with_mask(self) -> Tuple[np.ndarray, np.ndarray]:
    """Bins in the numpy format, including the gaps in inconsecutive binnings.

    Returns
    -------
    edges, mask: np.ndarray

    See Also
    --------
    bin_utils.to_numpy_bins_with_mask
    """
    bwm = to_numpy_bins_with_mask(self.bins)
    if not self.includes_right_edge:
        bwm[0].append(np.inf)
    return bwm | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L215-L229 | train |
janpipek/physt | physt/binnings.py | StaticBinning.as_static | def as_static(self, copy: bool = True) -> 'StaticBinning':
    """Convert binning to a static form.

    Parameters
    ----------
    copy : if False, returns itself (already satisfying conditions).

    Returns
    -------
    StaticBinning
        A new static binning with a copy of bins.
    """
    if copy:
        return StaticBinning(bins=self.bins.copy(),
                             includes_right_edge=self.includes_right_edge)
    else:
        return self | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L319-L335 | train |
janpipek/physt | physt/compat/dask.py | histogram1d | def histogram1d(data, bins=None, *args, **kwargs):
    """Facade function to create one-dimensional histogram using dask.

    Parameters
    ----------
    data: dask.DaskArray or array-like

    See also
    --------
    physt.histogram
    """
    import dask
    if not hasattr(data, "dask"):
        data = dask.array.from_array(data, chunks=int(data.shape[0] / options["chunk_split"]))
    if not kwargs.get("adaptive", True):
        raise RuntimeError("Only adaptive histograms supported for dask (currently).")
    kwargs["adaptive"] = True

    def block_hist(array):
        return original_h1(array, bins, *args, **kwargs)

    return _run_dask(
        name="dask_adaptive1d",
        data=data,
        compute=kwargs.pop("compute", True),
        method=kwargs.pop("dask_method", "threaded"),
        func=block_hist) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/compat/dask.py#L30-L57 | train |
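A usage sketch (assuming `dask` is installed; the import path is taken from the record's path column, and the binning arguments follow the physt facade conventions):

```python
import dask.array as da
from physt.compat.dask import histogram1d  # assumed import path

# One million samples histogrammed chunk by chunk; per-block histograms
# are forced to be adaptive so they can be merged afterwards.
x = da.random.normal(0.0, 1.0, size=1_000_000, chunks=100_000)
h = histogram1d(x, "fixed_width", bin_width=0.25)
print(h.total)  # 1000000 if no values were missed
```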
janpipek/physt | physt/compat/dask.py | histogram2d | def histogram2d(data1, data2, bins=None, *args, **kwargs):
    """Facade function to create 2D histogram using dask."""
    # TODO: currently very unoptimized! for non-dasks
    import dask
    if "axis_names" not in kwargs:
        if hasattr(data1, "name") and hasattr(data2, "name"):
            kwargs["axis_names"] = [data1.name, data2.name]
    if not hasattr(data1, "dask"):
        data1 = dask.array.from_array(data1, chunks=int(data1.size / 100))
    if not hasattr(data2, "dask"):
        data2 = dask.array.from_array(data2, chunks=int(data2.size / 100))
    data = dask.array.stack([data1, data2], axis=1)
    kwargs["dim"] = 2
    return histogramdd(data, bins, *args, **kwargs) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/compat/dask.py#L88-L102 | train |
janpipek/physt | physt/util.py | all_subclasses | def all_subclasses(cls: type) -> Tuple[type, ...]:
    """All subclasses of a class.

    From: http://stackoverflow.com/a/17246726/2692780
    """
    subclasses = []
    for subclass in cls.__subclasses__():
        subclasses.append(subclass)
        subclasses.extend(all_subclasses(subclass))
    return tuple(subclasses) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/util.py#L9-L18 | train |
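A sketch showing that the recursion also collects indirect subclasses (assumed import path):

```python
from physt.util import all_subclasses  # assumed import path

class A: ...
class B(A): ...
class C(B): ...

print(all_subclasses(A))  # (B, C): C is found via recursion into B
```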
janpipek/physt | physt/util.py | find_subclass | def find_subclass(base: type, name: str) -> type:
    """Find a named subclass of a base class.

    Uses only the class name without namespace.
    """
    class_candidates = [klass for klass in all_subclasses(base)
                        if klass.__name__ == name]
    if len(class_candidates) == 0:
        raise RuntimeError("No \"{0}\" subclass of \"{1}\".".format(name, base.__name__))
    elif len(class_candidates) > 1:
        raise RuntimeError("Multiple \"{0}\" subclasses of \"{1}\".".format(name, base.__name__))
    return class_candidates[0] | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/util.py#L21-L34 | train |
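A sketch tying this to `BinningBase.to_dict` above: the stored `binning_type` name can be resolved back to a class (assumed import paths):

```python
from physt.util import find_subclass  # assumed import path
from physt.binnings import BinningBase

klass = find_subclass(BinningBase, "StaticBinning")
print(klass)  # <class 'physt.binnings.StaticBinning'>
```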
janpipek/physt | physt/histogram_collection.py | HistogramCollection.add | def add(self, histogram: Histogram1D):
    """Add a histogram to the collection."""
    if self.binning and not self.binning == histogram.binning:
        raise ValueError("Cannot add histogram with different binning.")
    self.histograms.append(histogram) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_collection.py#L80-L84 | train |
janpipek/physt | physt/histogram_collection.py | HistogramCollection.normalize_bins | def normalize_bins(self, inplace: bool = False) -> "HistogramCollection":
    """Normalize each bin in the collection so that the sum is 1.0 for each bin.

    Note: If a bin is zero in all collections, the result will be inf.
    """
    col = self if inplace else self.copy()
    sums = self.sum().frequencies
    for h in col.histograms:
        h.set_dtype(float)
        h._frequencies /= sums
        h._errors2 /= sums ** 2  # TODO: Does this make sense?
    return col | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_collection.py#L113-L124 | train |
janpipek/physt | physt/histogram_collection.py | HistogramCollection.multi_h1 | def multi_h1(cls, a_dict: Dict[str, Any], bins=None, **kwargs) -> "HistogramCollection":
    """Create a collection from multiple datasets."""
    from physt.binnings import calculate_bins
    # Pop collection-level kwargs first so they do not leak into the binning calculation.
    title = kwargs.pop("title", None)
    name = kwargs.pop("name", None)
    mega_values = np.concatenate(list(a_dict.values()))
    binning = calculate_bins(mega_values, bins, **kwargs)
    collection = HistogramCollection(binning=binning, title=title, name=name)
    for key, value in a_dict.items():
        collection.create(key, value)
    return collection | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_collection.py#L142-L154 | train |
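A sketch of building a collection with one shared binning over several datasets (assumed import path; the `name` attribute on each member histogram is assumed to be set by `create`):

```python
import numpy as np
from physt.histogram_collection import HistogramCollection  # assumed import path

rng = np.random.default_rng(3)
collection = HistogramCollection.multi_h1(
    {"signal": rng.normal(1.0, 0.5, size=2_000),
     "background": rng.normal(0.0, 2.0, size=8_000)},
    bins=40,
)
print([h.name for h in collection.histograms])  # ['signal', 'background'] (assumed)
```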
janpipek/physt | physt/histogram_collection.py | HistogramCollection.to_json | def to_json(self, path: Optional[str] = None, **kwargs) -> str:
    """Convert to JSON representation.

    Parameters
    ----------
    path: Where to write the JSON.

    Returns
    -------
    The JSON representation.
    """
    from .io import save_json
    return save_json(self, path, **kwargs) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_collection.py#L171-L183 | train |
janpipek/physt | physt/histogram_base.py | HistogramBase._get_axis | def _get_axis(self, name_or_index: AxisIdentifier) -> int:
    """Get a zero-based index of an axis and check its existence."""
    # TODO: Add unit test
    if isinstance(name_or_index, int):
        if name_or_index < 0 or name_or_index >= self.ndim:
            raise ValueError("No such axis, must be from 0 to {0}".format(self.ndim - 1))
        return name_or_index
    elif isinstance(name_or_index, str):
        if name_or_index not in self.axis_names:
            named_axes = [name for name in self.axis_names if name]
            raise ValueError("No axis with such name: {0}, available names: {1}. "
                             "In most places, you can also use numbers."
                             .format(name_or_index, ", ".join(named_axes)))
        return self.axis_names.index(name_or_index)
    else:
        raise TypeError("Argument of type {0} not understood, int or str expected.".format(type(name_or_index))) | python | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L177-L191 | train |
janpipek/physt | physt/histogram_base.py | HistogramBase.shape | def shape(self) -> Tuple[int, ...]:
"""Shape of histogram's data.
Returns
-------
Tuple with the number of bins along each axis (one element per dimension).
"""
return tuple(bins.bin_count for bins in self._binnings) | python | def shape(self) -> Tuple[int, ...]:
"""Shape of histogram's data.
Returns
-------
Tuple with the number of bins along each axis (one element per dimension).
"""
return tuple(bins.bin_count for bins in self._binnings) | [
"def",
"shape",
"(",
"self",
")",
"->",
"Tuple",
"[",
"int",
",",
"...",
"]",
":",
"return",
"tuple",
"(",
"bins",
".",
"bin_count",
"for",
"bins",
"in",
"self",
".",
"_binnings",
")"
] | Shape of histogram's data.
Returns
-------
Tuple with the number of bins along each axis (one element per dimension). | [
"Shape",
"of",
"histogram",
"s",
"data",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L194-L201 | train |
janpipek/physt | physt/histogram_base.py | HistogramBase.set_dtype | def set_dtype(self, value, check: bool = True):
"""Change data type of the bin contents.
Allowed conversions:
- from integral to float types
- between the same category of type (float/integer)
- from float types to integer if weights are trivial
Parameters
----------
value: np.dtype or something convertible to it.
check: bool
If True (default), all values are checked against the limits
"""
# TODO? Deal with unsigned types
value, type_info = self._eval_dtype(value)
if value == self._dtype:
return
if self.dtype is None or np.can_cast(self.dtype, value):
pass # Ok
elif check:
if np.issubdtype(value, np.integer):
if self.dtype.kind == "f":
for array in (self._frequencies, self._errors2):
if np.any(array % 1.0):
raise RuntimeError("Data contain non-integer values.")
for array in (self._frequencies, self._errors2):
if np.any((array > type_info.max) | (array < type_info.min)):
raise RuntimeError("Data contain values outside the specified range.")
self._dtype = value
self._frequencies = self._frequencies.astype(value)
self._errors2 = self._errors2.astype(value)
self._missed = self._missed.astype(value) | python | def set_dtype(self, value, check: bool = True):
"""Change data type of the bin contents.
Allowed conversions:
- from integral to float types
- between the same category of type (float/integer)
- from float types to integer if weights are trivial
Parameters
----------
value: np.dtype or something convertible to it.
check: bool
If True (default), all values are checked against the limits
"""
# TODO? Deal with unsigned types
value, type_info = self._eval_dtype(value)
if value == self._dtype:
return
if self.dtype is None or np.can_cast(self.dtype, value):
pass # Ok
elif check:
if np.issubdtype(value, np.integer):
if self.dtype.kind == "f":
for array in (self._frequencies, self._errors2):
if np.any(array % 1.0):
raise RuntimeError("Data contain non-integer values.")
for array in (self._frequencies, self._errors2):
if np.any((array > type_info.max) | (array < type_info.min)):
raise RuntimeError("Data contain values outside the specified range.")
self._dtype = value
self._frequencies = self._frequencies.astype(value)
self._errors2 = self._errors2.astype(value)
self._missed = self._missed.astype(value) | [
"def",
"set_dtype",
"(",
"self",
",",
"value",
",",
"check",
":",
"bool",
"=",
"True",
")",
":",
"# TODO? Deal with unsigned types",
"value",
",",
"type_info",
"=",
"self",
".",
"_eval_dtype",
"(",
"value",
")",
"if",
"value",
"==",
"self",
".",
"_dtype",
":",
"return",
"if",
"self",
".",
"dtype",
"is",
"None",
"or",
"np",
".",
"can_cast",
"(",
"self",
".",
"dtype",
",",
"value",
")",
":",
"pass",
"# Ok",
"elif",
"check",
":",
"if",
"np",
".",
"issubdtype",
"(",
"value",
",",
"np",
".",
"integer",
")",
":",
"if",
"self",
".",
"dtype",
".",
"kind",
"==",
"\"f\"",
":",
"for",
"array",
"in",
"(",
"self",
".",
"_frequencies",
",",
"self",
".",
"_errors2",
")",
":",
"if",
"np",
".",
"any",
"(",
"array",
"%",
"1.0",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Data contain non-integer values.\"",
")",
"for",
"array",
"in",
"(",
"self",
".",
"_frequencies",
",",
"self",
".",
"_errors2",
")",
":",
"if",
"np",
".",
"any",
"(",
"(",
"array",
">",
"type_info",
".",
"max",
")",
"|",
"(",
"array",
"<",
"type_info",
".",
"min",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Data contain values outside the specified range.\"",
")",
"self",
".",
"_dtype",
"=",
"value",
"self",
".",
"_frequencies",
"=",
"self",
".",
"_frequencies",
".",
"astype",
"(",
"value",
")",
"self",
".",
"_errors2",
"=",
"self",
".",
"_errors2",
".",
"astype",
"(",
"value",
")",
"self",
".",
"_missed",
"=",
"self",
".",
"_missed",
".",
"astype",
"(",
"value",
")"
] | Change data type of the bin contents.
Allowed conversions:
- from integral to float types
- between the same category of type (float/integer)
- from float types to integer if weights are trivial
Parameters
----------
value: np.dtype or something convertible to it.
check: bool
If True (default), all values are checked against the limits | [
"Change",
"data",
"type",
"of",
"the",
"bin",
"contents",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L244-L278 | train |
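A sketch of the conversion rules above, assuming an unweighted physt h1 whose counts start as an integer dtype (the default dtype is an assumption here).
import numpy as np
import physt

h = physt.h1(np.random.rand(1000), 10)  # integer counts assumed by default
h.set_dtype(np.float64)                 # widening: always allowed
h.set_dtype(np.int64)                   # narrowing back: allowed while counts are whole
# After weighted fills produced fractional frequencies, the same call would
# raise RuntimeError("Data contain non-integer values.").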
janpipek/physt | physt/histogram_base.py | HistogramBase._coerce_dtype | def _coerce_dtype(self, other_dtype):
"""Possibly change the bin content type to allow correct operations with other operand.
Parameters
----------
other_dtype : np.dtype or type
"""
if self._dtype is None:
new_dtype = np.dtype(other_dtype)
else:
new_dtype = np.find_common_type([self._dtype, np.dtype(other_dtype)], [])
if new_dtype != self.dtype:
self.set_dtype(new_dtype) | python | def _coerce_dtype(self, other_dtype):
"""Possibly change the bin content type to allow correct operations with other operand.
Parameters
----------
other_dtype : np.dtype or type
"""
if self._dtype is None:
new_dtype = np.dtype(other_dtype)
else:
new_dtype = np.find_common_type([self._dtype, np.dtype(other_dtype)], [])
if new_dtype != self.dtype:
self.set_dtype(new_dtype) | [
"def",
"_coerce_dtype",
"(",
"self",
",",
"other_dtype",
")",
":",
"if",
"self",
".",
"_dtype",
"is",
"None",
":",
"new_dtype",
"=",
"np",
".",
"dtype",
"(",
"other_dtype",
")",
"else",
":",
"new_dtype",
"=",
"np",
".",
"find_common_type",
"(",
"[",
"self",
".",
"_dtype",
",",
"np",
".",
"dtype",
"(",
"other_dtype",
")",
"]",
",",
"[",
"]",
")",
"if",
"new_dtype",
"!=",
"self",
".",
"dtype",
":",
"self",
".",
"set_dtype",
"(",
"new_dtype",
")"
] | Possibly change the bin content type to allow correct operations with other operand.
Parameters
----------
other_dtype : np.dtype or type | [
"Possibly",
"change",
"the",
"bin",
"content",
"type",
"to",
"allow",
"correct",
"operations",
"with",
"other",
"operand",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L282-L294 | train |
janpipek/physt | physt/histogram_base.py | HistogramBase.normalize | def normalize(self, inplace: bool = False, percent: bool = False) -> "HistogramBase":
"""Normalize the histogram, so that the total weight is equal to 1.
Parameters
----------
inplace: If True, updates itself. If False (default), returns copy
percent: If True, normalizes to percent instead of 1. Default: False
Returns
-------
HistogramBase : either modified copy or self
See also
--------
densities
HistogramND.partial_normalize
"""
if inplace:
self /= self.total * (.01 if percent else 1)
return self
else:
return self / self.total * (100 if percent else 1) | python | def normalize(self, inplace: bool = False, percent: bool = False) -> "HistogramBase":
"""Normalize the histogram, so that the total weight is equal to 1.
Parameters
----------
inplace: If True, updates itself. If False (default), returns copy
percent: If True, normalizes to percent instead of 1. Default: False
Returns
-------
HistogramBase : either modified copy or self
See also
--------
densities
HistogramND.partial_normalize
"""
if inplace:
self /= self.total * (.01 if percent else 1)
return self
else:
return self / self.total * (100 if percent else 1) | [
"def",
"normalize",
"(",
"self",
",",
"inplace",
":",
"bool",
"=",
"False",
",",
"percent",
":",
"bool",
"=",
"False",
")",
"->",
"\"HistogramBase\"",
":",
"if",
"inplace",
":",
"self",
"/=",
"self",
".",
"total",
"*",
"(",
".01",
"if",
"percent",
"else",
"1",
")",
"return",
"self",
"else",
":",
"return",
"self",
"/",
"self",
".",
"total",
"*",
"(",
"100",
"if",
"percent",
"else",
"1",
")"
] | Normalize the histogram, so that the total weight is equal to 1.
Parameters
----------
inplace: If True, updates itself. If False (default), returns copy
percent: If True, normalizes to percent instead of 1. Default: False
Returns
-------
HistogramBase : either modified copy or self
See also
--------
densities
HistogramND.partial_normalize | [
"Normalize",
"the",
"histogram",
"so",
"that",
"the",
"total",
"weight",
"is",
"equal",
"to",
"1",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L314-L335 | train |
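A minimal sketch of both calling modes, assuming physt's h1 facade and the total property referenced elsewhere in these records.
import numpy as np
import physt

h = physt.h1(np.random.normal(size=1000), 20)
hn = h.normalize()               # new histogram, hn.total == 1.0
hp = h.normalize(percent=True)   # new histogram, hp.total == 100.0
h.normalize(inplace=True)        # h itself now sums to 1 and is also returned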
janpipek/physt | physt/histogram_base.py | HistogramBase._change_binning | def _change_binning(self, new_binning, bin_map: Iterable[Tuple[int, int]], axis: int = 0):
"""Set new binnning and update the bin contents according to a map.
Fills frequencies and errors with 0.
It's the caller's responsibility to provide correct binning and map.
Parameters
----------
new_binning: physt.binnings.BinningBase
bin_map: Iterable[tuple]
tuples contain bin indices (old, new)
axis: int
What axis does the binning describe(0..ndim-1)
"""
axis = int(axis)
if axis < 0 or axis >= self.ndim:
raise RuntimeError("Axis must be in range 0..(ndim-1)")
self._reshape_data(new_binning.bin_count, bin_map, axis)
self._binnings[axis] = new_binning | python | def _change_binning(self, new_binning, bin_map: Iterable[Tuple[int, int]], axis: int = 0):
"""Set new binnning and update the bin contents according to a map.
Fills frequencies and errors with 0.
It's the caller's responsibility to provide correct binning and map.
Parameters
----------
new_binning: physt.binnings.BinningBase
bin_map: Iterable[tuple]
tuples contain bin indices (old, new)
axis: int
What axis does the binning describe(0..ndim-1)
"""
axis = int(axis)
if axis < 0 or axis >= self.ndim:
raise RuntimeError("Axis must be in range 0..(ndim-1)")
self._reshape_data(new_binning.bin_count, bin_map, axis)
self._binnings[axis] = new_binning | [
"def",
"_change_binning",
"(",
"self",
",",
"new_binning",
",",
"bin_map",
":",
"Iterable",
"[",
"Tuple",
"[",
"int",
",",
"int",
"]",
"]",
",",
"axis",
":",
"int",
"=",
"0",
")",
":",
"axis",
"=",
"int",
"(",
"axis",
")",
"if",
"axis",
"<",
"0",
"or",
"axis",
">=",
"self",
".",
"ndim",
":",
"raise",
"RuntimeError",
"(",
"\"Axis must be in range 0..(ndim-1)\"",
")",
"self",
".",
"_reshape_data",
"(",
"new_binning",
".",
"bin_count",
",",
"bin_map",
",",
"axis",
")",
"self",
".",
"_binnings",
"[",
"axis",
"]",
"=",
"new_binning"
] | Set new binning and update the bin contents according to a map.
Fills frequencies and errors with 0.
It's the caller's responsibility to provide correct binning and map.
Parameters
----------
new_binning: physt.binnings.BinningBase
bin_map: Iterable[tuple]
tuples contain bin indices (old, new)
axis: int
What axis does the binning describe(0..ndim-1) | [
"Set",
"new",
"binnning",
"and",
"update",
"the",
"bin",
"contents",
"according",
"to",
"a",
"map",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L386-L404 | train |
janpipek/physt | physt/histogram_base.py | HistogramBase._reshape_data | def _reshape_data(self, new_size, bin_map, axis=0):
"""Reshape data to match new binning schema.
Fills frequencies and errors with 0.
Parameters
----------
new_size: int
bin_map: Iterable[(old, new)] or int or None
If None, we can keep the data unchanged.
If int, it is offset by which to shift the data (can be 0)
If iterable, pairs specify which old bin should go into which new bin
axis: int
On which axis to apply
"""
if bin_map is None:
return
else:
new_shape = list(self.shape)
new_shape[axis] = new_size
new_frequencies = np.zeros(new_shape, dtype=self._frequencies.dtype)
new_errors2 = np.zeros(new_shape, dtype=self._frequencies.dtype)
self._apply_bin_map(
old_frequencies=self._frequencies, new_frequencies=new_frequencies,
old_errors2=self._errors2, new_errors2=new_errors2,
bin_map=bin_map, axis=axis)
self._frequencies = new_frequencies
self._errors2 = new_errors2 | python | def _reshape_data(self, new_size, bin_map, axis=0):
"""Reshape data to match new binning schema.
Fills frequencies and errors with 0.
Parameters
----------
new_size: int
bin_map: Iterable[(old, new)] or int or None
If None, we can keep the data unchanged.
If int, it is offset by which to shift the data (can be 0)
If iterable, pairs specify which old bin should go into which new bin
axis: int
On which axis to apply
"""
if bin_map is None:
return
else:
new_shape = list(self.shape)
new_shape[axis] = new_size
new_frequencies = np.zeros(new_shape, dtype=self._frequencies.dtype)
new_errors2 = np.zeros(new_shape, dtype=self._frequencies.dtype)
self._apply_bin_map(
old_frequencies=self._frequencies, new_frequencies=new_frequencies,
old_errors2=self._errors2, new_errors2=new_errors2,
bin_map=bin_map, axis=axis)
self._frequencies = new_frequencies
self._errors2 = new_errors2 | [
"def",
"_reshape_data",
"(",
"self",
",",
"new_size",
",",
"bin_map",
",",
"axis",
"=",
"0",
")",
":",
"if",
"bin_map",
"is",
"None",
":",
"return",
"else",
":",
"new_shape",
"=",
"list",
"(",
"self",
".",
"shape",
")",
"new_shape",
"[",
"axis",
"]",
"=",
"new_size",
"new_frequencies",
"=",
"np",
".",
"zeros",
"(",
"new_shape",
",",
"dtype",
"=",
"self",
".",
"_frequencies",
".",
"dtype",
")",
"new_errors2",
"=",
"np",
".",
"zeros",
"(",
"new_shape",
",",
"dtype",
"=",
"self",
".",
"_frequencies",
".",
"dtype",
")",
"self",
".",
"_apply_bin_map",
"(",
"old_frequencies",
"=",
"self",
".",
"_frequencies",
",",
"new_frequencies",
"=",
"new_frequencies",
",",
"old_errors2",
"=",
"self",
".",
"_errors2",
",",
"new_errors2",
"=",
"new_errors2",
",",
"bin_map",
"=",
"bin_map",
",",
"axis",
"=",
"axis",
")",
"self",
".",
"_frequencies",
"=",
"new_frequencies",
"self",
".",
"_errors2",
"=",
"new_errors2"
] | Reshape data to match new binning schema.
Fills frequencies and errors with 0.
Parameters
----------
new_size: int
bin_map: Iterable[(old, new)] or int or None
If None, we can keep the data unchanged.
If int, it is offset by which to shift the data (can be 0)
If iterable, pairs specify which old bin should go into which new bin
axis: int
On which axis to apply | [
"Reshape",
"data",
"to",
"match",
"new",
"binning",
"schema",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L455-L482 | train |
janpipek/physt | physt/histogram_base.py | HistogramBase._apply_bin_map | def _apply_bin_map(self, old_frequencies, new_frequencies, old_errors2,
new_errors2, bin_map, axis=0):
"""Fill new data arrays using a map.
Parameters
----------
old_frequencies : np.ndarray
Source of frequencies data
new_frequencies : np.ndarray
Target of frequencies data
old_errors2 : np.ndarray
Source of errors data
new_errors2 : np.ndarray
Target of errors data
bin_map: Iterable[(old, new)] or int or None
As in _reshape_data
axis: int
On which axis to apply
See also
--------
HistogramBase._reshape_data
"""
if old_frequencies is not None and old_frequencies.shape[axis] > 0:
if isinstance(bin_map, int):
new_index = [slice(None) for i in range(self.ndim)]
new_index[axis] = slice(bin_map, bin_map + old_frequencies.shape[axis])
new_frequencies[tuple(new_index)] += old_frequencies
new_errors2[tuple(new_index)] += old_errors2
else:
for (old, new) in bin_map: # Generic enough
new_index = [slice(None) for i in range(self.ndim)]
new_index[axis] = new
old_index = [slice(None) for i in range(self.ndim)]
old_index[axis] = old
new_frequencies[tuple(new_index)] += old_frequencies[tuple(old_index)]
new_errors2[tuple(new_index)] += old_errors2[tuple(old_index)] | python | def _apply_bin_map(self, old_frequencies, new_frequencies, old_errors2,
new_errors2, bin_map, axis=0):
"""Fill new data arrays using a map.
Parameters
----------
old_frequencies : np.ndarray
Source of frequencies data
new_frequencies : np.ndarray
Target of frequencies data
old_errors2 : np.ndarray
Source of errors data
new_errors2 : np.ndarray
Target of errors data
bin_map: Iterable[(old, new)] or int or None
As in _reshape_data
axis: int
On which axis to apply
See also
--------
HistogramBase._reshape_data
"""
if old_frequencies is not None and old_frequencies.shape[axis] > 0:
if isinstance(bin_map, int):
new_index = [slice(None) for i in range(self.ndim)]
new_index[axis] = slice(bin_map, bin_map + old_frequencies.shape[axis])
new_frequencies[tuple(new_index)] += old_frequencies
new_errors2[tuple(new_index)] += old_errors2
else:
for (old, new) in bin_map: # Generic enough
new_index = [slice(None) for i in range(self.ndim)]
new_index[axis] = new
old_index = [slice(None) for i in range(self.ndim)]
old_index[axis] = old
new_frequencies[tuple(new_index)] += old_frequencies[tuple(old_index)]
new_errors2[tuple(new_index)] += old_errors2[tuple(old_index)] | [
"def",
"_apply_bin_map",
"(",
"self",
",",
"old_frequencies",
",",
"new_frequencies",
",",
"old_errors2",
",",
"new_errors2",
",",
"bin_map",
",",
"axis",
"=",
"0",
")",
":",
"if",
"old_frequencies",
"is",
"not",
"None",
"and",
"old_frequencies",
".",
"shape",
"[",
"axis",
"]",
">",
"0",
":",
"if",
"isinstance",
"(",
"bin_map",
",",
"int",
")",
":",
"new_index",
"=",
"[",
"slice",
"(",
"None",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ndim",
")",
"]",
"new_index",
"[",
"axis",
"]",
"=",
"slice",
"(",
"bin_map",
",",
"bin_map",
"+",
"old_frequencies",
".",
"shape",
"[",
"axis",
"]",
")",
"new_frequencies",
"[",
"tuple",
"(",
"new_index",
")",
"]",
"+=",
"old_frequencies",
"new_errors2",
"[",
"tuple",
"(",
"new_index",
")",
"]",
"+=",
"old_errors2",
"else",
":",
"for",
"(",
"old",
",",
"new",
")",
"in",
"bin_map",
":",
"# Generic enough",
"new_index",
"=",
"[",
"slice",
"(",
"None",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ndim",
")",
"]",
"new_index",
"[",
"axis",
"]",
"=",
"new",
"old_index",
"=",
"[",
"slice",
"(",
"None",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ndim",
")",
"]",
"old_index",
"[",
"axis",
"]",
"=",
"old",
"new_frequencies",
"[",
"tuple",
"(",
"new_index",
")",
"]",
"+=",
"old_frequencies",
"[",
"tuple",
"(",
"old_index",
")",
"]",
"new_errors2",
"[",
"tuple",
"(",
"new_index",
")",
"]",
"+=",
"old_errors2",
"[",
"tuple",
"(",
"old_index",
")",
"]"
] | Fill new data arrays using a map.
Parameters
----------
old_frequencies : np.ndarray
Source of frequencies data
new_frequencies : np.ndarray
Target of frequencies data
old_errors2 : np.ndarray
Source of errors data
new_errors2 : np.ndarray
Target of errors data
bin_map: Iterable[(old, new)] or int or None
As in _reshape_data
axis: int
On which axis to apply
See also
--------
HistogramBase._reshape_data | [
"Fill",
"new",
"data",
"arrays",
"using",
"a",
"map",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L484-L520 | train |
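The (old, new) pair semantics of bin_map can be illustrated standalone; the arrays below are made up for illustration and are not part of the record.
import numpy as np

old = np.array([1.0, 2.0, 3.0, 4.0])
new = np.zeros(2)
# Merge pairs of old bins: bins 0,1 -> new bin 0; bins 2,3 -> new bin 1.
for old_ix, new_ix in [(0, 0), (1, 0), (2, 1), (3, 1)]:
    new[new_ix] += old[old_ix]
# new == [3., 7.]; _apply_bin_map performs the same accumulation along the
# chosen axis, for frequencies and squared errors alike.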
janpipek/physt | physt/histogram_base.py | HistogramBase.has_same_bins | def has_same_bins(self, other: "HistogramBase") -> bool:
"""Whether two histograms share the same binning."""
if self.shape != other.shape:
return False
elif self.ndim == 1:
return np.allclose(self.bins, other.bins)
elif self.ndim > 1:
for i in range(self.ndim):
if not np.allclose(self.bins[i], other.bins[i]):
return False
return True | python | def has_same_bins(self, other: "HistogramBase") -> bool:
"""Whether two histograms share the same binning."""
if self.shape != other.shape:
return False
elif self.ndim == 1:
return np.allclose(self.bins, other.bins)
elif self.ndim > 1:
for i in range(self.ndim):
if not np.allclose(self.bins[i], other.bins[i]):
return False
return True | [
"def",
"has_same_bins",
"(",
"self",
",",
"other",
":",
"\"HistogramBase\"",
")",
"->",
"bool",
":",
"if",
"self",
".",
"shape",
"!=",
"other",
".",
"shape",
":",
"return",
"False",
"elif",
"self",
".",
"ndim",
"==",
"1",
":",
"return",
"np",
".",
"allclose",
"(",
"self",
".",
"bins",
",",
"other",
".",
"bins",
")",
"elif",
"self",
".",
"ndim",
">",
"1",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ndim",
")",
":",
"if",
"not",
"np",
".",
"allclose",
"(",
"self",
".",
"bins",
"[",
"i",
"]",
",",
"other",
".",
"bins",
"[",
"i",
"]",
")",
":",
"return",
"False",
"return",
"True"
] | Whether two histograms share the same binning. | [
"Whether",
"two",
"histograms",
"share",
"the",
"same",
"binning",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L522-L532 | train |
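A quick sketch, assuming physt's h1 facade; copies share binning by construction, while differently ranged data generally do not.
import numpy as np
import physt

data = np.random.rand(100)
a = physt.h1(data, 10)
a.has_same_bins(a.copy())   # True: copy() duplicates the binning
b = physt.h1(data * 10, 10) # same bin count, different edges
a.has_same_bins(b)          # False: shapes match but edges differ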
janpipek/physt | physt/histogram_base.py | HistogramBase.copy | def copy(self, include_frequencies: bool = True) -> "HistogramBase":
"""Copy the histogram.
Parameters
----------
include_frequencies : If false, all frequencies are set to zero.
"""
if include_frequencies:
frequencies = np.copy(self.frequencies)
missed = self._missed.copy()
errors2 = np.copy(self.errors2)
stats = self._stats or None
else:
frequencies = np.zeros_like(self._frequencies)
errors2 = np.zeros_like(self._errors2)
missed = np.zeros_like(self._missed)
stats = None
a_copy = self.__class__.__new__(self.__class__)
a_copy._binnings = [binning.copy() for binning in self._binnings]
a_copy._dtype = self.dtype
a_copy._frequencies = frequencies
a_copy._errors2 = errors2
a_copy._meta_data = self._meta_data.copy()
a_copy.keep_missed = self.keep_missed
a_copy._missed = missed
a_copy._stats = stats
return a_copy | python | def copy(self, include_frequencies: bool = True) -> "HistogramBase":
"""Copy the histogram.
Parameters
----------
include_frequencies : If false, all frequencies are set to zero.
"""
if include_frequencies:
frequencies = np.copy(self.frequencies)
missed = self._missed.copy()
errors2 = np.copy(self.errors2)
stats = self._stats or None
else:
frequencies = np.zeros_like(self._frequencies)
errors2 = np.zeros_like(self._errors2)
missed = np.zeros_like(self._missed)
stats = None
a_copy = self.__class__.__new__(self.__class__)
a_copy._binnings = [binning.copy() for binning in self._binnings]
a_copy._dtype = self.dtype
a_copy._frequencies = frequencies
a_copy._errors2 = errors2
a_copy._meta_data = self._meta_data.copy()
a_copy.keep_missed = self.keep_missed
a_copy._missed = missed
a_copy._stats = stats
return a_copy | [
"def",
"copy",
"(",
"self",
",",
"include_frequencies",
":",
"bool",
"=",
"True",
")",
"->",
"\"HistogramBase\"",
":",
"if",
"include_frequencies",
":",
"frequencies",
"=",
"np",
".",
"copy",
"(",
"self",
".",
"frequencies",
")",
"missed",
"=",
"self",
".",
"_missed",
".",
"copy",
"(",
")",
"errors2",
"=",
"np",
".",
"copy",
"(",
"self",
".",
"errors2",
")",
"stats",
"=",
"self",
".",
"_stats",
"or",
"None",
"else",
":",
"frequencies",
"=",
"np",
".",
"zeros_like",
"(",
"self",
".",
"_frequencies",
")",
"errors2",
"=",
"np",
".",
"zeros_like",
"(",
"self",
".",
"_errors2",
")",
"missed",
"=",
"np",
".",
"zeros_like",
"(",
"self",
".",
"_missed",
")",
"stats",
"=",
"None",
"a_copy",
"=",
"self",
".",
"__class__",
".",
"__new__",
"(",
"self",
".",
"__class__",
")",
"a_copy",
".",
"_binnings",
"=",
"[",
"binning",
".",
"copy",
"(",
")",
"for",
"binning",
"in",
"self",
".",
"_binnings",
"]",
"a_copy",
".",
"_dtype",
"=",
"self",
".",
"dtype",
"a_copy",
".",
"_frequencies",
"=",
"frequencies",
"a_copy",
".",
"_errors2",
"=",
"errors2",
"a_copy",
".",
"_meta_data",
"=",
"self",
".",
"_meta_data",
".",
"copy",
"(",
")",
"a_copy",
".",
"keep_missed",
"=",
"self",
".",
"keep_missed",
"a_copy",
".",
"_missed",
"=",
"missed",
"a_copy",
".",
"_stats",
"=",
"stats",
"return",
"a_copy"
] | Copy the histogram.
Parameters
----------
include_frequencies : If false, all frequencies are set to zero. | [
"Copy",
"the",
"histogram",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L534-L560 | train |
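A sketch of the include_frequencies flag, reusing the total and has_same_bins members seen in the neighboring records.
import numpy as np
import physt

h = physt.h1(np.random.rand(500), 10)
empty = h.copy(include_frequencies=False)  # same binning, zeroed contents
empty.total                                # 0
empty.has_same_bins(h)                     # True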
janpipek/physt | physt/histogram_base.py | HistogramBase.to_dict | def to_dict(self) -> OrderedDict:
"""Dictionary with all data in the histogram.
This is used for export into various formats (e.g. JSON)
If a descendant class needs to update the dictionary in some way
(put some more information), override the _update_dict method.
"""
result = OrderedDict()
result["histogram_type"] = type(self).__name__
result["binnings"] = [binning.to_dict() for binning in self._binnings]
result["frequencies"] = self.frequencies.tolist()
result["dtype"] = str(np.dtype(self.dtype))
# TODO: Optimize for _errors == _frequencies
result["errors2"] = self.errors2.tolist()
result["meta_data"] = self._meta_data
result["missed"] = self._missed.tolist()
result["missed_keep"] = self.keep_missed
self._update_dict(result)
return result | python | def to_dict(self) -> OrderedDict:
"""Dictionary with all data in the histogram.
This is used for export into various formats (e.g. JSON)
If a descendant class needs to update the dictionary in some way
(put some more information), override the _update_dict method.
"""
result = OrderedDict()
result["histogram_type"] = type(self).__name__
result["binnings"] = [binning.to_dict() for binning in self._binnings]
result["frequencies"] = self.frequencies.tolist()
result["dtype"] = str(np.dtype(self.dtype))
# TODO: Optimize for _errors == _frequencies
result["errors2"] = self.errors2.tolist()
result["meta_data"] = self._meta_data
result["missed"] = self._missed.tolist()
result["missed_keep"] = self.keep_missed
self._update_dict(result)
return result | [
"def",
"to_dict",
"(",
"self",
")",
"->",
"OrderedDict",
":",
"result",
"=",
"OrderedDict",
"(",
")",
"result",
"[",
"\"histogram_type\"",
"]",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
"result",
"[",
"\"binnings\"",
"]",
"=",
"[",
"binning",
".",
"to_dict",
"(",
")",
"for",
"binning",
"in",
"self",
".",
"_binnings",
"]",
"result",
"[",
"\"frequencies\"",
"]",
"=",
"self",
".",
"frequencies",
".",
"tolist",
"(",
")",
"result",
"[",
"\"dtype\"",
"]",
"=",
"str",
"(",
"np",
".",
"dtype",
"(",
"self",
".",
"dtype",
")",
")",
"# TODO: Optimize for _errors == _frequencies",
"result",
"[",
"\"errors2\"",
"]",
"=",
"self",
".",
"errors2",
".",
"tolist",
"(",
")",
"result",
"[",
"\"meta_data\"",
"]",
"=",
"self",
".",
"_meta_data",
"result",
"[",
"\"missed\"",
"]",
"=",
"self",
".",
"_missed",
".",
"tolist",
"(",
")",
"result",
"[",
"\"missed_keep\"",
"]",
"=",
"self",
".",
"keep_missed",
"self",
".",
"_update_dict",
"(",
"result",
")",
"return",
"result"
] | Dictionary with all data in the histogram.
This is used for export into various formats (e.g. JSON)
If a descendant class needs to update the dictionary in some way
(put some more information), override the _update_dict method. | [
"Dictionary",
"with",
"all",
"data",
"in",
"the",
"histogram",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L619-L638 | train |
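Because every value is converted to plain lists and strings, the result should serialize directly; a minimal sketch assuming physt's h1 facade.
import json
import numpy as np
import physt

h = physt.h1(np.random.rand(100), 5)
d = h.to_dict()
d["histogram_type"]    # "Histogram1D"
text = json.dumps(d)   # works: frequencies/errors2 were tolist()-ed above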
janpipek/physt | physt/histogram_base.py | HistogramBase._merge_meta_data | def _merge_meta_data(cls, first: "HistogramBase", second: "HistogramBase") -> dict:
"""Merge meta data of two histograms leaving only the equal values.
(Used in addition and subtraction)
"""
keys = set(first._meta_data.keys())
keys = keys.union(set(second._meta_data.keys()))
return {key:
(first._meta_data.get(key, None) if first._meta_data.get(key, None) == second._meta_data.get(key, None) else None)
for key in keys} | python | def _merge_meta_data(cls, first: "HistogramBase", second: "HistogramBase") -> dict:
"""Merge meta data of two histograms leaving only the equal values.
(Used in addition and subtraction)
"""
keys = set(first._meta_data.keys())
keys = keys.union(set(second._meta_data.keys()))
return {key:
(first._meta_data.get(key, None) if first._meta_data.get(key, None) == second._meta_data.get(key, None) else None)
for key in keys} | [
"def",
"_merge_meta_data",
"(",
"cls",
",",
"first",
":",
"\"HistogramBase\"",
",",
"second",
":",
"\"HistogramBase\"",
")",
"->",
"dict",
":",
"keys",
"=",
"set",
"(",
"first",
".",
"_meta_data",
".",
"keys",
"(",
")",
")",
"keys",
"=",
"keys",
".",
"union",
"(",
"set",
"(",
"second",
".",
"_meta_data",
".",
"keys",
"(",
")",
")",
")",
"return",
"{",
"key",
":",
"(",
"first",
".",
"_meta_data",
".",
"get",
"(",
"key",
",",
"None",
")",
"if",
"first",
".",
"_meta_data",
".",
"get",
"(",
"key",
",",
"None",
")",
"==",
"second",
".",
"_meta_data",
".",
"get",
"(",
"key",
",",
"None",
")",
"else",
"None",
")",
"for",
"key",
"in",
"keys",
"}"
] | Merge meta data of two histograms leaving only the equal values.
(Used in addition and subtraction) | [
"Merge",
"meta",
"data",
"of",
"two",
"histograms",
"leaving",
"only",
"the",
"equal",
"values",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L816-L825 | train |
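The merge rule (keep a key only where both histograms agree, else None) in a standalone sketch with made-up meta data.
a = {"name": "energy", "axis_names": ("x",)}
b = {"name": "energy", "axis_names": ("y",)}
keys = set(a) | set(b)
merged = {k: (a.get(k) if a.get(k) == b.get(k) else None) for k in keys}
# merged == {"name": "energy", "axis_names": None}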
janpipek/physt | physt/histogram1d.py | Histogram1D.mean | def mean(self) -> Optional[float]:
"""Statistical mean of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
"""
if self._stats: # TODO: should be true always?
if self.total > 0:
return self._stats["sum"] / self.total
else:
return np.nan
else:
return None | python | def mean(self) -> Optional[float]:
"""Statistical mean of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
"""
if self._stats: # TODO: should be true always?
if self.total > 0:
return self._stats["sum"] / self.total
else:
return np.nan
else:
return None | [
"def",
"mean",
"(",
"self",
")",
"->",
"Optional",
"[",
"float",
"]",
":",
"if",
"self",
".",
"_stats",
":",
"# TODO: should be true always?",
"if",
"self",
".",
"total",
">",
"0",
":",
"return",
"self",
".",
"_stats",
"[",
"\"sum\"",
"]",
"/",
"self",
".",
"total",
"else",
":",
"return",
"np",
".",
"nan",
"else",
":",
"return",
"None"
] | Statistical mean of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents. | [
"Statistical",
"mean",
"of",
"all",
"values",
"entered",
"into",
"histogram",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L208-L220 | train |
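The exactness claim can be checked against numpy directly; a sketch assuming unweighted fills with no out-of-range values.
import numpy as np
import physt

values = np.random.normal(size=1000)
h = physt.h1(values, 20)
np.isclose(h.mean(), values.mean())  # True: computed from the kept sum, not bin centers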
janpipek/physt | physt/histogram1d.py | Histogram1D.std | def std(self) -> Optional[float]: #, ddof=0):
"""Standard deviation of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float
"""
# TODO: Add DOF
if self._stats:
return np.sqrt(self.variance())
else:
return None | python | def std(self) -> Optional[float]: #, ddof=0):
"""Standard deviation of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float
"""
# TODO: Add DOF
if self._stats:
return np.sqrt(self.variance())
else:
return None | [
"def",
"std",
"(",
"self",
")",
"->",
"Optional",
"[",
"float",
"]",
":",
"#, ddof=0):",
"# TODO: Add DOF",
"if",
"self",
".",
"_stats",
":",
"return",
"np",
".",
"sqrt",
"(",
"self",
".",
"variance",
"(",
")",
")",
"else",
":",
"return",
"None"
] | Standard deviation of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float | [
"Standard",
"deviation",
"of",
"all",
"values",
"entered",
"into",
"histogram",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L222-L236 | train |
janpipek/physt | physt/histogram1d.py | Histogram1D.variance | def variance(self) -> Optional[float]: #, ddof: int = 0) -> float:
"""Statistical variance of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float
"""
# TODO: Add DOF
# http://stats.stackexchange.com/questions/6534/how-do-i-calculate-a-weighted-standard-deviation-in-excel
if self._stats:
if self.total > 0:
return (self._stats["sum2"] - self._stats["sum"] ** 2 / self.total) / self.total
else:
return np.nan
else:
return None | python | def variance(self) -> Optional[float]: #, ddof: int = 0) -> float:
"""Statistical variance of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float
"""
# TODO: Add DOF
# http://stats.stackexchange.com/questions/6534/how-do-i-calculate-a-weighted-standard-deviation-in-excel
if self._stats:
if self.total > 0:
return (self._stats["sum2"] - self._stats["sum"] ** 2 / self.total) / self.total
else:
return np.nan
else:
return None | [
"def",
"variance",
"(",
"self",
")",
"->",
"Optional",
"[",
"float",
"]",
":",
"#, ddof: int = 0) -> float:",
"# TODO: Add DOF",
"# http://stats.stackexchange.com/questions/6534/how-do-i-calculate-a-weighted-standard-deviation-in-excel",
"if",
"self",
".",
"_stats",
":",
"if",
"self",
".",
"total",
">",
"0",
":",
"return",
"(",
"self",
".",
"_stats",
"[",
"\"sum2\"",
"]",
"-",
"self",
".",
"_stats",
"[",
"\"sum\"",
"]",
"**",
"2",
"/",
"self",
".",
"total",
")",
"/",
"self",
".",
"total",
"else",
":",
"return",
"np",
".",
"nan",
"else",
":",
"return",
"None"
] | Statistical variance of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float | [
"Statistical",
"variance",
"of",
"all",
"values",
"entered",
"into",
"histogram",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L238-L256 | train |
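A worked check of the one-pass formula above, (sum2 - sum**2 / total) / total, which equals the population variance; the numbers are made up.
import numpy as np

values = np.array([1.0, 2.0, 2.0, 5.0])
total = len(values)
s1, s2 = values.sum(), (values ** 2).sum()
var = (s2 - s1 ** 2 / total) / total   # (34 - 100/4) / 4 == 2.25
np.isclose(var, values.var())          # True: ddof=0 (population variance)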
janpipek/physt | physt/histogram1d.py | Histogram1D.find_bin | def find_bin(self, value):
"""Index of bin corresponding to a value.
Parameters
----------
value: float
Value to be searched for.
Returns
-------
int
index of bin to which value belongs
(-1=underflow, N=overflow, None=not found in non-consecutive bins)
"""
ixbin = np.searchsorted(self.bin_left_edges, value, side="right")
if ixbin == 0:
return -1
elif ixbin == self.bin_count:
if value <= self.bin_right_edges[-1]:
return ixbin - 1
else:
return self.bin_count
elif value < self.bin_right_edges[ixbin - 1]:
return ixbin - 1
else:
return None | python | def find_bin(self, value):
"""Index of bin corresponding to a value.
Parameters
----------
value: float
Value to be searched for.
Returns
-------
int
index of bin to which value belongs
(-1=underflow, N=overflow, None=not found in non-consecutive bins)
"""
ixbin = np.searchsorted(self.bin_left_edges, value, side="right")
if ixbin == 0:
return -1
elif ixbin == self.bin_count:
if value <= self.bin_right_edges[-1]:
return ixbin - 1
else:
return self.bin_count
elif value < self.bin_right_edges[ixbin - 1]:
return ixbin - 1
else:
return None | [
"def",
"find_bin",
"(",
"self",
",",
"value",
")",
":",
"ixbin",
"=",
"np",
".",
"searchsorted",
"(",
"self",
".",
"bin_left_edges",
",",
"value",
",",
"side",
"=",
"\"right\"",
")",
"if",
"ixbin",
"==",
"0",
":",
"return",
"-",
"1",
"elif",
"ixbin",
"==",
"self",
".",
"bin_count",
":",
"if",
"value",
"<=",
"self",
".",
"bin_right_edges",
"[",
"-",
"1",
"]",
":",
"return",
"ixbin",
"-",
"1",
"else",
":",
"return",
"self",
".",
"bin_count",
"elif",
"value",
"<",
"self",
".",
"bin_right_edges",
"[",
"ixbin",
"-",
"1",
"]",
":",
"return",
"ixbin",
"-",
"1",
"elif",
"ixbin",
"==",
"self",
".",
"bin_count",
":",
"return",
"self",
".",
"bin_count",
"else",
":",
"return",
"None"
] | Index of bin corresponding to a value.
Parameters
----------
value: float
Value to be searched for.
Returns
-------
int
index of bin to which value belongs
(-1=underflow, N=overflow, None=not found in non-consecutive bins) | [
"Index",
"of",
"bin",
"corresponding",
"to",
"a",
"value",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L342-L369 | train |
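A sketch of the return conventions above, assuming physt's h1 accepts an explicit list of bin edges as its second argument.
import physt

h = physt.h1([0.5, 1.5, 2.5], [0, 1, 2, 3])
h.find_bin(1.5)    # 1
h.find_bin(-0.5)   # -1 (underflow)
h.find_bin(3.5)    # 3 == bin_count (overflow)
h.find_bin(3.0)    # 2: the rightmost edge is inclusive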
janpipek/physt | physt/histogram1d.py | Histogram1D.fill | def fill(self, value, weight=1):
"""Update histogram with a new value.
Parameters
----------
value: float
Value to be added.
weight: float, optional
Weight assigned to the value.
Returns
-------
int
index of bin which was incremented (-1=underflow, N=overflow, None=not found)
Note: If a value falls into a gap between non-consecutive bins, underflow & overflow are no longer valid.
Note: Name was selected because of the eponymous method in ROOT
"""
self._coerce_dtype(type(weight))
if self._binning.is_adaptive():
map = self._binning.force_bin_existence(value)
self._reshape_data(self._binning.bin_count, map)
ixbin = self.find_bin(value)
if ixbin is None:
self.overflow = np.nan
self.underflow = np.nan
elif ixbin == -1 and self.keep_missed:
self.underflow += weight
elif ixbin == self.bin_count and self.keep_missed:
self.overflow += weight
else:
self._frequencies[ixbin] += weight
self._errors2[ixbin] += weight ** 2
if self._stats:
self._stats["sum"] += weight * value
self._stats["sum2"] += weight * value ** 2
return ixbin | python | def fill(self, value, weight=1):
"""Update histogram with a new value.
Parameters
----------
value: float
Value to be added.
weight: float, optional
Weight assigned to the value.
Returns
-------
int
index of bin which was incremented (-1=underflow, N=overflow, None=not found)
Note: If a value falls into a gap between non-consecutive bins, underflow & overflow are no longer valid.
Note: Name was selected because of the eponymous method in ROOT
"""
self._coerce_dtype(type(weight))
if self._binning.is_adaptive():
map = self._binning.force_bin_existence(value)
self._reshape_data(self._binning.bin_count, map)
ixbin = self.find_bin(value)
if ixbin is None:
self.overflow = np.nan
self.underflow = np.nan
elif ixbin == -1 and self.keep_missed:
self.underflow += weight
elif ixbin == self.bin_count and self.keep_missed:
self.overflow += weight
else:
self._frequencies[ixbin] += weight
self._errors2[ixbin] += weight ** 2
if self._stats:
self._stats["sum"] += weight * value
self._stats["sum2"] += weight * value ** 2
return ixbin | [
"def",
"fill",
"(",
"self",
",",
"value",
",",
"weight",
"=",
"1",
")",
":",
"self",
".",
"_coerce_dtype",
"(",
"type",
"(",
"weight",
")",
")",
"if",
"self",
".",
"_binning",
".",
"is_adaptive",
"(",
")",
":",
"map",
"=",
"self",
".",
"_binning",
".",
"force_bin_existence",
"(",
"value",
")",
"self",
".",
"_reshape_data",
"(",
"self",
".",
"_binning",
".",
"bin_count",
",",
"map",
")",
"ixbin",
"=",
"self",
".",
"find_bin",
"(",
"value",
")",
"if",
"ixbin",
"is",
"None",
":",
"self",
".",
"overflow",
"=",
"np",
".",
"nan",
"self",
".",
"underflow",
"=",
"np",
".",
"nan",
"elif",
"ixbin",
"==",
"-",
"1",
"and",
"self",
".",
"keep_missed",
":",
"self",
".",
"underflow",
"+=",
"weight",
"elif",
"ixbin",
"==",
"self",
".",
"bin_count",
"and",
"self",
".",
"keep_missed",
":",
"self",
".",
"overflow",
"+=",
"weight",
"else",
":",
"self",
".",
"_frequencies",
"[",
"ixbin",
"]",
"+=",
"weight",
"self",
".",
"_errors2",
"[",
"ixbin",
"]",
"+=",
"weight",
"**",
"2",
"if",
"self",
".",
"_stats",
":",
"self",
".",
"_stats",
"[",
"\"sum\"",
"]",
"+=",
"weight",
"*",
"value",
"self",
".",
"_stats",
"[",
"\"sum2\"",
"]",
"+=",
"weight",
"*",
"value",
"**",
"2",
"return",
"ixbin"
] | Update histogram with a new value.
Parameters
----------
value: float
Value to be added.
weight: float, optional
Weight assigned to the value.
Returns
-------
int
index of bin which was incremented (-1=underflow, N=overflow, None=not found)
Note: If a value falls into a gap between non-consecutive bins, underflow & overflow are no longer valid.
Note: Name was selected because of the eponymous method in ROOT | [
"Update",
"histogram",
"with",
"a",
"new",
"value",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L371-L408 | train |
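A sketch of the single-value path, reusing the explicit-edges construction assumed above.
import physt

h = physt.h1([0.5], [0, 1, 2])
h.fill(1.2)               # -> 1; frequencies become [1, 1]
h.fill(5.0)               # -> 2 == bin_count; counted into overflow
h.fill(0.3, weight=2.5)   # -> 0; dtype is coerced to float first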
janpipek/physt | physt/histogram1d.py | Histogram1D.fill_n | def fill_n(self, values, weights=None, dropna: bool = True):
"""Update histograms with a set of values.
Parameters
----------
values: array_like
weights: Optional[array_like]
dropna: Optional[bool]
If true (default), all nan's are skipped.
"""
# TODO: Unify with HistogramBase
values = np.asarray(values)
if dropna:
values = values[~np.isnan(values)]
if self._binning.is_adaptive():
map = self._binning.force_bin_existence(values)
self._reshape_data(self._binning.bin_count, map)
if weights is not None:
weights = np.asarray(weights)
self._coerce_dtype(weights.dtype)
(frequencies, errors2, underflow, overflow, stats) = \
calculate_frequencies(values, self._binning, dtype=self.dtype,
weights=weights, validate_bins=False)
self._frequencies += frequencies
self._errors2 += errors2
# TODO: check that adaptive does not produce under-/over-flows?
if self.keep_missed:
self.underflow += underflow
self.overflow += overflow
if self._stats:
for key in self._stats:
self._stats[key] += stats.get(key, 0.0) | python | def fill_n(self, values, weights=None, dropna: bool = True):
"""Update histograms with a set of values.
Parameters
----------
values: array_like
weights: Optional[array_like]
dropna: Optional[bool]
If true (default), all nan's are skipped.
"""
# TODO: Unify with HistogramBase
values = np.asarray(values)
if dropna:
values = values[~np.isnan(values)]
if self._binning.is_adaptive():
map = self._binning.force_bin_existence(values)
self._reshape_data(self._binning.bin_count, map)
if weights is not None:
weights = np.asarray(weights)
self._coerce_dtype(weights.dtype)
(frequencies, errors2, underflow, overflow, stats) = \
calculate_frequencies(values, self._binning, dtype=self.dtype,
weights=weights, validate_bins=False)
self._frequencies += frequencies
self._errors2 += errors2
# TODO: check that adaptive does not produce under-/over-flows?
if self.keep_missed:
self.underflow += underflow
self.overflow += overflow
if self._stats:
for key in self._stats:
self._stats[key] += stats.get(key, 0.0) | [
"def",
"fill_n",
"(",
"self",
",",
"values",
",",
"weights",
"=",
"None",
",",
"dropna",
":",
"bool",
"=",
"True",
")",
":",
"# TODO: Unify with HistogramBase",
"values",
"=",
"np",
".",
"asarray",
"(",
"values",
")",
"if",
"dropna",
":",
"values",
"=",
"values",
"[",
"~",
"np",
".",
"isnan",
"(",
"values",
")",
"]",
"if",
"self",
".",
"_binning",
".",
"is_adaptive",
"(",
")",
":",
"map",
"=",
"self",
".",
"_binning",
".",
"force_bin_existence",
"(",
"values",
")",
"self",
".",
"_reshape_data",
"(",
"self",
".",
"_binning",
".",
"bin_count",
",",
"map",
")",
"if",
"weights",
":",
"weights",
"=",
"np",
".",
"asarray",
"(",
"weights",
")",
"self",
".",
"_coerce_dtype",
"(",
"weights",
".",
"dtype",
")",
"(",
"frequencies",
",",
"errors2",
",",
"underflow",
",",
"overflow",
",",
"stats",
")",
"=",
"calculate_frequencies",
"(",
"values",
",",
"self",
".",
"_binning",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"weights",
"=",
"weights",
",",
"validate_bins",
"=",
"False",
")",
"self",
".",
"_frequencies",
"+=",
"frequencies",
"self",
".",
"_errors2",
"+=",
"errors2",
"# TODO: check that adaptive does not produce under-/over-flows?",
"if",
"self",
".",
"keep_missed",
":",
"self",
".",
"underflow",
"+=",
"underflow",
"self",
".",
"overflow",
"+=",
"overflow",
"if",
"self",
".",
"_stats",
":",
"for",
"key",
"in",
"self",
".",
"_stats",
":",
"self",
".",
"_stats",
"[",
"key",
"]",
"+=",
"stats",
".",
"get",
"(",
"key",
",",
"0.0",
")"
] | Update histograms with a set of values.
Parameters
----------
values: array_like
weights: Optional[array_like]
dropna: Optional[bool]
If true (default), all nan's are skipped. | [
"Update",
"histograms",
"with",
"a",
"set",
"of",
"values",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L410-L441 | train |
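The batch counterpart of fill, sketched under the same assumptions; NaN values are dropped by default.
import numpy as np
import physt

h = physt.h1([0.5], [0, 1, 2])
h.fill_n([0.1, 0.2, 1.5, np.nan])          # NaN skipped (dropna=True)
h.fill_n([0.1, 1.9], weights=[2.0, 3.0])   # weighted batch fill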
janpipek/physt | physt/histogram1d.py | Histogram1D.to_xarray | def to_xarray(self) -> "xarray.Dataset":
"""Convert to xarray.Dataset"""
import xarray as xr
data_vars = {
"frequencies": xr.DataArray(self.frequencies, dims="bin"),
"errors2": xr.DataArray(self.errors2, dims="bin"),
"bins": xr.DataArray(self.bins, dims=("bin", "x01"))
}
coords = {}
attrs = {
"underflow": self.underflow,
"overflow": self.overflow,
"inner_missed": self.inner_missed,
"keep_missed": self.keep_missed
}
attrs.update(self._meta_data)
# TODO: Add stats
return xr.Dataset(data_vars, coords, attrs) | python | def to_xarray(self) -> "xarray.Dataset":
"""Convert to xarray.Dataset"""
import xarray as xr
data_vars = {
"frequencies": xr.DataArray(self.frequencies, dims="bin"),
"errors2": xr.DataArray(self.errors2, dims="bin"),
"bins": xr.DataArray(self.bins, dims=("bin", "x01"))
}
coords = {}
attrs = {
"underflow": self.underflow,
"overflow": self.overflow,
"inner_missed": self.inner_missed,
"keep_missed": self.keep_missed
}
attrs.update(self._meta_data)
# TODO: Add stats
return xr.Dataset(data_vars, coords, attrs) | [
"def",
"to_xarray",
"(",
"self",
")",
"->",
"\"xarray.Dataset\"",
":",
"import",
"xarray",
"as",
"xr",
"data_vars",
"=",
"{",
"\"frequencies\"",
":",
"xr",
".",
"DataArray",
"(",
"self",
".",
"frequencies",
",",
"dims",
"=",
"\"bin\"",
")",
",",
"\"errors2\"",
":",
"xr",
".",
"DataArray",
"(",
"self",
".",
"errors2",
",",
"dims",
"=",
"\"bin\"",
")",
",",
"\"bins\"",
":",
"xr",
".",
"DataArray",
"(",
"self",
".",
"bins",
",",
"dims",
"=",
"(",
"\"bin\"",
",",
"\"x01\"",
")",
")",
"}",
"coords",
"=",
"{",
"}",
"attrs",
"=",
"{",
"\"underflow\"",
":",
"self",
".",
"underflow",
",",
"\"overflow\"",
":",
"self",
".",
"overflow",
",",
"\"inner_missed\"",
":",
"self",
".",
"inner_missed",
",",
"\"keep_missed\"",
":",
"self",
".",
"keep_missed",
"}",
"attrs",
".",
"update",
"(",
"self",
".",
"_meta_data",
")",
"# TODO: Add stats",
"return",
"xr",
".",
"Dataset",
"(",
"data_vars",
",",
"coords",
",",
"attrs",
")"
] | Convert to xarray.Dataset | [
"Convert",
"to",
"xarray",
".",
"Dataset"
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L487-L504 | train |
janpipek/physt | physt/histogram1d.py | Histogram1D.from_xarray | def from_xarray(cls, arr: "xarray.Dataset") -> "Histogram1D":
"""Convert form xarray.Dataset
Parameters
----------
arr: The data in xarray representation
"""
kwargs = {'frequencies': arr["frequencies"],
'binning': arr["bins"],
'errors2': arr["errors2"],
'overflow': arr.attrs["overflow"],
'underflow': arr.attrs["underflow"],
'keep_missed': arr.attrs["keep_missed"]}
# TODO: Add stats
return cls(**kwargs) | python | def from_xarray(cls, arr: "xarray.Dataset") -> "Histogram1D":
"""Convert form xarray.Dataset
Parameters
----------
arr: The data in xarray representation
"""
kwargs = {'frequencies': arr["frequencies"],
'binning': arr["bins"],
'errors2': arr["errors2"],
'overflow': arr.attrs["overflow"],
'underflow': arr.attrs["underflow"],
'keep_missed': arr.attrs["keep_missed"]}
# TODO: Add stats
return cls(**kwargs) | [
"def",
"from_xarray",
"(",
"cls",
",",
"arr",
":",
"\"xarray.Dataset\"",
")",
"->",
"\"Histogram1D\"",
":",
"kwargs",
"=",
"{",
"'frequencies'",
":",
"arr",
"[",
"\"frequencies\"",
"]",
",",
"'binning'",
":",
"arr",
"[",
"\"bins\"",
"]",
",",
"'errors2'",
":",
"arr",
"[",
"\"errors2\"",
"]",
",",
"'overflow'",
":",
"arr",
".",
"attrs",
"[",
"\"overflow\"",
"]",
",",
"'underflow'",
":",
"arr",
".",
"attrs",
"[",
"\"underflow\"",
"]",
",",
"'keep_missed'",
":",
"arr",
".",
"attrs",
"[",
"\"keep_missed\"",
"]",
"}",
"# TODO: Add stats",
"return",
"cls",
"(",
"*",
"*",
"kwargs",
")"
] | Convert from xarray.Dataset
Parameters
----------
arr: The data in xarray representation | [
"Convert",
"form",
"xarray",
".",
"Dataset"
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L507-L521 | train |
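A roundtrip sketch for to_xarray/from_xarray, assuming xarray is installed; that frequencies survive the roundtrip unchanged is the expected behavior, not a fact verified by these records.
import numpy as np
import physt
from physt.histogram1d import Histogram1D

h = physt.h1(np.random.rand(100), 10)
ds = h.to_xarray()
h2 = Histogram1D.from_xarray(ds)
np.allclose(h2.frequencies, h.frequencies)  # expected True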
janpipek/physt | physt/plotting/__init__.py | set_default_backend | def set_default_backend(name: str):
"""Choose a default backend."""
global _default_backend
if name == "bokeh":
raise RuntimeError("Support for bokeh has been discontinued. At some point, we may return to support holoviews.")
if name not in backends:
raise RuntimeError("Backend {0} is not supported and cannot be set as default.".format(name))
_default_backend = name | python | def set_default_backend(name: str):
"""Choose a default backend."""
global _default_backend
if name == "bokeh":
raise RuntimeError("Support for bokeh has been discontinued. At some point, we may return to support holoviews.")
if name not in backends:
raise RuntimeError("Backend {0} is not supported and cannot be set as default.".format(name))
_default_backend = name | [
"def",
"set_default_backend",
"(",
"name",
":",
"str",
")",
":",
"global",
"_default_backend",
"if",
"name",
"==",
"\"bokeh\"",
":",
"raise",
"RuntimeError",
"(",
"\"Support for bokeh has been discontinued. At some point, we may return to support holoviews.\"",
")",
"if",
"not",
"name",
"in",
"backends",
":",
"raise",
"RuntimeError",
"(",
"\"Backend {0} is not supported and cannot be set as default.\"",
".",
"format",
"(",
"name",
")",
")",
"_default_backend",
"=",
"name"
] | Choose a default backend. | [
"Choose",
"a",
"default",
"backend",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/__init__.py#L139-L146 | train |
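A minimal sketch; "matplotlib" is assumed to be among the registered backends.
from physt import plotting

plotting.set_default_backend("matplotlib")   # later plot() calls use it by default
# plotting.set_default_backend("bokeh") raises RuntimeError (support discontinued)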
janpipek/physt | physt/plotting/__init__.py | _get_backend | def _get_backend(name: str = None):
"""Get a plotting backend.
Tries to get it using the name - or the default one.
"""
if not backends:
raise RuntimeError("No plotting backend available. Please, install matplotlib (preferred) or bokeh (limited).")
if not name:
name = _default_backend
if name == "bokeh":
raise RuntimeError("Support for bokeh has been discontinued. At some point, we may return to support holoviews.")
backend = backends.get(name)
if not backend:
raise RuntimeError("Backend {0} does not exist. Use one of the following: {1}".format(name, ", ".join(backends.keys())))
return name, backends[name] | python | def _get_backend(name: str = None):
"""Get a plotting backend.
Tries to get it using the name - or the default one.
"""
if not backends:
raise RuntimeError("No plotting backend available. Please, install matplotlib (preferred) or bokeh (limited).")
if not name:
name = _default_backend
if name == "bokeh":
raise RuntimeError("Support for bokeh has been discontinued. At some point, we may return to support holoviews.")
backend = backends.get(name)
if not backend:
raise RuntimeError("Backend {0} does not exist. Use one of the following: {1}".format(name, ", ".join(backends.keys())))
return name, backends[name] | [
"def",
"_get_backend",
"(",
"name",
":",
"str",
"=",
"None",
")",
":",
"if",
"not",
"backends",
":",
"raise",
"RuntimeError",
"(",
"\"No plotting backend available. Please, install matplotlib (preferred) or bokeh (limited).\"",
")",
"if",
"not",
"name",
":",
"name",
"=",
"_default_backend",
"if",
"name",
"==",
"\"bokeh\"",
":",
"raise",
"RuntimeError",
"(",
"\"Support for bokeh has been discontinued. At some point, we may return to support holoviews.\"",
")",
"backend",
"=",
"backends",
".",
"get",
"(",
"name",
")",
"if",
"not",
"backend",
":",
"raise",
"RuntimeError",
"(",
"\"Backend {0} does not exist. Use one of the following: {1}\"",
".",
"format",
"(",
"name",
",",
"\", \"",
".",
"join",
"(",
"backends",
".",
"keys",
"(",
")",
")",
")",
")",
"return",
"name",
",",
"backends",
"[",
"name",
"]"
] | Get a plotting backend.
Tries to get it using the name - or the default one. | [
"Get",
"a",
"plotting",
"backend",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/__init__.py#L149-L163 | train |
janpipek/physt | physt/plotting/__init__.py | plot | def plot(histogram: HistogramBase, kind: Optional[str] = None, backend: Optional[str] = None, **kwargs):
"""Universal plotting function.
All keyword arguments are passed to the plotting methods.
Parameters
----------
kind: Type of the plot (like "scatter", "line", ...), similar to pandas
"""
backend_name, backend = _get_backend(backend)
if kind is None:
kinds = [t for t in backend.types if histogram.ndim in backend.dims[t]]
if not kinds:
raise RuntimeError("No plot type is supported for {0}"
.format(histogram.__class__.__name__))
kind = kinds[0]
if kind in backend.types:
method = getattr(backend, kind)
return method(histogram, **kwargs)
else:
raise RuntimeError("Histogram type error: {0} missing in backend {1}"
.format(kind, backend_name)) | python | def plot(histogram: HistogramBase, kind: Optional[str] = None, backend: Optional[str] = None, **kwargs):
"""Universal plotting function.
All keyword arguments are passed to the plotting methods.
Parameters
----------
kind: Type of the plot (like "scatter", "line", ...), similar to pandas
"""
backend_name, backend = _get_backend(backend)
if kind is None:
kinds = [t for t in backend.types if histogram.ndim in backend.dims[t]]
if not kinds:
raise RuntimeError("No plot type is supported for {0}"
.format(histogram.__class__.__name__))
kind = kinds[0]
if kind in backend.types:
method = getattr(backend, kind)
return method(histogram, **kwargs)
else:
raise RuntimeError("Histogram type error: {0} missing in backend {1}"
.format(kind, backend_name)) | [
"def",
"plot",
"(",
"histogram",
":",
"HistogramBase",
",",
"kind",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"backend",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"backend_name",
",",
"backend",
"=",
"_get_backend",
"(",
"backend",
")",
"if",
"kind",
"is",
"None",
":",
"kinds",
"=",
"[",
"t",
"for",
"t",
"in",
"backend",
".",
"types",
"if",
"histogram",
".",
"ndim",
"in",
"backend",
".",
"dims",
"[",
"t",
"]",
"]",
"if",
"not",
"kinds",
":",
"raise",
"RuntimeError",
"(",
"\"No plot type is supported for {0}\"",
".",
"format",
"(",
"histogram",
".",
"__class__",
".",
"__name__",
")",
")",
"kind",
"=",
"kinds",
"[",
"0",
"]",
"if",
"kind",
"in",
"backend",
".",
"types",
":",
"method",
"=",
"getattr",
"(",
"backend",
",",
"kind",
")",
"return",
"method",
"(",
"histogram",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Histogram type error: {0} missing in backend {1}\"",
".",
"format",
"(",
"kind",
",",
"backend_name",
")",
")"
] | Universal plotting function.
All keyword arguments are passed to the plotting methods.
Parameters
----------
kind: Type of the plot (like "scatter", "line", ...), similar to pandas | [
"Universal",
"plotting",
"function",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/__init__.py#L166-L187 | train |
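A sketch of kind/backend selection; the "scatter" kind name is an assumption about what the matplotlib backend registers.
import numpy as np
import physt
from physt import plotting

h = physt.h1(np.random.normal(size=300), 15)
plotting.plot(h)                      # first kind the default backend supports for 1-D
plotting.plot(h, kind="scatter", backend="matplotlib")  # explicit choice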
janpipek/physt | physt/plotting/vega.py | enable_inline_view | def enable_inline_view(f):
"""Decorator to enable in-line viewing in Python and saving to external file.
It adds several parameters to each decorated plotted function:
Parameters
----------
write_to: str (optional)
Path to write vega JSON/HTML to.
write_format: "auto" | "json" | "html"
Whether to create a JSON data file or a full-fledged HTML page.
display: "auto" | True | False
Whether to try in-line display in IPython
indent: int
Indentation of JSON
"""
@wraps(f)
def wrapper(hist, write_to=None, write_format="auto", display="auto", indent=2, **kwargs):
vega_data = f(hist, **kwargs)
if display is True and not VEGA_IPYTHON_PLUGIN_ENABLED:
raise RuntimeError("Cannot display vega plot: {0}".format(VEGA_ERROR))
if display == "auto":
display = write_to is None
if write_to:
write_vega(vega_data, title=hist.title, write_to=write_to, write_format=write_format, indent=indent)
return display_vega(vega_data, display)
return wrapper | python | def enable_inline_view(f):
"""Decorator to enable in-line viewing in Python and saving to external file.
It adds several parameters to each decorated plotting function:
Parameters
----------
write_to: str (optional)
Path to write vega JSON/HTML to.
write_format: "auto" | "json" | "html"
Whether to create a JSON data file or a full-fledged HTML page.
display: "auto" | True | False
Whether to try in-line display in IPython
indent: int
Indentation of JSON
"""
@wraps(f)
def wrapper(hist, write_to=None, write_format="auto", display="auto", indent=2, **kwargs):
vega_data = f(hist, **kwargs)
if display is True and not VEGA_IPYTHON_PLUGIN_ENABLED:
raise RuntimeError("Cannot display vega plot: {0}".format(VEGA_ERROR))
if display == "auto":
display = write_to is None
if write_to:
write_vega(vega_data, title=hist.title, write_to=write_to, write_format=write_format, indent=indent)
return display_vega(vega_data, display)
return wrapper | [
"def",
"enable_inline_view",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"hist",
",",
"write_to",
"=",
"None",
",",
"write_format",
"=",
"\"auto\"",
",",
"display",
"=",
"\"auto\"",
",",
"indent",
"=",
"2",
",",
"*",
"*",
"kwargs",
")",
":",
"vega_data",
"=",
"f",
"(",
"hist",
",",
"*",
"*",
"kwargs",
")",
"if",
"display",
"is",
"True",
"and",
"not",
"VEGA_IPYTHON_PLUGIN_ENABLED",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot display vega plot: {0}\"",
".",
"format",
"(",
"VEGA_ERROR",
")",
")",
"if",
"display",
"==",
"\"auto\"",
":",
"display",
"=",
"write_to",
"is",
"None",
"if",
"write_to",
":",
"write_vega",
"(",
"vega_data",
",",
"hist",
".",
"title",
",",
"write_to",
",",
"write_format",
",",
"indent",
")",
"return",
"display_vega",
"(",
"vega_data",
",",
"display",
")",
"return",
"wrapper"
] | Decorator to enable in-line viewing in Python and saving to external file.
It adds several parameters to each decorated plotting function:
Parameters
----------
write_to: str (optional)
Path to write vega JSON/HTML to.
write_format: "auto" | "json" | "html"
Whether to create a JSON data file or a full-fledged HTML page.
display: "auto" | True | False
Whether to try in-line display in IPython
indent: int
Indentation of JSON | [
"Decorator",
"to",
"enable",
"in",
"-",
"line",
"viewing",
"in",
"Python",
"and",
"saving",
"to",
"external",
"file",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L102-L134 | train |
janpipek/physt | physt/plotting/vega.py | write_vega | def write_vega(vega_data, *, title: Optional[str], write_to: str, write_format: str = "auto", indent: int = 2):
"""Write vega dictionary to an external file.
Parameters
----------
vega_data : Valid vega data as dictionary
title: Title of the plot (used in the generated HTML page).
write_to: Path to write vega JSON/HTML to.
write_format: "auto" | "json" | "html"
Whether to create a JSON data file or a full-fledged HTML page.
indent: Indentation of JSON
"""
spec = json.dumps(vega_data, indent=indent)
if write_format == "html" or write_format is "auto" and write_to.endswith(".html"):
output = HTML_TEMPLATE.replace("{{ title }}", title or "Histogram").replace("{{ spec }}", spec)
elif write_format == "json" or write_format is "auto" and write_to.endswith(".json"):
output = spec
else:
raise RuntimeError("Format not understood.")
with codecs.open(write_to, "w", encoding="utf-8") as out:
out.write(output) | python | def write_vega(vega_data, *, title: Optional[str], write_to: str, write_format: str = "auto", indent: int = 2):
"""Write vega dictionary to an external file.
Parameters
----------
vega_data : Valid vega data as dictionary
title: Title of the plot (used in the generated HTML page).
write_to: Path to write vega JSON/HTML to.
write_format: "auto" | "json" | "html"
Whether to create a JSON data file or a full-fledged HTML page.
indent: Indentation of JSON
"""
spec = json.dumps(vega_data, indent=indent)
if write_format == "html" or write_format is "auto" and write_to.endswith(".html"):
output = HTML_TEMPLATE.replace("{{ title }}", title or "Histogram").replace("{{ spec }}", spec)
elif write_format == "json" or write_format is "auto" and write_to.endswith(".json"):
output = spec
else:
raise RuntimeError("Format not understood.")
with codecs.open(write_to, "w", encoding="utf-8") as out:
out.write(output) | [
"def",
"write_vega",
"(",
"vega_data",
",",
"*",
",",
"title",
":",
"Optional",
"[",
"str",
"]",
",",
"write_to",
":",
"str",
",",
"write_format",
":",
"str",
"=",
"\"auto\"",
",",
"indent",
":",
"int",
"=",
"2",
")",
":",
"spec",
"=",
"json",
".",
"dumps",
"(",
"vega_data",
",",
"indent",
"=",
"indent",
")",
"if",
"write_format",
"==",
"\"html\"",
"or",
"write_format",
"is",
"\"auto\"",
"and",
"write_to",
".",
"endswith",
"(",
"\".html\"",
")",
":",
"output",
"=",
"HTML_TEMPLATE",
".",
"replace",
"(",
"\"{{ title }}\"",
",",
"title",
"or",
"\"Histogram\"",
")",
".",
"replace",
"(",
"\"{{ spec }}\"",
",",
"spec",
")",
"elif",
"write_format",
"==",
"\"json\"",
"or",
"write_format",
"is",
"\"auto\"",
"and",
"write_to",
".",
"endswith",
"(",
"\".json\"",
")",
":",
"output",
"=",
"spec",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Format not understood.\"",
")",
"with",
"codecs",
".",
"open",
"(",
"write_to",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"output",
")"
] | Write vega dictionary to an external file.
Parameters
----------
vega_data : Valid vega data as dictionary
title: Title of the plot (used in the generated HTML page).
write_to: Path to write vega JSON/HTML to.
write_format: "auto" | "json" | "html"
Whether to create a JSON data file or a full-fledged HTML page.
indent: Indentation of JSON | [
"Write",
"vega",
"dictionary",
"to",
"an",
"external",
"file",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L137-L156 | train |
janpipek/physt | physt/plotting/vega.py | display_vega | def display_vega(vega_data: dict, display: bool = True) -> Union['Vega', dict]:
"""Optionally display vega dictionary.
Parameters
----------
vega_data : Valid vega data as dictionary
display: Whether to try in-line display in IPython
"""
if VEGA_IPYTHON_PLUGIN_ENABLED and display:
from vega3 import Vega
return Vega(vega_data)
else:
return vega_data | python | def display_vega(vega_data: dict, display: bool = True) -> Union['Vega', dict]:
"""Optionally display vega dictionary.
Parameters
----------
vega_data : Valid vega data as dictionary
display: Whether to try in-line display in IPython
"""
if VEGA_IPYTHON_PLUGIN_ENABLED and display:
from vega3 import Vega
return Vega(vega_data)
else:
return vega_data | [
"def",
"display_vega",
"(",
"vega_data",
":",
"dict",
",",
"display",
":",
"bool",
"=",
"True",
")",
"->",
"Union",
"[",
"'Vega'",
",",
"dict",
"]",
":",
"if",
"VEGA_IPYTHON_PLUGIN_ENABLED",
"and",
"display",
":",
"from",
"vega3",
"import",
"Vega",
"return",
"Vega",
"(",
"vega_data",
")",
"else",
":",
"return",
"vega_data"
] | Optionally display vega dictionary.
Parameters
----------
vega_data : Valid vega data as dictionary
display: Whether to try in-line display in IPython | [
"Optionally",
"display",
"vega",
"dictionary",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L159-L171 | train |
janpipek/physt | physt/plotting/vega.py | bar | def bar(h1: Histogram1D, **kwargs) -> dict:
"""Bar plot of 1D histogram.
Parameters
----------
lw : float
Width of the line between bars
alpha : float
Opacity of the bars
hover_alpha: float
Opacity of the bars when hovered over
"""
# TODO: Enable collections
# TODO: Enable legend
vega = _create_figure(kwargs)
_add_title(h1, vega, kwargs)
_create_scales(h1, vega, kwargs)
_create_axes(h1, vega, kwargs)
data = get_data(h1, kwargs.pop("density", None), kwargs.pop("cumulative", None)).tolist()
lefts = h1.bin_left_edges.astype(float).tolist()
rights = h1.bin_right_edges.astype(float).tolist()
vega["data"] = [{
"name": "table",
"values": [{
"x": lefts[i],
"x2": rights[i],
"y": data[i],
}
for i in range(h1.bin_count)
]
}]
alpha = kwargs.pop("alpha", 1)
# hover_alpha = kwargs.pop("hover_alpha", alpha)
vega["marks"] = [
{
"type": "rect",
"from": {"data": "table"},
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "x"},
"x2": {"scale": "xscale", "field": "x2"},
"y": {"scale": "yscale", "value": 0},
"y2": {"scale": "yscale", "field": "y"},
# "stroke": {"scale": "color", "field": "c"},
"strokeWidth": {"value": kwargs.pop("lw", 2)}
},
"update": {
"fillOpacity": [
# {"test": "datum === tooltip", "value": hover_alpha},
{"value": alpha}
]
},
}
}
]
_create_tooltips(h1, vega, kwargs)
return vega | python | def bar(h1: Histogram1D, **kwargs) -> dict:
"""Bar plot of 1D histogram.
Parameters
----------
lw : float
Width of the line between bars
alpha : float
Opacity of the bars
hover_alpha: float
Opacity of the bars when hovered over
"""
# TODO: Enable collections
# TODO: Enable legend
vega = _create_figure(kwargs)
_add_title(h1, vega, kwargs)
_create_scales(h1, vega, kwargs)
_create_axes(h1, vega, kwargs)
data = get_data(h1, kwargs.pop("density", None), kwargs.pop("cumulative", None)).tolist()
lefts = h1.bin_left_edges.astype(float).tolist()
rights = h1.bin_right_edges.astype(float).tolist()
vega["data"] = [{
"name": "table",
"values": [{
"x": lefts[i],
"x2": rights[i],
"y": data[i],
}
for i in range(h1.bin_count)
]
}]
alpha = kwargs.pop("alpha", 1)
# hover_alpha = kwargs.pop("hover_alpha", alpha)
vega["marks"] = [
{
"type": "rect",
"from": {"data": "table"},
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "x"},
"x2": {"scale": "xscale", "field": "x2"},
"y": {"scale": "yscale", "value": 0},
"y2": {"scale": "yscale", "field": "y"},
# "stroke": {"scale": "color", "field": "c"},
"strokeWidth": {"value": kwargs.pop("lw", 2)}
},
"update": {
"fillOpacity": [
# {"test": "datum === tooltip", "value": hover_alpha},
{"value": alpha}
]
},
}
}
]
_create_tooltips(h1, vega, kwargs)
return vega | [
"def",
"bar",
"(",
"h1",
":",
"Histogram1D",
",",
"*",
"*",
"kwargs",
")",
"->",
"dict",
":",
"# TODO: Enable collections",
"# TODO: Enable legend",
"vega",
"=",
"_create_figure",
"(",
"kwargs",
")",
"_add_title",
"(",
"h1",
",",
"vega",
",",
"kwargs",
")",
"_create_scales",
"(",
"h1",
",",
"vega",
",",
"kwargs",
")",
"_create_axes",
"(",
"h1",
",",
"vega",
",",
"kwargs",
")",
"data",
"=",
"get_data",
"(",
"h1",
",",
"kwargs",
".",
"pop",
"(",
"\"density\"",
",",
"None",
")",
",",
"kwargs",
".",
"pop",
"(",
"\"cumulative\"",
",",
"None",
")",
")",
".",
"tolist",
"(",
")",
"lefts",
"=",
"h1",
".",
"bin_left_edges",
".",
"astype",
"(",
"float",
")",
".",
"tolist",
"(",
")",
"rights",
"=",
"h1",
".",
"bin_right_edges",
".",
"astype",
"(",
"float",
")",
".",
"tolist",
"(",
")",
"vega",
"[",
"\"data\"",
"]",
"=",
"[",
"{",
"\"name\"",
":",
"\"table\"",
",",
"\"values\"",
":",
"[",
"{",
"\"x\"",
":",
"lefts",
"[",
"i",
"]",
",",
"\"x2\"",
":",
"rights",
"[",
"i",
"]",
",",
"\"y\"",
":",
"data",
"[",
"i",
"]",
",",
"}",
"for",
"i",
"in",
"range",
"(",
"h1",
".",
"bin_count",
")",
"]",
"}",
"]",
"alpha",
"=",
"kwargs",
".",
"pop",
"(",
"\"alpha\"",
",",
"1",
")",
"# hover_alpha = kwargs.pop(\"hover_alpha\", alpha)",
"vega",
"[",
"\"marks\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"rect\"",
",",
"\"from\"",
":",
"{",
"\"data\"",
":",
"\"table\"",
"}",
",",
"\"encode\"",
":",
"{",
"\"enter\"",
":",
"{",
"\"x\"",
":",
"{",
"\"scale\"",
":",
"\"xscale\"",
",",
"\"field\"",
":",
"\"x\"",
"}",
",",
"\"x2\"",
":",
"{",
"\"scale\"",
":",
"\"xscale\"",
",",
"\"field\"",
":",
"\"x2\"",
"}",
",",
"\"y\"",
":",
"{",
"\"scale\"",
":",
"\"yscale\"",
",",
"\"value\"",
":",
"0",
"}",
",",
"\"y2\"",
":",
"{",
"\"scale\"",
":",
"\"yscale\"",
",",
"\"field\"",
":",
"\"y\"",
"}",
",",
"# \"stroke\": {\"scale\": \"color\", \"field\": \"c\"},",
"\"strokeWidth\"",
":",
"{",
"\"value\"",
":",
"kwargs",
".",
"pop",
"(",
"\"lw\"",
",",
"2",
")",
"}",
"}",
",",
"\"update\"",
":",
"{",
"\"fillOpacity\"",
":",
"[",
"# {\"test\": \"datum === tooltip\", \"value\": hover_alpha},",
"{",
"\"value\"",
":",
"alpha",
"}",
"]",
"}",
",",
"}",
"}",
"]",
"_create_tooltips",
"(",
"h1",
",",
"vega",
",",
"kwargs",
")",
"return",
"vega"
] | Bar plot of 1D histogram.
Parameters
----------
lw : float
Width of the line between bars
alpha : float
Opacity of the bars
hover_alpha: float
Opacity of the bars when hovered over | [
"Bar",
"plot",
"of",
"1D",
"histogram",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L175-L237 | train |
janpipek/physt | physt/plotting/vega.py | scatter | def scatter(h1: Histogram1D, **kwargs) -> dict:
"""Scatter plot of 1D histogram values.
Points are horizontally placed in bin centers.
Parameters
----------
shape : str
Vega symbol shape name (e.g. "circle", "square", "cross")
"""
shape = kwargs.pop("shape", DEFAULT_SCATTER_SHAPE)
# size = kwargs.pop("size", DEFAULT_SCATTER_SIZE)
mark_template = [{
"type": "symbol",
"from": {"data": "series"},
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "x"},
"y": {"scale": "yscale", "field": "y"},
"shape": {"value": shape},
# "size": {"value": size},
"fill": {"scale": "series", "field": "c"},
},
}
}]
vega = _scatter_or_line(h1, mark_template=mark_template, kwargs=kwargs)
return vega | python | def scatter(h1: Histogram1D, **kwargs) -> dict:
"""Scatter plot of 1D histogram values.
Points are horizontally placed in bin centers.
Parameters
----------
shape : str
Vega symbol shape name (e.g. "circle", "square", "cross")
"""
shape = kwargs.pop("shape", DEFAULT_SCATTER_SHAPE)
# size = kwargs.pop("size", DEFAULT_SCATTER_SIZE)
mark_template = [{
"type": "symbol",
"from": {"data": "series"},
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "x"},
"y": {"scale": "yscale", "field": "y"},
"shape": {"value": shape},
# "size": {"value": size},
"fill": {"scale": "series", "field": "c"},
},
}
}]
vega = _scatter_or_line(h1, mark_template=mark_template, kwargs=kwargs)
return vega | [
"def",
"scatter",
"(",
"h1",
":",
"Histogram1D",
",",
"*",
"*",
"kwargs",
")",
"->",
"dict",
":",
"shape",
"=",
"kwargs",
".",
"pop",
"(",
"\"shape\"",
",",
"DEFAULT_SCATTER_SHAPE",
")",
"# size = kwargs.pop(\"size\", DEFAULT_SCATTER_SIZE)",
"mark_template",
"=",
"[",
"{",
"\"type\"",
":",
"\"symbol\"",
",",
"\"from\"",
":",
"{",
"\"data\"",
":",
"\"series\"",
"}",
",",
"\"encode\"",
":",
"{",
"\"enter\"",
":",
"{",
"\"x\"",
":",
"{",
"\"scale\"",
":",
"\"xscale\"",
",",
"\"field\"",
":",
"\"x\"",
"}",
",",
"\"y\"",
":",
"{",
"\"scale\"",
":",
"\"yscale\"",
",",
"\"field\"",
":",
"\"y\"",
"}",
",",
"\"shape\"",
":",
"{",
"\"value\"",
":",
"shape",
"}",
",",
"# \"size\": {\"value\": size},",
"\"fill\"",
":",
"{",
"\"scale\"",
":",
"\"series\"",
",",
"\"field\"",
":",
"\"c\"",
"}",
",",
"}",
",",
"}",
"}",
"]",
"vega",
"=",
"_scatter_or_line",
"(",
"h1",
",",
"mark_template",
"=",
"mark_template",
",",
"kwargs",
"=",
"kwargs",
")",
"return",
"vega"
] | Scatter plot of 1D histogram values.
Points are horizontally placed in bin centers.
Parameters
----------
shape : str
Vega symbol shape name (e.g. "circle", "square", "cross") | [
"Scatter",
"plot",
"of",
"1D",
"histogram",
"values",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L244-L270 | train |
janpipek/physt | physt/plotting/vega.py | line | def line(h1: Histogram1D, **kwargs) -> dict:
"""Line plot of 1D histogram values.
Points are horizontally placed in bin centers.
Parameters
----------
h1 : physt.histogram1d.Histogram1D
The histogram whose values to plot.
"""
lw = kwargs.pop("lw", DEFAULT_STROKE_WIDTH)
mark_template = [{
"type": "line",
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "x"},
"y": {"scale": "yscale", "field": "y"},
"stroke": {"scale": "series", "field": "c"},
"strokeWidth": {"value": lw}
}
},
"from": {"data": "series"},
}]
vega = _scatter_or_line(h1, mark_template=mark_template, kwargs=kwargs)
return vega | python | def line(h1: Histogram1D, **kwargs) -> dict:
"""Line plot of 1D histogram values.
Points are horizontally placed in bin centers.
Parameters
----------
h1 : physt.histogram1d.Histogram1D
The histogram whose values to plot.
"""
lw = kwargs.pop("lw", DEFAULT_STROKE_WIDTH)
mark_template = [{
"type": "line",
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "x"},
"y": {"scale": "yscale", "field": "y"},
"stroke": {"scale": "series", "field": "c"},
"strokeWidth": {"value": lw}
}
},
"from": {"data": "series"},
}]
vega = _scatter_or_line(h1, mark_template=mark_template, kwargs=kwargs)
return vega | [
"def",
"line",
"(",
"h1",
":",
"Histogram1D",
",",
"*",
"*",
"kwargs",
")",
"->",
"dict",
":",
"lw",
"=",
"kwargs",
".",
"pop",
"(",
"\"lw\"",
",",
"DEFAULT_STROKE_WIDTH",
")",
"mark_template",
"=",
"[",
"{",
"\"type\"",
":",
"\"line\"",
",",
"\"encode\"",
":",
"{",
"\"enter\"",
":",
"{",
"\"x\"",
":",
"{",
"\"scale\"",
":",
"\"xscale\"",
",",
"\"field\"",
":",
"\"x\"",
"}",
",",
"\"y\"",
":",
"{",
"\"scale\"",
":",
"\"yscale\"",
",",
"\"field\"",
":",
"\"y\"",
"}",
",",
"\"stroke\"",
":",
"{",
"\"scale\"",
":",
"\"series\"",
",",
"\"field\"",
":",
"\"c\"",
"}",
",",
"\"strokeWidth\"",
":",
"{",
"\"value\"",
":",
"lw",
"}",
"}",
"}",
",",
"\"from\"",
":",
"{",
"\"data\"",
":",
"\"series\"",
"}",
",",
"}",
"]",
"vega",
"=",
"_scatter_or_line",
"(",
"h1",
",",
"mark_template",
"=",
"mark_template",
",",
"kwargs",
"=",
"kwargs",
")",
"return",
"vega"
] | Line plot of 1D histogram values.
Points are horizontally placed in bin centers.
Parameters
----------
h1 : physt.histogram1d.Histogram1D
The histogram whose values to plot. | [
"Line",
"plot",
"of",
"1D",
"histogram",
"values",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L276-L302 | train |
janpipek/physt | physt/plotting/vega.py | _create_figure | def _create_figure(kwargs: Mapping[str, Any]) -> dict:
"""Create basic dictionary object with figure properties."""
return {
"$schema": "https://vega.github.io/schema/vega/v3.json",
"width": kwargs.pop("width", DEFAULT_WIDTH),
"height": kwargs.pop("height", DEFAULT_HEIGHT),
"padding": kwargs.pop("padding", DEFAULT_PADDING)
} | python | def _create_figure(kwargs: Mapping[str, Any]) -> dict:
"""Create basic dictionary object with figure properties."""
return {
"$schema": "https://vega.github.io/schema/vega/v3.json",
"width": kwargs.pop("width", DEFAULT_WIDTH),
"height": kwargs.pop("height", DEFAULT_HEIGHT),
"padding": kwargs.pop("padding", DEFAULT_PADDING)
} | [
"def",
"_create_figure",
"(",
"kwargs",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"dict",
":",
"return",
"{",
"\"$schema\"",
":",
"\"https://vega.github.io/schema/vega/v3.json\"",
",",
"\"width\"",
":",
"kwargs",
".",
"pop",
"(",
"\"width\"",
",",
"DEFAULT_WIDTH",
")",
",",
"\"height\"",
":",
"kwargs",
".",
"pop",
"(",
"\"height\"",
",",
"DEFAULT_HEIGHT",
")",
",",
"\"padding\"",
":",
"kwargs",
".",
"pop",
"(",
"\"padding\"",
",",
"DEFAULT_PADDING",
")",
"}"
] | Create basic dictionary object with figure properties. | [
"Create",
"basic",
"dictionary",
"object",
"with",
"figure",
"properties",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L551-L558 | train |
janpipek/physt | physt/plotting/vega.py | _create_scales | def _create_scales(hist: HistogramBase, vega: dict, kwargs: dict):
"""Find proper scales for axes."""
if hist.ndim == 1:
bins0 = hist.bins.astype(float)
else:
bins0 = hist.bins[0].astype(float)
xlim = kwargs.pop("xlim", "auto")
ylim = kwargs.pop("ylim", "auto")
if xlim is "auto":
nice_x = True
else:
nice_x = False
if ylim is "auto":
nice_y = True
else:
nice_y = False
# TODO: Unify xlim & ylim parameters with matplotlib
# TODO: Apply xscale & yscale parameters
vega["scales"] = [
{
"name": "xscale",
"type": "linear",
"range": "width",
"nice": nice_x,
"zero": None,
"domain": [bins0[0, 0], bins0[-1, 1]] if xlim == "auto" else [float(xlim[0]), float(xlim[1])],
# "domain": {"data": "table", "field": "x"}
},
{
"name": "yscale",
"type": "linear",
"range": "height",
"nice": nice_y,
"zero": True if hist.ndim == 1 else None,
"domain": {"data": "table", "field": "y"} if ylim == "auto" else [float(ylim[0]), float(ylim[1])]
}
]
if hist.ndim >= 2:
bins1 = hist.bins[1].astype(float)
vega["scales"][1]["domain"] = [bins1[0, 0], bins1[-1, 1]] | python | def _create_scales(hist: HistogramBase, vega: dict, kwargs: dict):
"""Find proper scales for axes."""
if hist.ndim == 1:
bins0 = hist.bins.astype(float)
else:
bins0 = hist.bins[0].astype(float)
xlim = kwargs.pop("xlim", "auto")
ylim = kwargs.pop("ylim", "auto")
if xlim is "auto":
nice_x = True
else:
nice_x = False
if ylim is "auto":
nice_y = True
else:
nice_y = False
# TODO: Unify xlim & ylim parameters with matplotlib
# TODO: Apply xscale & yscale parameters
vega["scales"] = [
{
"name": "xscale",
"type": "linear",
"range": "width",
"nice": nice_x,
"zero": None,
"domain": [bins0[0, 0], bins0[-1, 1]] if xlim == "auto" else [float(xlim[0]), float(xlim[1])],
# "domain": {"data": "table", "field": "x"}
},
{
"name": "yscale",
"type": "linear",
"range": "height",
"nice": nice_y,
"zero": True if hist.ndim == 1 else None,
"domain": {"data": "table", "field": "y"} if ylim == "auto" else [float(ylim[0]), float(ylim[1])]
}
]
if hist.ndim >= 2:
bins1 = hist.bins[1].astype(float)
vega["scales"][1]["domain"] = [bins1[0, 0], bins1[-1, 1]] | [
"def",
"_create_scales",
"(",
"hist",
":",
"HistogramBase",
",",
"vega",
":",
"dict",
",",
"kwargs",
":",
"dict",
")",
":",
"if",
"hist",
".",
"ndim",
"==",
"1",
":",
"bins0",
"=",
"hist",
".",
"bins",
".",
"astype",
"(",
"float",
")",
"else",
":",
"bins0",
"=",
"hist",
".",
"bins",
"[",
"0",
"]",
".",
"astype",
"(",
"float",
")",
"xlim",
"=",
"kwargs",
".",
"pop",
"(",
"\"xlim\"",
",",
"\"auto\"",
")",
"ylim",
"=",
"kwargs",
".",
"pop",
"(",
"\"ylim\"",
",",
"\"auto\"",
")",
"if",
"xlim",
"is",
"\"auto\"",
":",
"nice_x",
"=",
"True",
"else",
":",
"nice_x",
"=",
"False",
"if",
"ylim",
"is",
"\"auto\"",
":",
"nice_y",
"=",
"True",
"else",
":",
"nice_y",
"=",
"False",
"# TODO: Unify xlim & ylim parameters with matplotlib",
"# TODO: Apply xscale & yscale parameters",
"vega",
"[",
"\"scales\"",
"]",
"=",
"[",
"{",
"\"name\"",
":",
"\"xscale\"",
",",
"\"type\"",
":",
"\"linear\"",
",",
"\"range\"",
":",
"\"width\"",
",",
"\"nice\"",
":",
"nice_x",
",",
"\"zero\"",
":",
"None",
",",
"\"domain\"",
":",
"[",
"bins0",
"[",
"0",
",",
"0",
"]",
",",
"bins0",
"[",
"-",
"1",
",",
"1",
"]",
"]",
"if",
"xlim",
"==",
"\"auto\"",
"else",
"[",
"float",
"(",
"xlim",
"[",
"0",
"]",
")",
",",
"float",
"(",
"xlim",
"[",
"1",
"]",
")",
"]",
",",
"# \"domain\": {\"data\": \"table\", \"field\": \"x\"}",
"}",
",",
"{",
"\"name\"",
":",
"\"yscale\"",
",",
"\"type\"",
":",
"\"linear\"",
",",
"\"range\"",
":",
"\"height\"",
",",
"\"nice\"",
":",
"nice_y",
",",
"\"zero\"",
":",
"True",
"if",
"hist",
".",
"ndim",
"==",
"1",
"else",
"None",
",",
"\"domain\"",
":",
"{",
"\"data\"",
":",
"\"table\"",
",",
"\"field\"",
":",
"\"y\"",
"}",
"if",
"ylim",
"==",
"\"auto\"",
"else",
"[",
"float",
"(",
"ylim",
"[",
"0",
"]",
")",
",",
"float",
"(",
"ylim",
"[",
"1",
"]",
")",
"]",
"}",
"]",
"if",
"hist",
".",
"ndim",
">=",
"2",
":",
"bins1",
"=",
"hist",
".",
"bins",
"[",
"1",
"]",
".",
"astype",
"(",
"float",
")",
"vega",
"[",
"\"scales\"",
"]",
"[",
"1",
"]",
"[",
"\"domain\"",
"]",
"=",
"[",
"bins1",
"[",
"0",
",",
"0",
"]",
",",
"bins1",
"[",
"-",
"1",
",",
"1",
"]",
"]"
] | Find proper scales for axes. | [
"Find",
"proper",
"scales",
"for",
"axes",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L568-L613 | train |
janpipek/physt | physt/plotting/vega.py | _create_axes | def _create_axes(hist: HistogramBase, vega: dict, kwargs: dict):
"""Create axes in the figure."""
xlabel = kwargs.pop("xlabel", hist.axis_names[0])
ylabel = kwargs.pop("ylabel", hist.axis_names[1] if len(hist.axis_names) >= 2 else None)
vega["axes"] = [
{"orient": "bottom", "scale": "xscale", "title": xlabel},
{"orient": "left", "scale": "yscale", "title": ylabel}
] | python | def _create_axes(hist: HistogramBase, vega: dict, kwargs: dict):
"""Create axes in the figure."""
xlabel = kwargs.pop("xlabel", hist.axis_names[0])
ylabel = kwargs.pop("ylabel", hist.axis_names[1] if len(hist.axis_names) >= 2 else None)
vega["axes"] = [
{"orient": "bottom", "scale": "xscale", "title": xlabel},
{"orient": "left", "scale": "yscale", "title": ylabel}
] | [
"def",
"_create_axes",
"(",
"hist",
":",
"HistogramBase",
",",
"vega",
":",
"dict",
",",
"kwargs",
":",
"dict",
")",
":",
"xlabel",
"=",
"kwargs",
".",
"pop",
"(",
"\"xlabel\"",
",",
"hist",
".",
"axis_names",
"[",
"0",
"]",
")",
"ylabel",
"=",
"kwargs",
".",
"pop",
"(",
"\"ylabel\"",
",",
"hist",
".",
"axis_names",
"[",
"1",
"]",
"if",
"len",
"(",
"hist",
".",
"axis_names",
")",
">=",
"2",
"else",
"None",
")",
"vega",
"[",
"\"axes\"",
"]",
"=",
"[",
"{",
"\"orient\"",
":",
"\"bottom\"",
",",
"\"scale\"",
":",
"\"xscale\"",
",",
"\"title\"",
":",
"xlabel",
"}",
",",
"{",
"\"orient\"",
":",
"\"left\"",
",",
"\"scale\"",
":",
"\"yscale\"",
",",
"\"title\"",
":",
"ylabel",
"}",
"]"
] | Create axes in the figure. | [
"Create",
"axes",
"in",
"the",
"figure",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L673-L680 | train |
janpipek/physt | physt/plotting/vega.py | _create_tooltips | def _create_tooltips(hist: Histogram1D, vega: dict, kwargs: dict):
"""In one-dimensional plots, show values above the value on hover."""
if kwargs.pop("tooltips", False):
vega["signals"] = vega.get("signals", [])
vega["signals"].append({
"name": "tooltip",
"value": {},
"on": [
{"events": "rect:mouseover", "update": "datum"},
{"events": "rect:mouseout", "update": "{}"}
]
})
font_size = kwargs.get("fontsize", DEFAULT_FONTSIZE)
vega["marks"] = vega.get("marks", [])
vega["marks"].append({
"type": "text",
"encode": {
"enter": {
"align": {"value": "center"},
"baseline": {"value": "bottom"},
"fill": {"value": "#333"},
"fontSize": {"value": font_size}
},
"update": {
"x": {"scale": "xscale", "signal": "(tooltip.x + tooltip.x2) / 2", "band": 0.5},
"y": {"scale": "yscale", "signal": "tooltip.y", "offset": -2},
"text": {"signal": "tooltip.y"},
"fillOpacity": [
{"test": "datum === tooltip", "value": 0},
{"value": 1}
]
}
}
}) | python | def _create_tooltips(hist: Histogram1D, vega: dict, kwargs: dict):
"""In one-dimensional plots, show values above the value on hover."""
if kwargs.pop("tooltips", False):
vega["signals"] = vega.get("signals", [])
vega["signals"].append({
"name": "tooltip",
"value": {},
"on": [
{"events": "rect:mouseover", "update": "datum"},
{"events": "rect:mouseout", "update": "{}"}
]
})
font_size = kwargs.get("fontsize", DEFAULT_FONTSIZE)
vega["marks"] = vega.get("marks", [])
vega["marks"].append({
"type": "text",
"encode": {
"enter": {
"align": {"value": "center"},
"baseline": {"value": "bottom"},
"fill": {"value": "#333"},
"fontSize": {"value": font_size}
},
"update": {
"x": {"scale": "xscale", "signal": "(tooltip.x + tooltip.x2) / 2", "band": 0.5},
"y": {"scale": "yscale", "signal": "tooltip.y", "offset": -2},
"text": {"signal": "tooltip.y"},
"fillOpacity": [
{"test": "datum === tooltip", "value": 0},
{"value": 1}
]
}
}
}) | [
"def",
"_create_tooltips",
"(",
"hist",
":",
"Histogram1D",
",",
"vega",
":",
"dict",
",",
"kwargs",
":",
"dict",
")",
":",
"if",
"kwargs",
".",
"pop",
"(",
"\"tooltips\"",
",",
"False",
")",
":",
"vega",
"[",
"\"signals\"",
"]",
"=",
"vega",
".",
"get",
"(",
"\"signals\"",
",",
"[",
"]",
")",
"vega",
"[",
"\"signals\"",
"]",
".",
"append",
"(",
"{",
"\"name\"",
":",
"\"tooltip\"",
",",
"\"value\"",
":",
"{",
"}",
",",
"\"on\"",
":",
"[",
"{",
"\"events\"",
":",
"\"rect:mouseover\"",
",",
"\"update\"",
":",
"\"datum\"",
"}",
",",
"{",
"\"events\"",
":",
"\"rect:mouseout\"",
",",
"\"update\"",
":",
"\"{}\"",
"}",
"]",
"}",
")",
"font_size",
"=",
"kwargs",
".",
"get",
"(",
"\"fontsize\"",
",",
"DEFAULT_FONTSIZE",
")",
"vega",
"[",
"\"marks\"",
"]",
"=",
"vega",
".",
"get",
"(",
"\"marks\"",
",",
"[",
"]",
")",
"vega",
"[",
"\"marks\"",
"]",
".",
"append",
"(",
"{",
"\"type\"",
":",
"\"text\"",
",",
"\"encode\"",
":",
"{",
"\"enter\"",
":",
"{",
"\"align\"",
":",
"{",
"\"value\"",
":",
"\"center\"",
"}",
",",
"\"baseline\"",
":",
"{",
"\"value\"",
":",
"\"bottom\"",
"}",
",",
"\"fill\"",
":",
"{",
"\"value\"",
":",
"\"#333\"",
"}",
",",
"\"fontSize\"",
":",
"{",
"\"value\"",
":",
"font_size",
"}",
"}",
",",
"\"update\"",
":",
"{",
"\"x\"",
":",
"{",
"\"scale\"",
":",
"\"xscale\"",
",",
"\"signal\"",
":",
"\"(tooltip.x + tooltip.x2) / 2\"",
",",
"\"band\"",
":",
"0.5",
"}",
",",
"\"y\"",
":",
"{",
"\"scale\"",
":",
"\"yscale\"",
",",
"\"signal\"",
":",
"\"tooltip.y\"",
",",
"\"offset\"",
":",
"-",
"2",
"}",
",",
"\"text\"",
":",
"{",
"\"signal\"",
":",
"\"tooltip.y\"",
"}",
",",
"\"fillOpacity\"",
":",
"[",
"{",
"\"test\"",
":",
"\"datum === tooltip\"",
",",
"\"value\"",
":",
"0",
"}",
",",
"{",
"\"value\"",
":",
"1",
"}",
"]",
"}",
"}",
"}",
")"
] | In one-dimensional plots, show the bin value above the bar on hover. | [
"In",
"one",
"-",
"dimensional",
"plots",
"show",
"values",
"above",
"the",
"value",
"on",
"hover",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L683-L718 | train |
janpipek/physt | physt/plotting/vega.py | _add_title | def _add_title(hist: HistogramBase, vega: dict, kwargs: dict):
"""Display plot title if available."""
title = kwargs.pop("title", hist.title)
if title:
vega["title"] = {
"text": title
} | python | def _add_title(hist: HistogramBase, vega: dict, kwargs: dict):
"""Display plot title if available."""
title = kwargs.pop("title", hist.title)
if title:
vega["title"] = {
"text": title
} | [
"def",
"_add_title",
"(",
"hist",
":",
"HistogramBase",
",",
"vega",
":",
"dict",
",",
"kwargs",
":",
"dict",
")",
":",
"title",
"=",
"kwargs",
".",
"pop",
"(",
"\"title\"",
",",
"hist",
".",
"title",
")",
"if",
"title",
":",
"vega",
"[",
"\"title\"",
"]",
"=",
"{",
"\"text\"",
":",
"title",
"}"
] | Display plot title if available. | [
"Display",
"plot",
"title",
"if",
"available",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L721-L727 | train |
janpipek/physt | physt/special.py | _prepare_data | def _prepare_data(data, transformed, klass, *args, **kwargs):
"""Transform data for binning.
Returns
-------
np.ndarray
"""
# TODO: Maybe include in the class itself?
data = np.asarray(data)
if not transformed:
data = klass.transform(data)
dropna = kwargs.get("dropna", False)
if dropna:
data = data[~np.isnan(data).any(axis=1)]
return data | python | def _prepare_data(data, transformed, klass, *args, **kwargs):
"""Transform data for binning.
Returns
-------
np.ndarray
"""
# TODO: Maybe include in the class itself?
data = np.asarray(data)
if not transformed:
data = klass.transform(data)
dropna = kwargs.get("dropna", False)
if dropna:
data = data[~np.isnan(data).any(axis=1)]
return data | [
"def",
"_prepare_data",
"(",
"data",
",",
"transformed",
",",
"klass",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: Maybe include in the class itself?",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"if",
"not",
"transformed",
":",
"data",
"=",
"klass",
".",
"transform",
"(",
"data",
")",
"dropna",
"=",
"kwargs",
".",
"get",
"(",
"\"dropna\"",
",",
"False",
")",
"if",
"dropna",
":",
"data",
"=",
"data",
"[",
"~",
"np",
".",
"isnan",
"(",
"data",
")",
".",
"any",
"(",
"axis",
"=",
"1",
")",
"]",
"return",
"data"
] | Transform data for binning.
Returns
-------
np.ndarray | [
"Transform",
"data",
"for",
"binning",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/special.py#L331-L345 | train |
janpipek/physt | physt/special.py | polar_histogram | def polar_histogram(xdata, ydata, radial_bins="numpy", phi_bins=16,
transformed=False, *args, **kwargs):
"""Facade construction function for the PolarHistogram.
Parameters
----------
transformed : bool
phi_range : Optional[tuple]
(min, max) range of the phi angle in radians
"""
dropna = kwargs.pop("dropna", True)
data = np.concatenate([xdata[:, np.newaxis], ydata[:, np.newaxis]], axis=1)
data = _prepare_data(data, transformed=transformed, klass=PolarHistogram, dropna=dropna)
if isinstance(phi_bins, int):
phi_range = (0, 2 * np.pi)
if "phi_range" in "kwargs":
phi_range = kwargs["phi_range"]
elif "range" in "kwargs":
phi_range = kwargs["range"][1]
phi_range = list(phi_range) + [phi_bins + 1]
phi_bins = np.linspace(*phi_range)
bin_schemas = binnings.calculate_bins_nd(data, [radial_bins, phi_bins], *args,
check_nan=not dropna, **kwargs)
weights = kwargs.pop("weights", None)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(data, ndim=2,
binnings=bin_schemas,
weights=weights)
return PolarHistogram(binnings=bin_schemas, frequencies=frequencies, errors2=errors2, missed=missed) | python | def polar_histogram(xdata, ydata, radial_bins="numpy", phi_bins=16,
transformed=False, *args, **kwargs):
"""Facade construction function for the PolarHistogram.
Parameters
----------
transformed : bool
phi_range : Optional[tuple]
(min, max) range of the phi angle in radians
"""
dropna = kwargs.pop("dropna", True)
data = np.concatenate([xdata[:, np.newaxis], ydata[:, np.newaxis]], axis=1)
data = _prepare_data(data, transformed=transformed, klass=PolarHistogram, dropna=dropna)
if isinstance(phi_bins, int):
phi_range = (0, 2 * np.pi)
if "phi_range" in "kwargs":
phi_range = kwargs["phi_range"]
elif "range" in "kwargs":
phi_range = kwargs["range"][1]
phi_range = list(phi_range) + [phi_bins + 1]
phi_bins = np.linspace(*phi_range)
bin_schemas = binnings.calculate_bins_nd(data, [radial_bins, phi_bins], *args,
check_nan=not dropna, **kwargs)
weights = kwargs.pop("weights", None)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(data, ndim=2,
binnings=bin_schemas,
weights=weights)
return PolarHistogram(binnings=bin_schemas, frequencies=frequencies, errors2=errors2, missed=missed) | [
"def",
"polar_histogram",
"(",
"xdata",
",",
"ydata",
",",
"radial_bins",
"=",
"\"numpy\"",
",",
"phi_bins",
"=",
"16",
",",
"transformed",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"dropna",
"=",
"kwargs",
".",
"pop",
"(",
"\"dropna\"",
",",
"True",
")",
"data",
"=",
"np",
".",
"concatenate",
"(",
"[",
"xdata",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"ydata",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"data",
"=",
"_prepare_data",
"(",
"data",
",",
"transformed",
"=",
"transformed",
",",
"klass",
"=",
"PolarHistogram",
",",
"dropna",
"=",
"dropna",
")",
"if",
"isinstance",
"(",
"phi_bins",
",",
"int",
")",
":",
"phi_range",
"=",
"(",
"0",
",",
"2",
"*",
"np",
".",
"pi",
")",
"if",
"\"phi_range\"",
"in",
"\"kwargs\"",
":",
"phi_range",
"=",
"kwargs",
"[",
"\"phi_range\"",
"]",
"elif",
"\"range\"",
"in",
"\"kwargs\"",
":",
"phi_range",
"=",
"kwargs",
"[",
"\"range\"",
"]",
"[",
"1",
"]",
"phi_range",
"=",
"list",
"(",
"phi_range",
")",
"+",
"[",
"phi_bins",
"+",
"1",
"]",
"phi_bins",
"=",
"np",
".",
"linspace",
"(",
"*",
"phi_range",
")",
"bin_schemas",
"=",
"binnings",
".",
"calculate_bins_nd",
"(",
"data",
",",
"[",
"radial_bins",
",",
"phi_bins",
"]",
",",
"*",
"args",
",",
"check_nan",
"=",
"not",
"dropna",
",",
"*",
"*",
"kwargs",
")",
"weights",
"=",
"kwargs",
".",
"pop",
"(",
"\"weights\"",
",",
"None",
")",
"frequencies",
",",
"errors2",
",",
"missed",
"=",
"histogram_nd",
".",
"calculate_frequencies",
"(",
"data",
",",
"ndim",
"=",
"2",
",",
"binnings",
"=",
"bin_schemas",
",",
"weights",
"=",
"weights",
")",
"return",
"PolarHistogram",
"(",
"binnings",
"=",
"bin_schemas",
",",
"frequencies",
"=",
"frequencies",
",",
"errors2",
"=",
"errors2",
",",
"missed",
"=",
"missed",
")"
] | Facade construction function for the PolarHistogram.
Parameters
----------
transformed : bool
phi_range : Optional[tuple]
(min, max) range of the phi angle in radians | [
"Facade",
"construction",
"function",
"for",
"the",
"PolarHistogram",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/special.py#L348-L377 | train |
janpipek/physt | physt/special.py | spherical_histogram | def spherical_histogram(data=None, radial_bins="numpy", theta_bins=16, phi_bins=16, transformed=False, *args, **kwargs):
"""Facade construction function for the SphericalHistogram.
"""
dropna = kwargs.pop("dropna", True)
data = _prepare_data(data, transformed=transformed, klass=SphericalHistogram, dropna=dropna)
if isinstance(theta_bins, int):
theta_range = (0, np.pi)
if "theta_range" in "kwargs":
theta_range = kwargs["theta_range"]
elif "range" in "kwargs":
theta_range = kwargs["range"][1]
theta_range = list(theta_range) + [theta_bins + 1]
theta_bins = np.linspace(*theta_range)
if isinstance(phi_bins, int):
phi_range = (0, 2 * np.pi)
if "phi_range" in "kwargs":
phi_range = kwargs["phi_range"]
elif "range" in "kwargs":
phi_range = kwargs["range"][2]
phi_range = list(phi_range) + [phi_bins + 1]
phi_bins = np.linspace(*phi_range)
bin_schemas = binnings.calculate_bins_nd(data, [radial_bins, theta_bins, phi_bins], *args,
check_nan=not dropna, **kwargs)
weights = kwargs.pop("weights", None)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(data, ndim=3,
binnings=bin_schemas,
weights=weights)
return SphericalHistogram(binnings=bin_schemas, frequencies=frequencies, errors2=errors2, missed=missed) | python | def spherical_histogram(data=None, radial_bins="numpy", theta_bins=16, phi_bins=16, transformed=False, *args, **kwargs):
"""Facade construction function for the SphericalHistogram.
"""
dropna = kwargs.pop("dropna", True)
data = _prepare_data(data, transformed=transformed, klass=SphericalHistogram, dropna=dropna)
if isinstance(theta_bins, int):
theta_range = (0, np.pi)
if "theta_range" in "kwargs":
theta_range = kwargs["theta_range"]
elif "range" in "kwargs":
theta_range = kwargs["range"][1]
theta_range = list(theta_range) + [theta_bins + 1]
theta_bins = np.linspace(*theta_range)
if isinstance(phi_bins, int):
phi_range = (0, 2 * np.pi)
if "phi_range" in "kwargs":
phi_range = kwargs["phi_range"]
elif "range" in "kwargs":
phi_range = kwargs["range"][2]
phi_range = list(phi_range) + [phi_bins + 1]
phi_bins = np.linspace(*phi_range)
bin_schemas = binnings.calculate_bins_nd(data, [radial_bins, theta_bins, phi_bins], *args,
check_nan=not dropna, **kwargs)
weights = kwargs.pop("weights", None)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(data, ndim=3,
binnings=bin_schemas,
weights=weights)
return SphericalHistogram(binnings=bin_schemas, frequencies=frequencies, errors2=errors2, missed=missed) | [
"def",
"spherical_histogram",
"(",
"data",
"=",
"None",
",",
"radial_bins",
"=",
"\"numpy\"",
",",
"theta_bins",
"=",
"16",
",",
"phi_bins",
"=",
"16",
",",
"transformed",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"dropna",
"=",
"kwargs",
".",
"pop",
"(",
"\"dropna\"",
",",
"True",
")",
"data",
"=",
"_prepare_data",
"(",
"data",
",",
"transformed",
"=",
"transformed",
",",
"klass",
"=",
"SphericalHistogram",
",",
"dropna",
"=",
"dropna",
")",
"if",
"isinstance",
"(",
"theta_bins",
",",
"int",
")",
":",
"theta_range",
"=",
"(",
"0",
",",
"np",
".",
"pi",
")",
"if",
"\"theta_range\"",
"in",
"\"kwargs\"",
":",
"theta_range",
"=",
"kwargs",
"[",
"\"theta_range\"",
"]",
"elif",
"\"range\"",
"in",
"\"kwargs\"",
":",
"theta_range",
"=",
"kwargs",
"[",
"\"range\"",
"]",
"[",
"1",
"]",
"theta_range",
"=",
"list",
"(",
"theta_range",
")",
"+",
"[",
"theta_bins",
"+",
"1",
"]",
"theta_bins",
"=",
"np",
".",
"linspace",
"(",
"*",
"theta_range",
")",
"if",
"isinstance",
"(",
"phi_bins",
",",
"int",
")",
":",
"phi_range",
"=",
"(",
"0",
",",
"2",
"*",
"np",
".",
"pi",
")",
"if",
"\"phi_range\"",
"in",
"\"kwargs\"",
":",
"phi_range",
"=",
"kwargs",
"[",
"\"phi_range\"",
"]",
"elif",
"\"range\"",
"in",
"\"kwargs\"",
":",
"phi_range",
"=",
"kwargs",
"[",
"\"range\"",
"]",
"[",
"2",
"]",
"phi_range",
"=",
"list",
"(",
"phi_range",
")",
"+",
"[",
"phi_bins",
"+",
"1",
"]",
"phi_bins",
"=",
"np",
".",
"linspace",
"(",
"*",
"phi_range",
")",
"bin_schemas",
"=",
"binnings",
".",
"calculate_bins_nd",
"(",
"data",
",",
"[",
"radial_bins",
",",
"theta_bins",
",",
"phi_bins",
"]",
",",
"*",
"args",
",",
"check_nan",
"=",
"not",
"dropna",
",",
"*",
"*",
"kwargs",
")",
"weights",
"=",
"kwargs",
".",
"pop",
"(",
"\"weights\"",
",",
"None",
")",
"frequencies",
",",
"errors2",
",",
"missed",
"=",
"histogram_nd",
".",
"calculate_frequencies",
"(",
"data",
",",
"ndim",
"=",
"3",
",",
"binnings",
"=",
"bin_schemas",
",",
"weights",
"=",
"weights",
")",
"return",
"SphericalHistogram",
"(",
"binnings",
"=",
"bin_schemas",
",",
"frequencies",
"=",
"frequencies",
",",
"errors2",
"=",
"errors2",
",",
"missed",
"=",
"missed",
")"
] | Facade construction function for the SphericalHistogram. | [
"Facade",
"construction",
"function",
"for",
"the",
"SphericalHistogram",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/special.py#L380-L412 | train |
janpipek/physt | physt/special.py | cylindrical_histogram | def cylindrical_histogram(data=None, rho_bins="numpy", phi_bins=16, z_bins="numpy", transformed=False, *args, **kwargs):
"""Facade construction function for the CylindricalHistogram.
"""
dropna = kwargs.pop("dropna", True)
data = _prepare_data(data, transformed=transformed, klass=CylindricalHistogram, dropna=dropna)
if isinstance(phi_bins, int):
phi_range = (0, 2 * np.pi)
if "phi_range" in "kwargs":
phi_range = kwargs["phi_range"]
elif "range" in "kwargs":
phi_range = kwargs["range"][1]
phi_range = list(phi_range) + [phi_bins + 1]
phi_bins = np.linspace(*phi_range)
bin_schemas = binnings.calculate_bins_nd(data, [rho_bins, phi_bins, z_bins], *args,
check_nan=not dropna, **kwargs)
weights = kwargs.pop("weights", None)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(data, ndim=3,
binnings=bin_schemas,
weights=weights)
return CylindricalHistogram(binnings=bin_schemas, frequencies=frequencies,
errors2=errors2, missed=missed) | python | def cylindrical_histogram(data=None, rho_bins="numpy", phi_bins=16, z_bins="numpy", transformed=False, *args, **kwargs):
"""Facade construction function for the CylindricalHistogram.
"""
dropna = kwargs.pop("dropna", True)
data = _prepare_data(data, transformed=transformed, klass=CylindricalHistogram, dropna=dropna)
if isinstance(phi_bins, int):
phi_range = (0, 2 * np.pi)
if "phi_range" in "kwargs":
phi_range = kwargs["phi_range"]
elif "range" in "kwargs":
phi_range = kwargs["range"][1]
phi_range = list(phi_range) + [phi_bins + 1]
phi_bins = np.linspace(*phi_range)
bin_schemas = binnings.calculate_bins_nd(data, [rho_bins, phi_bins, z_bins], *args,
check_nan=not dropna, **kwargs)
weights = kwargs.pop("weights", None)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(data, ndim=3,
binnings=bin_schemas,
weights=weights)
return CylindricalHistogram(binnings=bin_schemas, frequencies=frequencies,
errors2=errors2, missed=missed) | [
"def",
"cylindrical_histogram",
"(",
"data",
"=",
"None",
",",
"rho_bins",
"=",
"\"numpy\"",
",",
"phi_bins",
"=",
"16",
",",
"z_bins",
"=",
"\"numpy\"",
",",
"transformed",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"dropna",
"=",
"kwargs",
".",
"pop",
"(",
"\"dropna\"",
",",
"True",
")",
"data",
"=",
"_prepare_data",
"(",
"data",
",",
"transformed",
"=",
"transformed",
",",
"klass",
"=",
"CylindricalHistogram",
",",
"dropna",
"=",
"dropna",
")",
"if",
"isinstance",
"(",
"phi_bins",
",",
"int",
")",
":",
"phi_range",
"=",
"(",
"0",
",",
"2",
"*",
"np",
".",
"pi",
")",
"if",
"\"phi_range\"",
"in",
"\"kwargs\"",
":",
"phi_range",
"=",
"kwargs",
"[",
"\"phi_range\"",
"]",
"elif",
"\"range\"",
"in",
"\"kwargs\"",
":",
"phi_range",
"=",
"kwargs",
"[",
"\"range\"",
"]",
"[",
"1",
"]",
"phi_range",
"=",
"list",
"(",
"phi_range",
")",
"+",
"[",
"phi_bins",
"+",
"1",
"]",
"phi_bins",
"=",
"np",
".",
"linspace",
"(",
"*",
"phi_range",
")",
"bin_schemas",
"=",
"binnings",
".",
"calculate_bins_nd",
"(",
"data",
",",
"[",
"rho_bins",
",",
"phi_bins",
",",
"z_bins",
"]",
",",
"*",
"args",
",",
"check_nan",
"=",
"not",
"dropna",
",",
"*",
"*",
"kwargs",
")",
"weights",
"=",
"kwargs",
".",
"pop",
"(",
"\"weights\"",
",",
"None",
")",
"frequencies",
",",
"errors2",
",",
"missed",
"=",
"histogram_nd",
".",
"calculate_frequencies",
"(",
"data",
",",
"ndim",
"=",
"3",
",",
"binnings",
"=",
"bin_schemas",
",",
"weights",
"=",
"weights",
")",
"return",
"CylindricalHistogram",
"(",
"binnings",
"=",
"bin_schemas",
",",
"frequencies",
"=",
"frequencies",
",",
"errors2",
"=",
"errors2",
",",
"missed",
"=",
"missed",
")"
] | Facade construction function for the CylindricalHistogram. | [
"Facade",
"construction",
"function",
"for",
"the",
"CylindricalHistogram",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/special.py#L415-L439 | train |
janpipek/physt | physt/special.py | TransformedHistogramMixin.projection | def projection(self, *axes, **kwargs):
"""Projection to lower-dimensional histogram.
The inheriting class should implement the _projection_class_map
class attribute to suggest class for the projection. If the
arguments don't match any of the map keys, HistogramND is used.
"""
axes, _ = self._get_projection_axes(*axes)
axes = tuple(sorted(axes))
if axes in self._projection_class_map:
klass = self._projection_class_map[axes]
return HistogramND.projection(self, *axes, type=klass, **kwargs)
else:
return HistogramND.projection(self, *axes, **kwargs) | python | def projection(self, *axes, **kwargs):
"""Projection to lower-dimensional histogram.
The inheriting class should implement the _projection_class_map
class attribute to suggest class for the projection. If the
arguments don't match any of the map keys, HistogramND is used.
"""
axes, _ = self._get_projection_axes(*axes)
axes = tuple(sorted(axes))
if axes in self._projection_class_map:
klass = self._projection_class_map[axes]
return HistogramND.projection(self, *axes, type=klass, **kwargs)
else:
return HistogramND.projection(self, *axes, **kwargs) | [
"def",
"projection",
"(",
"self",
",",
"*",
"axes",
",",
"*",
"*",
"kwargs",
")",
":",
"axes",
",",
"_",
"=",
"self",
".",
"_get_projection_axes",
"(",
"*",
"axes",
")",
"axes",
"=",
"tuple",
"(",
"sorted",
"(",
"axes",
")",
")",
"if",
"axes",
"in",
"self",
".",
"_projection_class_map",
":",
"klass",
"=",
"self",
".",
"_projection_class_map",
"[",
"axes",
"]",
"return",
"HistogramND",
".",
"projection",
"(",
"self",
",",
"*",
"axes",
",",
"type",
"=",
"klass",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"HistogramND",
".",
"projection",
"(",
"self",
",",
"*",
"axes",
",",
"*",
"*",
"kwargs",
")"
] | Projection to lower-dimensional histogram.
The inheriting class should implement the _projection_class_map
class attribute to suggest class for the projection. If the
arguments don't match any of the map keys, HistogramND is used. | [
"Projection",
"to",
"lower",
"-",
"dimensional",
"histogram",
".",
"The",
"inheriting",
"class",
"should",
"implement",
"the",
"_projection_class_map",
"class",
"attribute",
"to",
"suggest",
"class",
"for",
"the",
"projection",
".",
"If",
"the",
"arguments",
"don",
"t",
"match",
"any",
"of",
"the",
"map",
"keys",
"HistogramND",
"is",
"used",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/special.py#L86-L99 | train |
janpipek/physt | physt/plotting/plotly.py | enable_collection | def enable_collection(f):
"""Call the wrapped function with a HistogramCollection as argument."""
@wraps(f)
def new_f(h: AbstractHistogram1D, **kwargs):
from physt.histogram_collection import HistogramCollection
if isinstance(h, HistogramCollection):
return f(h, **kwargs)
else:
return f(HistogramCollection(h), **kwargs)
return new_f | python | def enable_collection(f):
"""Call the wrapped function with a HistogramCollection as argument."""
@wraps(f)
def new_f(h: AbstractHistogram1D, **kwargs):
from physt.histogram_collection import HistogramCollection
if isinstance(h, HistogramCollection):
return f(h, **kwargs)
else:
return f(HistogramCollection(h), **kwargs)
return new_f | [
"def",
"enable_collection",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"new_f",
"(",
"h",
":",
"AbstractHistogram1D",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"physt",
".",
"histogram_collection",
"import",
"HistogramCollection",
"if",
"isinstance",
"(",
"h",
",",
"HistogramCollection",
")",
":",
"return",
"f",
"(",
"h",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"f",
"(",
"HistogramCollection",
"(",
"h",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"new_f"
] | Call the wrapped function with a HistogramCollection as argument. | [
"Call",
"the",
"wrapped",
"function",
"with",
"a",
"HistogramCollection",
"as",
"argument",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/plotly.py#L80-L89 | train |
janpipek/physt | physt/plotting/plotly.py | bar | def bar(h: "HistogramCollection", *,
barmode: str = DEFAULT_BARMODE,
alpha: float = DEFAULT_ALPHA,
**kwargs):
"""Bar plot.
Parameters
----------
alpha: Opacity (0.0 - 1.0)
barmode : "overlay" | "group" | "stack"
"""
get_data_kwargs = pop_many(kwargs, "density", "cumulative", "flatten")
data = [go.Bar(
x=histogram.bin_centers,
y=get_data(histogram, **get_data_kwargs),
width=histogram.bin_widths,
name=histogram.name,
opacity=alpha,
**kwargs
) for histogram in h]
layout = go.Layout(barmode=barmode)
_add_ticks(layout.xaxis, h[0], kwargs)
figure = go.Figure(data=data, layout=layout)
return figure | python | def bar(h: Histogram2D, *,
barmode: str = DEFAULT_BARMODE,
alpha: float = DEFAULT_ALPHA,
**kwargs):
"""Bar plot.
Parameters
----------
alpha: Opacity (0.0 - 1.0)
barmode : "overlay" | "group" | "stack"
"""
get_data_kwargs = pop_many(kwargs, "density", "cumulative", "flatten")
data = [go.Bar(
x=histogram.bin_centers,
y=get_data(histogram, **get_data_kwargs),
width=histogram.bin_widths,
name=histogram.name,
opacity=alpha,
**kwargs
) for histogram in h]
layout = go.Layout(barmode=barmode)
_add_ticks(layout.xaxis, h[0], kwargs)
figure = go.Figure(data=data, layout=layout)
return figure | [
"def",
"bar",
"(",
"h",
":",
"Histogram2D",
",",
"*",
",",
"barmode",
":",
"str",
"=",
"DEFAULT_BARMODE",
",",
"alpha",
":",
"float",
"=",
"DEFAULT_ALPHA",
",",
"*",
"*",
"kwargs",
")",
":",
"get_data_kwargs",
"=",
"pop_many",
"(",
"kwargs",
",",
"\"density\"",
",",
"\"cumulative\"",
",",
"\"flatten\"",
")",
"data",
"=",
"[",
"go",
".",
"Bar",
"(",
"x",
"=",
"histogram",
".",
"bin_centers",
",",
"y",
"=",
"get_data",
"(",
"histogram",
",",
"*",
"*",
"get_data_kwargs",
")",
",",
"width",
"=",
"histogram",
".",
"bin_widths",
",",
"name",
"=",
"histogram",
".",
"name",
",",
"opacity",
"=",
"alpha",
",",
"*",
"*",
"kwargs",
")",
"for",
"histogram",
"in",
"h",
"]",
"layout",
"=",
"go",
".",
"Layout",
"(",
"barmode",
"=",
"barmode",
")",
"_add_ticks",
"(",
"layout",
".",
"xaxis",
",",
"h",
"[",
"0",
"]",
",",
"kwargs",
")",
"figure",
"=",
"go",
".",
"Figure",
"(",
"data",
"=",
"data",
",",
"layout",
"=",
"layout",
")",
"return",
"figure"
] | Bar plot.
Parameters
----------
alpha: Opacity (0.0 - 1.0)
barmode : "overlay" | "group" | "stack" | [
"Bar",
"plot",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/plotly.py#L143-L169 | train |
janpipek/physt | physt/plotting/folium.py | _bins_to_json | def _bins_to_json(h2):
"""Create GeoJSON representation of histogram bins
Parameters
----------
h2: physt.histogram_nd.Histogram2D
A histogram of coordinates (in degrees)
Returns
-------
geo_json : dict
"""
south = h2.get_bin_left_edges(0)
north = h2.get_bin_right_edges(0)
west = h2.get_bin_left_edges(1)
east = h2.get_bin_right_edges(1)
return {
"type":"FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "Polygon",
# Note that folium and GeoJson have them swapped
"coordinates": [[
[east[j], south[i]],
[east[j], north[i]],
[west[j], north[i]],
[west[j], south[i]],
[east[j], south[i]]]],
},
"properties" : {
"count": float(h2.frequencies[i, j])
}
}
for i in range(h2.shape[0])
for j in range(h2.shape[1])
]
} | python | def _bins_to_json(h2):
"""Create GeoJSON representation of histogram bins
Parameters
----------
h2: physt.histogram_nd.Histogram2D
A histogram of coordinates (in degrees)
Returns
-------
geo_json : dict
"""
south = h2.get_bin_left_edges(0)
north = h2.get_bin_right_edges(0)
west = h2.get_bin_left_edges(1)
east = h2.get_bin_right_edges(1)
return {
"type":"FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "Polygon",
# Note that folium and GeoJson have them swapped
"coordinates": [[
[east[j], south[i]],
[east[j], north[i]],
[west[j], north[i]],
[west[j], south[i]],
[east[j], south[i]]]],
},
"properties" : {
"count": float(h2.frequencies[i, j])
}
}
for i in range(h2.shape[0])
for j in range(h2.shape[1])
]
} | [
"def",
"_bins_to_json",
"(",
"h2",
")",
":",
"south",
"=",
"h2",
".",
"get_bin_left_edges",
"(",
"0",
")",
"north",
"=",
"h2",
".",
"get_bin_right_edges",
"(",
"0",
")",
"west",
"=",
"h2",
".",
"get_bin_left_edges",
"(",
"1",
")",
"east",
"=",
"h2",
".",
"get_bin_right_edges",
"(",
"1",
")",
"return",
"{",
"\"type\"",
":",
"\"FeatureCollection\"",
",",
"\"features\"",
":",
"[",
"{",
"\"type\"",
":",
"\"Feature\"",
",",
"\"geometry\"",
":",
"{",
"\"type\"",
":",
"\"Polygon\"",
",",
"# Note that folium and GeoJson have them swapped",
"\"coordinates\"",
":",
"[",
"[",
"[",
"east",
"[",
"j",
"]",
",",
"south",
"[",
"i",
"]",
"]",
",",
"[",
"east",
"[",
"j",
"]",
",",
"north",
"[",
"i",
"]",
"]",
",",
"[",
"west",
"[",
"j",
"]",
",",
"north",
"[",
"i",
"]",
"]",
",",
"[",
"west",
"[",
"j",
"]",
",",
"south",
"[",
"i",
"]",
"]",
",",
"[",
"east",
"[",
"j",
"]",
",",
"south",
"[",
"i",
"]",
"]",
"]",
"]",
",",
"}",
",",
"\"properties\"",
":",
"{",
"\"count\"",
":",
"float",
"(",
"h2",
".",
"frequencies",
"[",
"i",
",",
"j",
"]",
")",
"}",
"}",
"for",
"i",
"in",
"range",
"(",
"h2",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"h2",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"}"
] | Create GeoJSON representation of histogram bins
Parameters
----------
h2: physt.histogram_nd.Histogram2D
A histogram of coordinates (in degrees)
Returns
-------
geo_json : dict | [
"Create",
"GeoJSON",
"representation",
"of",
"histogram",
"bins"
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/folium.py#L16-L54 | train |
janpipek/physt | physt/plotting/folium.py | geo_map | def geo_map(h2, map=None, tiles='stamenterrain', cmap="wk", alpha=0.5, lw=1, fit_bounds=None, layer_name=None):
"""Show rectangular grid over a map.
Parameters
----------
h2: physt.histogram_nd.Histogram2D
A histogram of coordinates (in degrees: latitude, longitude)
map : folium.folium.Map
Returns
-------
map : folium.folium.Map
"""
if not map:
latitude = h2.get_bin_centers(0).mean()
longitude = h2.get_bin_centers(1).mean()
zoom_start = 10
map = folium.Map(location=[latitude, longitude], tiles=tiles, zoom_start=zoom_start)
if fit_bounds is None:
fit_bounds = True
geo_json = _bins_to_json(h2)
if not layer_name:
layer_name = h2.name
from branca.colormap import LinearColormap
color_map = LinearColormap(cmap, vmin=h2.frequencies.min(), vmax=h2.frequencies.max())
# legend = folium.Html("<div>Legend</div>")
# legend_div = folium.Div("20%", "20%", "75%", "5%")
#
# legend_div.add_to(map)
# legend_div.add_child(legend)
#xx = h2.frequencies.max()
def styling_function(bin):
count = bin["properties"]["count"]
return {
"fillColor": color_map(count),
"color": "black",
"fillOpacity": alpha if count > 0 else 0,
"weight": lw,
# "strokeWidth": lw,
"opacity": alpha if count > 0 else 0,
}# .update(styling)
layer = folium.GeoJson(geo_json, style_function=styling_function, name=layer_name)
layer.add_to(map)
if fit_bounds:
map.fit_bounds(layer.get_bounds())
return map | python | def geo_map(h2, map=None, tiles='stamenterrain', cmap="wk", alpha=0.5, lw=1, fit_bounds=None, layer_name=None):
"""Show rectangular grid over a map.
Parameters
----------
h2: physt.histogram_nd.Histogram2D
A histogram of coordinates (in degrees: latitude, longitude)
map : folium.folium.Map
Returns
-------
map : folium.folium.Map
"""
if not map:
latitude = h2.get_bin_centers(0).mean()
longitude = h2.get_bin_centers(1).mean()
zoom_start = 10
map = folium.Map(location=[latitude, longitude], tiles=tiles, zoom_start=zoom_start)
if fit_bounds is None:
fit_bounds = True
geo_json = _bins_to_json(h2)
if not layer_name:
layer_name = h2.name
from branca.colormap import LinearColormap
color_map = LinearColormap(cmap, vmin=h2.frequencies.min(), vmax=h2.frequencies.max())
# legend = folium.Html("<div>Legend</div>")
# legend_div = folium.Div("20%", "20%", "75%", "5%")
#
# legend_div.add_to(map)
# legend_div.add_child(legend)
#xx = h2.frequencies.max()
def styling_function(bin):
count = bin["properties"]["count"]
return {
"fillColor": color_map(count),
"color": "black",
"fillOpacity": alpha if count > 0 else 0,
"weight": lw,
# "strokeWidth": lw,
"opacity": alpha if count > 0 else 0,
}# .update(styling)
layer = folium.GeoJson(geo_json, style_function=styling_function, name=layer_name)
layer.add_to(map)
if fit_bounds:
map.fit_bounds(layer.get_bounds())
return map | [
"def",
"geo_map",
"(",
"h2",
",",
"map",
"=",
"None",
",",
"tiles",
"=",
"'stamenterrain'",
",",
"cmap",
"=",
"\"wk\"",
",",
"alpha",
"=",
"0.5",
",",
"lw",
"=",
"1",
",",
"fit_bounds",
"=",
"None",
",",
"layer_name",
"=",
"None",
")",
":",
"if",
"not",
"map",
":",
"latitude",
"=",
"h2",
".",
"get_bin_centers",
"(",
"0",
")",
".",
"mean",
"(",
")",
"longitude",
"=",
"h2",
".",
"get_bin_centers",
"(",
"1",
")",
".",
"mean",
"(",
")",
"zoom_start",
"=",
"10",
"map",
"=",
"folium",
".",
"Map",
"(",
"location",
"=",
"[",
"latitude",
",",
"longitude",
"]",
",",
"tiles",
"=",
"tiles",
")",
"if",
"fit_bounds",
"==",
"None",
":",
"fit_bounds",
"=",
"True",
"geo_json",
"=",
"_bins_to_json",
"(",
"h2",
")",
"if",
"not",
"layer_name",
":",
"layer_name",
"=",
"h2",
".",
"name",
"from",
"branca",
".",
"colormap",
"import",
"LinearColormap",
"color_map",
"=",
"LinearColormap",
"(",
"cmap",
",",
"vmin",
"=",
"h2",
".",
"frequencies",
".",
"min",
"(",
")",
",",
"vmax",
"=",
"h2",
".",
"frequencies",
".",
"max",
"(",
")",
")",
"# legend = folium.Html(\"<div>Legend</div>\")",
"# legend_div = folium.Div(\"20%\", \"20%\", \"75%\", \"5%\")",
"#",
"# legend_div.add_to(map)",
"# legend_div.add_child(legend)",
"#xx = h2.frequencies.max()",
"def",
"styling_function",
"(",
"bin",
")",
":",
"count",
"=",
"bin",
"[",
"\"properties\"",
"]",
"[",
"\"count\"",
"]",
"return",
"{",
"\"fillColor\"",
":",
"color_map",
"(",
"count",
")",
",",
"\"color\"",
":",
"\"black\"",
",",
"\"fillOpacity\"",
":",
"alpha",
"if",
"count",
">",
"0",
"else",
"0",
",",
"\"weight\"",
":",
"lw",
",",
"# \"strokeWidth\": lw,",
"\"opacity\"",
":",
"alpha",
"if",
"count",
">",
"0",
"else",
"0",
",",
"}",
"# .update(styling)",
"layer",
"=",
"folium",
".",
"GeoJson",
"(",
"geo_json",
",",
"style_function",
"=",
"styling_function",
",",
"name",
"=",
"layer_name",
")",
"layer",
".",
"add_to",
"(",
"map",
")",
"if",
"fit_bounds",
":",
"map",
".",
"fit_bounds",
"(",
"layer",
".",
"get_bounds",
"(",
")",
")",
"return",
"map"
] | Show rectangular grid over a map.
Parameters
----------
h2: physt.histogram_nd.Histogram2D
A histogram of coordinates (in degrees: latitude, longitude)
map : folium.folium.Map
Returns
-------
map : folium.folium.Map | [
"Show",
"rectangular",
"grid",
"over",
"a",
"map",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/folium.py#L57-L110 | train |
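A minimal usage sketch, assuming physt, folium and branca are installed; the coordinate data and output file name are illustrative:

import numpy as np
import physt
from physt.plotting.folium import geo_map

lats = np.random.uniform(48.0, 51.0, 1000)   # degrees latitude
lons = np.random.uniform(14.0, 19.0, 1000)   # degrees longitude

# Axis 0 must be latitude and axis 1 longitude, matching _bins_to_json
h2 = physt.h2(lats, lons, 20)

m = geo_map(h2, alpha=0.6, layer_name="density")
m.save("density_map.html")   # folium maps serialize to standalone HTML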
janpipek/physt | physt/compat/geant4.py | load_csv | def load_csv(path):
"""Loads a histogram as output from Geant4 analysis tools in CSV format.
Parameters
----------
path: str
Path to the CSV file
Returns
-------
physt.histogram1d.Histogram1D or physt.histogram_nd.Histogram2D
"""
meta = []
data = []
with codecs.open(path, encoding="ASCII") as in_file:
for line in in_file:
if line.startswith("#"):
key, value = line[1:].strip().split(" ", 1)
meta.append((key, value)) # TODO: There are duplicate entries :-()
else:
try:
data.append([float(frag) for frag in line.split(",")])
except ValueError:
pass
data = np.asarray(data)
ndim = int(_get(meta, "dimension"))
if ndim == 1:
return _create_h1(data, meta)
elif ndim == 2:
return _create_h2(data, meta) | python | def load_csv(path):
"""Loads a histogram as output from Geant4 analysis tools in CSV format.
Parameters
----------
path: str
Path to the CSV file
Returns
-------
physt.histogram1d.Histogram1D or physt.histogram_nd.Histogram2D
"""
meta = []
data = []
with codecs.open(path, encoding="ASCII") as in_file:
for line in in_file:
if line.startswith("#"):
key, value = line[1:].strip().split(" ", 1)
meta.append((key, value)) # TODO: There are duplicate entries :-()
else:
try:
data.append([float(frag) for frag in line.split(",")])
except ValueError:
pass
data = np.asarray(data)
ndim = int(_get(meta, "dimension"))
if ndim == 1:
return _create_h1(data, meta)
elif ndim == 2:
return _create_h2(data, meta) | [
"def",
"load_csv",
"(",
"path",
")",
":",
"meta",
"=",
"[",
"]",
"data",
"=",
"[",
"]",
"with",
"codecs",
".",
"open",
"(",
"path",
",",
"encoding",
"=",
"\"ASCII\"",
")",
"as",
"in_file",
":",
"for",
"line",
"in",
"in_file",
":",
"if",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"key",
",",
"value",
"=",
"line",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\" \"",
",",
"1",
")",
"meta",
".",
"append",
"(",
"(",
"key",
",",
"value",
")",
")",
"# TODO: There are duplicit entries :-()",
"else",
":",
"try",
":",
"data",
".",
"append",
"(",
"[",
"float",
"(",
"frag",
")",
"for",
"frag",
"in",
"line",
".",
"split",
"(",
"\",\"",
")",
"]",
")",
"except",
":",
"pass",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"ndim",
"=",
"int",
"(",
"_get",
"(",
"meta",
",",
"\"dimension\"",
")",
")",
"if",
"ndim",
"==",
"1",
":",
"return",
"_create_h1",
"(",
"data",
",",
"meta",
")",
"elif",
"ndim",
"==",
"2",
":",
"return",
"_create_h2",
"(",
"data",
",",
"meta",
")"
] | Loads a histogram as output from Geant4 analysis tools in CSV format.
Parameters
----------
path: str
Path to the CSV file
Returns
-------
physt.histogram1d.Histogram1D or physt.histogram_nd.Histogram2D | [
"Loads",
"a",
"histogram",
"as",
"output",
"from",
"Geant4",
"analysis",
"tools",
"in",
"CSV",
"format",
"."
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/compat/geant4.py#L11-L40 | train |
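A usage sketch; the file name is a hypothetical product of the Geant4 analysis tools:

from physt.compat.geant4 import load_csv

h = load_csv("h1_Edep.csv")
print(h.bins)          # bin edges reconstructed from the CSV metadata
print(h.frequencies)   # bin contents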
janpipek/physt | physt/compat/geant4.py | _get | def _get(pseudodict, key, single=True):
"""Helper method for getting values from "multi-dict"s"""
matches = [item[1] for item in pseudodict if item[0] == key]
if single:
return matches[0]
else:
return matches | python | def _get(pseudodict, key, single=True):
"""Helper method for getting values from "multi-dict"s"""
matches = [item[1] for item in pseudodict if item[0] == key]
if single:
return matches[0]
else:
return matches | [
"def",
"_get",
"(",
"pseudodict",
",",
"key",
",",
"single",
"=",
"True",
")",
":",
"matches",
"=",
"[",
"item",
"[",
"1",
"]",
"for",
"item",
"in",
"pseudodict",
"if",
"item",
"[",
"0",
"]",
"==",
"key",
"]",
"if",
"single",
":",
"return",
"matches",
"[",
"0",
"]",
"else",
":",
"return",
"matches"
] | Helper method for getting values from "multi-dict"s | [
"Helper",
"method",
"for",
"getting",
"values",
"from",
"multi",
"-",
"dict",
"s"
] | 6dd441b073514e7728235f50b2352d56aacf38d4 | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/compat/geant4.py#L43-L49 | train |
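_get treats a list of (key, value) pairs as a multi-valued dictionary. A small illustration, assuming _get is imported from physt.compat.geant4:

meta = [("dimension", "1"), ("axis", "fixed"), ("axis", "log")]

_get(meta, "dimension")            # -> "1" (first match only)
_get(meta, "axis", single=False)   # -> ["fixed", "log"] (all matches)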
openid/python-openid | openid/yadis/xrires.py | ProxyResolver.queryURL | def queryURL(self, xri, service_type=None):
"""Build a URL to query the proxy resolver.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_type: The service type to resolve, if you desire
service endpoint selection. A service type is a URI.
@type service_type: str
@returns: a URL
@returntype: str
"""
# Trim off the xri:// prefix. The proxy resolver didn't accept it
# when this code was written, but that may (or may not) change for
# XRI Resolution 2.0 Working Draft 11.
qxri = toURINormal(xri)[6:]
hxri = self.proxy_url + qxri
args = {
# XXX: If the proxy resolver will ensure that it doesn't return
# bogus CanonicalIDs (as per Steve's message of 15 Aug 2006
# 11:13:42), then we could ask for application/xrd+xml instead,
# which would give us a bit less to process.
'_xrd_r': 'application/xrds+xml',
}
if service_type:
args['_xrd_t'] = service_type
else:
# Don't perform service endpoint selection.
args['_xrd_r'] += ';sep=false'
query = _appendArgs(hxri, args)
return query | python | def queryURL(self, xri, service_type=None):
"""Build a URL to query the proxy resolver.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_type: The service type to resolve, if you desire
service endpoint selection. A service type is a URI.
@type service_type: str
@returns: a URL
@returntype: str
"""
# Trim off the xri:// prefix. The proxy resolver didn't accept it
# when this code was written, but that may (or may not) change for
# XRI Resolution 2.0 Working Draft 11.
qxri = toURINormal(xri)[6:]
hxri = self.proxy_url + qxri
args = {
# XXX: If the proxy resolver will ensure that it doesn't return
# bogus CanonicalIDs (as per Steve's message of 15 Aug 2006
# 11:13:42), then we could ask for application/xrd+xml instead,
# which would give us a bit less to process.
'_xrd_r': 'application/xrds+xml',
}
if service_type:
args['_xrd_t'] = service_type
else:
# Don't perform service endpoint selection.
args['_xrd_r'] += ';sep=false'
query = _appendArgs(hxri, args)
return query | [
"def",
"queryURL",
"(",
"self",
",",
"xri",
",",
"service_type",
"=",
"None",
")",
":",
"# Trim off the xri:// prefix. The proxy resolver didn't accept it",
"# when this code was written, but that may (or may not) change for",
"# XRI Resolution 2.0 Working Draft 11.",
"qxri",
"=",
"toURINormal",
"(",
"xri",
")",
"[",
"6",
":",
"]",
"hxri",
"=",
"self",
".",
"proxy_url",
"+",
"qxri",
"args",
"=",
"{",
"# XXX: If the proxy resolver will ensure that it doesn't return",
"# bogus CanonicalIDs (as per Steve's message of 15 Aug 2006",
"# 11:13:42), then we could ask for application/xrd+xml instead,",
"# which would give us a bit less to process.",
"'_xrd_r'",
":",
"'application/xrds+xml'",
",",
"}",
"if",
"service_type",
":",
"args",
"[",
"'_xrd_t'",
"]",
"=",
"service_type",
"else",
":",
"# Don't perform service endpoint selection.",
"args",
"[",
"'_xrd_r'",
"]",
"+=",
"';sep=false'",
"query",
"=",
"_appendArgs",
"(",
"hxri",
",",
"args",
")",
"return",
"query"
] | Build a URL to query the proxy resolver.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_type: The service type to resolve, if you desire
service endpoint selection. A service type is a URI.
@type service_type: str
@returns: a URL
@returntype: str | [
"Build",
"a",
"URL",
"to",
"query",
"the",
"proxy",
"resolver",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/xrires.py#L20-L51 | train |
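queryURL only builds a string, so it can be sketched without network access (the exact query-parameter order and escaping in the result may differ):

from openid.yadis.xrires import ProxyResolver

resolver = ProxyResolver()   # defaults to the public proxy, http://proxy.xri.net/
url = resolver.queryURL('=example', 'http://openid.net/signon/1.0')
# url is roughly:
# http://proxy.xri.net/=example?_xrd_r=application%2Fxrds%2Bxml&_xrd_t=http%3A%2F%2Fopenid.net%2Fsignon%2F1.0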
openid/python-openid | openid/yadis/xrires.py | ProxyResolver.query | def query(self, xri, service_types):
"""Resolve some services for an XRI.
Note: I don't implement any service endpoint selection beyond what
the resolver I'm querying does, so the Services I return may well
include Services that were not of the types you asked for.
May raise fetchers.HTTPFetchingError or L{etxrd.XRDSError} if
the fetching or parsing don't go so well.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_types: A list of services types to query for. Service
types are URIs.
@type service_types: list of str
@returns: tuple of (CanonicalID, Service elements)
@returntype: (unicode, list of C{ElementTree.Element}s)
"""
# FIXME: No test coverage!
services = []
# Make a separate request to the proxy resolver for each service
# type, as, if it is following Refs, it could return a different
# XRDS for each.
canonicalID = None
for service_type in service_types:
url = self.queryURL(xri, service_type)
response = fetchers.fetch(url)
if response.status not in (200, 206):
# XXX: sucks to fail silently.
# print "response not OK:", response
continue
et = etxrd.parseXRDS(response.body)
canonicalID = etxrd.getCanonicalID(xri, et)
some_services = list(iterServices(et))
services.extend(some_services)
# TODO:
# * If we do get hits for multiple service_types, we're almost
# certainly going to have duplicated service entries and
# broken priority ordering.
return canonicalID, services | python | def query(self, xri, service_types):
"""Resolve some services for an XRI.
Note: I don't implement any service endpoint selection beyond what
the resolver I'm querying does, so the Services I return may well
include Services that were not of the types you asked for.
May raise fetchers.HTTPFetchingError or L{etxrd.XRDSError} if
the fetching or parsing don't go so well.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_types: A list of services types to query for. Service
types are URIs.
@type service_types: list of str
@returns: tuple of (CanonicalID, Service elements)
@returntype: (unicode, list of C{ElementTree.Element}s)
"""
# FIXME: No test coverage!
services = []
# Make a separate request to the proxy resolver for each service
# type, as, if it is following Refs, it could return a different
# XRDS for each.
canonicalID = None
for service_type in service_types:
url = self.queryURL(xri, service_type)
response = fetchers.fetch(url)
if response.status not in (200, 206):
# XXX: sucks to fail silently.
# print "response not OK:", response
continue
et = etxrd.parseXRDS(response.body)
canonicalID = etxrd.getCanonicalID(xri, et)
some_services = list(iterServices(et))
services.extend(some_services)
# TODO:
# * If we do get hits for multiple service_types, we're almost
# certainly going to have duplicated service entries and
# broken priority ordering.
return canonicalID, services | [
"def",
"query",
"(",
"self",
",",
"xri",
",",
"service_types",
")",
":",
"# FIXME: No test coverage!",
"services",
"=",
"[",
"]",
"# Make a seperate request to the proxy resolver for each service",
"# type, as, if it is following Refs, it could return a different",
"# XRDS for each.",
"canonicalID",
"=",
"None",
"for",
"service_type",
"in",
"service_types",
":",
"url",
"=",
"self",
".",
"queryURL",
"(",
"xri",
",",
"service_type",
")",
"response",
"=",
"fetchers",
".",
"fetch",
"(",
"url",
")",
"if",
"response",
".",
"status",
"not",
"in",
"(",
"200",
",",
"206",
")",
":",
"# XXX: sucks to fail silently.",
"# print \"response not OK:\", response",
"continue",
"et",
"=",
"etxrd",
".",
"parseXRDS",
"(",
"response",
".",
"body",
")",
"canonicalID",
"=",
"etxrd",
".",
"getCanonicalID",
"(",
"xri",
",",
"et",
")",
"some_services",
"=",
"list",
"(",
"iterServices",
"(",
"et",
")",
")",
"services",
".",
"extend",
"(",
"some_services",
")",
"# TODO:",
"# * If we do get hits for multiple service_types, we're almost",
"# certainly going to have duplicated service entries and",
"# broken priority ordering.",
"return",
"canonicalID",
",",
"services"
] | Resolve some services for an XRI.
Note: I don't implement any service endpoint selection beyond what
the resolver I'm querying does, so the Services I return may well
include Services that were not of the types you asked for.
May raise fetchers.HTTPFetchingError or L{etxrd.XRDSError} if
the fetching or parsing don't go so well.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_types: A list of services types to query for. Service
types are URIs.
@type service_types: list of str
@returns: tuple of (CanonicalID, Service elements)
@returntype: (unicode, list of C{ElementTree.Element}s) | [
"Resolve",
"some",
"services",
"for",
"an",
"XRI",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/xrires.py#L54-L97 | train |
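A network-dependent sketch (it fetches from the configured XRI proxy, so it is not run here; the XRI and service type are illustrative):

from openid.yadis.xrires import ProxyResolver

resolver = ProxyResolver()
canonical_id, services = resolver.query(
    '=example', ['http://openid.net/signon/1.0'])
# canonical_id is the persistent CanonicalID from the XRDS (or None),
# and services is a list of ElementTree <Service> elements, possibly
# containing duplicates if several service types matched.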
openid/python-openid | openid/yadis/accept.py | generateAcceptHeader | def generateAcceptHeader(*elements):
"""Generate an accept header value
[str or (str, float)] -> str
"""
parts = []
for element in elements:
if type(element) is str:
qs = "1.0"
mtype = element
else:
mtype, q = element
q = float(q)
if q > 1 or q <= 0:
raise ValueError('Invalid preference factor: %r' % q)
qs = '%0.1f' % (q,)
parts.append((qs, mtype))
parts.sort()
chunks = []
for q, mtype in parts:
if q == '1.0':
chunks.append(mtype)
else:
chunks.append('%s; q=%s' % (mtype, q))
return ', '.join(chunks) | python | def generateAcceptHeader(*elements):
"""Generate an accept header value
[str or (str, float)] -> str
"""
parts = []
for element in elements:
if type(element) is str:
qs = "1.0"
mtype = element
else:
mtype, q = element
q = float(q)
if q > 1 or q <= 0:
raise ValueError('Invalid preference factor: %r' % q)
qs = '%0.1f' % (q,)
parts.append((qs, mtype))
parts.sort()
chunks = []
for q, mtype in parts:
if q == '1.0':
chunks.append(mtype)
else:
chunks.append('%s; q=%s' % (mtype, q))
return ', '.join(chunks) | [
"def",
"generateAcceptHeader",
"(",
"*",
"elements",
")",
":",
"parts",
"=",
"[",
"]",
"for",
"element",
"in",
"elements",
":",
"if",
"type",
"(",
"element",
")",
"is",
"str",
":",
"qs",
"=",
"\"1.0\"",
"mtype",
"=",
"element",
"else",
":",
"mtype",
",",
"q",
"=",
"element",
"q",
"=",
"float",
"(",
"q",
")",
"if",
"q",
">",
"1",
"or",
"q",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'Invalid preference factor: %r'",
"%",
"q",
")",
"qs",
"=",
"'%0.1f'",
"%",
"(",
"q",
",",
")",
"parts",
".",
"append",
"(",
"(",
"qs",
",",
"mtype",
")",
")",
"parts",
".",
"sort",
"(",
")",
"chunks",
"=",
"[",
"]",
"for",
"q",
",",
"mtype",
"in",
"parts",
":",
"if",
"q",
"==",
"'1.0'",
":",
"chunks",
".",
"append",
"(",
"mtype",
")",
"else",
":",
"chunks",
".",
"append",
"(",
"'%s; q=%s'",
"%",
"(",
"mtype",
",",
"q",
")",
")",
"return",
"', '",
".",
"join",
"(",
"chunks",
")"
] | Generate an accept header value
[str or (str, float)] -> str | [
"Generate",
"an",
"accept",
"header",
"value"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/accept.py#L5-L33 | train |
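Two deterministic illustrations of the serialization (entries with q=1.0 are emitted without a q parameter):

from openid.yadis.accept import generateAcceptHeader

generateAcceptHeader('application/xrds+xml', ('text/html', 0.3))
# -> 'text/html; q=0.3, application/xrds+xml'

generateAcceptHeader(('text/plain', 1.5))
# raises ValueError: preference factors must lie in (0, 1]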
openid/python-openid | openid/yadis/accept.py | parseAcceptHeader | def parseAcceptHeader(value):
"""Parse an accept header, ignoring any accept-extensions
returns a list of tuples containing main MIME type, MIME subtype,
and quality markdown.
str -> [(str, str, float)]
"""
chunks = [chunk.strip() for chunk in value.split(',')]
accept = []
for chunk in chunks:
parts = [s.strip() for s in chunk.split(';')]
mtype = parts.pop(0)
if '/' not in mtype:
# This is not a MIME type, so ignore the bad data
continue
main, sub = mtype.split('/', 1)
for ext in parts:
if '=' in ext:
k, v = ext.split('=', 1)
if k == 'q':
try:
q = float(v)
break
except ValueError:
# Ignore poorly formed q-values
pass
else:
q = 1.0
accept.append((q, main, sub))
accept.sort()
accept.reverse()
return [(main, sub, q) for (q, main, sub) in accept] | python | def parseAcceptHeader(value):
"""Parse an accept header, ignoring any accept-extensions
returns a list of tuples containing main MIME type, MIME subtype,
and quality markdown.
str -> [(str, str, float)]
"""
chunks = [chunk.strip() for chunk in value.split(',')]
accept = []
for chunk in chunks:
parts = [s.strip() for s in chunk.split(';')]
mtype = parts.pop(0)
if '/' not in mtype:
# This is not a MIME type, so ignore the bad data
continue
main, sub = mtype.split('/', 1)
for ext in parts:
if '=' in ext:
k, v = ext.split('=', 1)
if k == 'q':
try:
q = float(v)
break
except ValueError:
# Ignore poorly formed q-values
pass
else:
q = 1.0
accept.append((q, main, sub))
accept.sort()
accept.reverse()
return [(main, sub, q) for (q, main, sub) in accept] | [
"def",
"parseAcceptHeader",
"(",
"value",
")",
":",
"chunks",
"=",
"[",
"chunk",
".",
"strip",
"(",
")",
"for",
"chunk",
"in",
"value",
".",
"split",
"(",
"','",
")",
"]",
"accept",
"=",
"[",
"]",
"for",
"chunk",
"in",
"chunks",
":",
"parts",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"chunk",
".",
"split",
"(",
"';'",
")",
"]",
"mtype",
"=",
"parts",
".",
"pop",
"(",
"0",
")",
"if",
"'/'",
"not",
"in",
"mtype",
":",
"# This is not a MIME type, so ignore the bad data",
"continue",
"main",
",",
"sub",
"=",
"mtype",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"for",
"ext",
"in",
"parts",
":",
"if",
"'='",
"in",
"ext",
":",
"k",
",",
"v",
"=",
"ext",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"k",
"==",
"'q'",
":",
"try",
":",
"q",
"=",
"float",
"(",
"v",
")",
"break",
"except",
"ValueError",
":",
"# Ignore poorly formed q-values",
"pass",
"else",
":",
"q",
"=",
"1.0",
"accept",
".",
"append",
"(",
"(",
"q",
",",
"main",
",",
"sub",
")",
")",
"accept",
".",
"sort",
"(",
")",
"accept",
".",
"reverse",
"(",
")",
"return",
"[",
"(",
"main",
",",
"sub",
",",
"q",
")",
"for",
"(",
"q",
",",
"main",
",",
"sub",
")",
"in",
"accept",
"]"
] | Parse an accept header, ignoring any accept-extensions
returns a list of tuples containing main MIME type, MIME subtype,
and quality value (the q parameter).
str -> [(str, str, float)] | [
"Parse",
"an",
"accept",
"header",
"ignoring",
"any",
"accept",
"-",
"extensions"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/accept.py#L35-L72 | train |
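An illustration; results are sorted by descending quality, and entries without a '/' are silently dropped:

from openid.yadis.accept import parseAcceptHeader

parseAcceptHeader('text/html; q=0.3, application/xrds+xml, bogus')
# -> [('application', 'xrds+xml', 1.0), ('text', 'html', 0.3)]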
openid/python-openid | openid/yadis/accept.py | getAcceptable | def getAcceptable(accept_header, have_types):
"""Parse the accept header and return a list of available types in
preferred order. If a type is unacceptable, it will not be in the
resulting list.
This is a convenience wrapper around matchTypes and
parseAcceptHeader.
(str, [str]) -> [str]
"""
accepted = parseAcceptHeader(accept_header)
preferred = matchTypes(accepted, have_types)
return [mtype for (mtype, _) in preferred] | python | def getAcceptable(accept_header, have_types):
"""Parse the accept header and return a list of available types in
preferred order. If a type is unacceptable, it will not be in the
resulting list.
This is a convenience wrapper around matchTypes and
parseAcceptHeader.
(str, [str]) -> [str]
"""
accepted = parseAcceptHeader(accept_header)
preferred = matchTypes(accepted, have_types)
return [mtype for (mtype, _) in preferred] | [
"def",
"getAcceptable",
"(",
"accept_header",
",",
"have_types",
")",
":",
"accepted",
"=",
"parseAcceptHeader",
"(",
"accept_header",
")",
"preferred",
"=",
"matchTypes",
"(",
"accepted",
",",
"have_types",
")",
"return",
"[",
"mtype",
"for",
"(",
"mtype",
",",
"_",
")",
"in",
"preferred",
"]"
] | Parse the accept header and return a list of available types in
preferred order. If a type is unacceptable, it will not be in the
resulting list.
This is a convenience wrapper around matchTypes and
parseAcceptHeader.
(str, [str]) -> [str] | [
"Parse",
"the",
"accept",
"header",
"and",
"return",
"a",
"list",
"of",
"available",
"types",
"in",
"preferred",
"order",
".",
"If",
"a",
"type",
"is",
"unacceptable",
"it",
"will",
"not",
"be",
"in",
"the",
"resulting",
"list",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/accept.py#L121-L133 | train |
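A sketch of typical use when choosing a response type for a Yadis request; the exact ordering is delegated to matchTypes, defined elsewhere in this module:

from openid.yadis.accept import getAcceptable

header = 'text/html; q=0.3, application/xrds+xml'
getAcceptable(header, ['text/html', 'application/xrds+xml'])
# should yield ['application/xrds+xml', 'text/html'] -- the available
# types ordered by the client's stated preference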
openid/python-openid | openid/message.py | Message.fromPostArgs | def fromPostArgs(cls, args):
"""Construct a Message containing a set of POST arguments.
"""
self = cls()
# Partition into "openid." args and bare args
openid_args = {}
for key, value in args.items():
if isinstance(value, list):
raise TypeError("query dict must have one value for each key, "
"not lists of values. Query is %r" % (args,))
try:
prefix, rest = key.split('.', 1)
except ValueError:
prefix = None
if prefix != 'openid':
self.args[(BARE_NS, key)] = value
else:
openid_args[rest] = value
self._fromOpenIDArgs(openid_args)
return self | python | def fromPostArgs(cls, args):
"""Construct a Message containing a set of POST arguments.
"""
self = cls()
# Partition into "openid." args and bare args
openid_args = {}
for key, value in args.items():
if isinstance(value, list):
raise TypeError("query dict must have one value for each key, "
"not lists of values. Query is %r" % (args,))
try:
prefix, rest = key.split('.', 1)
except ValueError:
prefix = None
if prefix != 'openid':
self.args[(BARE_NS, key)] = value
else:
openid_args[rest] = value
self._fromOpenIDArgs(openid_args)
return self | [
"def",
"fromPostArgs",
"(",
"cls",
",",
"args",
")",
":",
"self",
"=",
"cls",
"(",
")",
"# Partition into \"openid.\" args and bare args",
"openid_args",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"args",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"query dict must have one value for each key, \"",
"\"not lists of values. Query is %r\"",
"%",
"(",
"args",
",",
")",
")",
"try",
":",
"prefix",
",",
"rest",
"=",
"key",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"except",
"ValueError",
":",
"prefix",
"=",
"None",
"if",
"prefix",
"!=",
"'openid'",
":",
"self",
".",
"args",
"[",
"(",
"BARE_NS",
",",
"key",
")",
"]",
"=",
"value",
"else",
":",
"openid_args",
"[",
"rest",
"]",
"=",
"value",
"self",
".",
"_fromOpenIDArgs",
"(",
"openid_args",
")",
"return",
"self"
] | Construct a Message containing a set of POST arguments. | [
"Construct",
"a",
"Message",
"containing",
"a",
"set",
"of",
"POST",
"arguments",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/message.py#L143-L169 | train |
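A minimal sketch; OPENID2_NS and BARE_NS are constants defined in this module:

from openid.message import Message, OPENID2_NS, BARE_NS

post_args = {
    'openid.mode': 'id_res',
    'openid.ns': 'http://specs.openid.net/auth/2.0',
    'extra': 'value',   # no 'openid.' prefix, so it becomes a bare argument
}
msg = Message.fromPostArgs(post_args)
msg.getArg(OPENID2_NS, 'mode')   # -> 'id_res'
msg.getArg(BARE_NS, 'extra')     # -> 'value'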
openid/python-openid | openid/message.py | Message.getKey | def getKey(self, namespace, ns_key):
"""Get the key for a particular namespaced argument"""
namespace = self._fixNS(namespace)
if namespace == BARE_NS:
return ns_key
ns_alias = self.namespaces.getAlias(namespace)
# No alias is defined, so no key can exist
if ns_alias is None:
return None
if ns_alias == NULL_NAMESPACE:
tail = ns_key
else:
tail = '%s.%s' % (ns_alias, ns_key)
return 'openid.' + tail | python | def getKey(self, namespace, ns_key):
"""Get the key for a particular namespaced argument"""
namespace = self._fixNS(namespace)
if namespace == BARE_NS:
return ns_key
ns_alias = self.namespaces.getAlias(namespace)
# No alias is defined, so no key can exist
if ns_alias is None:
return None
if ns_alias == NULL_NAMESPACE:
tail = ns_key
else:
tail = '%s.%s' % (ns_alias, ns_key)
return 'openid.' + tail | [
"def",
"getKey",
"(",
"self",
",",
"namespace",
",",
"ns_key",
")",
":",
"namespace",
"=",
"self",
".",
"_fixNS",
"(",
"namespace",
")",
"if",
"namespace",
"==",
"BARE_NS",
":",
"return",
"ns_key",
"ns_alias",
"=",
"self",
".",
"namespaces",
".",
"getAlias",
"(",
"namespace",
")",
"# No alias is defined, so no key can exist",
"if",
"ns_alias",
"is",
"None",
":",
"return",
"None",
"if",
"ns_alias",
"==",
"NULL_NAMESPACE",
":",
"tail",
"=",
"ns_key",
"else",
":",
"tail",
"=",
"'%s.%s'",
"%",
"(",
"ns_alias",
",",
"ns_key",
")",
"return",
"'openid.'",
"+",
"tail"
] | Get the key for a particular namespaced argument | [
"Get",
"the",
"key",
"for",
"a",
"particular",
"namespaced",
"argument"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/message.py#L402-L419 | train |
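A sketch of how (namespace, key) pairs map back to wire-level argument names:

from openid.message import Message, OPENID2_NS, BARE_NS

msg = Message(OPENID2_NS)
msg.getKey(OPENID2_NS, 'mode')   # -> 'openid.mode'
msg.getKey(BARE_NS, 'sid')       # -> 'sid' (bare keys pass through unchanged)
msg.getKey('http://example.com/unknown', 'x')   # -> None (no alias defined)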
openid/python-openid | openid/message.py | Message.getArg | def getArg(self, namespace, key, default=None):
"""Get a value for a namespaced key.
@param namespace: The namespace in the message for this key
@type namespace: str
@param key: The key to get within this namespace
@type key: str
@param default: The value to use if this key is absent from
this message. Using the special value
openid.message.no_default will result in this method
raising a KeyError instead of returning the default.
@rtype: str or the type of default
@raises KeyError: if default is no_default
@raises UndefinedOpenIDNamespace: if the message has not yet
had an OpenID namespace set
"""
namespace = self._fixNS(namespace)
args_key = (namespace, key)
try:
return self.args[args_key]
except KeyError:
if default is no_default:
raise KeyError((namespace, key))
else:
return default | python | def getArg(self, namespace, key, default=None):
"""Get a value for a namespaced key.
@param namespace: The namespace in the message for this key
@type namespace: str
@param key: The key to get within this namespace
@type key: str
@param default: The value to use if this key is absent from
this message. Using the special value
openid.message.no_default will result in this method
raising a KeyError instead of returning the default.
@rtype: str or the type of default
@raises KeyError: if default is no_default
@raises UndefinedOpenIDNamespace: if the message has not yet
had an OpenID namespace set
"""
namespace = self._fixNS(namespace)
args_key = (namespace, key)
try:
return self.args[args_key]
except KeyError:
if default is no_default:
raise KeyError((namespace, key))
else:
return default | [
"def",
"getArg",
"(",
"self",
",",
"namespace",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"namespace",
"=",
"self",
".",
"_fixNS",
"(",
"namespace",
")",
"args_key",
"=",
"(",
"namespace",
",",
"key",
")",
"try",
":",
"return",
"self",
".",
"args",
"[",
"args_key",
"]",
"except",
"KeyError",
":",
"if",
"default",
"is",
"no_default",
":",
"raise",
"KeyError",
"(",
"(",
"namespace",
",",
"key",
")",
")",
"else",
":",
"return",
"default"
] | Get a value for a namespaced key.
@param namespace: The namespace in the message for this key
@type namespace: str
@param key: The key to get within this namespace
@type key: str
@param default: The value to use if this key is absent from
this message. Using the special value
openid.message.no_default will result in this method
raising a KeyError instead of returning the default.
@rtype: str or the type of default
@raises KeyError: if default is no_default
@raises UndefinedOpenIDNamespace: if the message has not yet
had an OpenID namespace set | [
"Get",
"a",
"value",
"for",
"a",
"namespaced",
"key",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/message.py#L421-L448 | train |
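Behavior sketch, including the no_default sentinel:

from openid.message import Message, OPENID2_NS, no_default

msg = Message(OPENID2_NS)
msg.setArg(OPENID2_NS, 'mode', 'id_res')

msg.getArg(OPENID2_NS, 'mode')                     # -> 'id_res'
msg.getArg(OPENID2_NS, 'claimed_id')               # -> None
msg.getArg(OPENID2_NS, 'claimed_id', 'unknown')    # -> 'unknown'
msg.getArg(OPENID2_NS, 'claimed_id', no_default)   # raises KeyError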
openid/python-openid | openid/message.py | NamespaceMap.add | def add(self, namespace_uri):
"""Add this namespace URI to the mapping, without caring what
alias it ends up with"""
# See if this namespace is already mapped to an alias
alias = self.namespace_to_alias.get(namespace_uri)
if alias is not None:
return alias
# Fall back to generating a numerical alias
i = 0
while True:
alias = 'ext' + str(i)
try:
self.addAlias(namespace_uri, alias)
except KeyError:
i += 1
else:
return alias
assert False, "Not reached" | python | def add(self, namespace_uri):
"""Add this namespace URI to the mapping, without caring what
alias it ends up with"""
# See if this namespace is already mapped to an alias
alias = self.namespace_to_alias.get(namespace_uri)
if alias is not None:
return alias
# Fall back to generating a numerical alias
i = 0
while True:
alias = 'ext' + str(i)
try:
self.addAlias(namespace_uri, alias)
except KeyError:
i += 1
else:
return alias
assert False, "Not reached" | [
"def",
"add",
"(",
"self",
",",
"namespace_uri",
")",
":",
"# See if this namespace is already mapped to an alias",
"alias",
"=",
"self",
".",
"namespace_to_alias",
".",
"get",
"(",
"namespace_uri",
")",
"if",
"alias",
"is",
"not",
"None",
":",
"return",
"alias",
"# Fall back to generating a numerical alias",
"i",
"=",
"0",
"while",
"True",
":",
"alias",
"=",
"'ext'",
"+",
"str",
"(",
"i",
")",
"try",
":",
"self",
".",
"addAlias",
"(",
"namespace_uri",
",",
"alias",
")",
"except",
"KeyError",
":",
"i",
"+=",
"1",
"else",
":",
"return",
"alias",
"assert",
"False",
",",
"\"Not reached\""
] | Add this namespace URI to the mapping, without caring what
alias it ends up with | [
"Add",
"this",
"namespace",
"URI",
"to",
"the",
"mapping",
"without",
"caring",
"what",
"alias",
"it",
"ends",
"up",
"with"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/message.py#L604-L623 | train |
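Behavior sketch; the URIs are illustrative:

from openid.message import NamespaceMap

nsmap = NamespaceMap()
nsmap.add('http://example.com/ext/one')   # -> 'ext0' (first free numeric alias)
nsmap.add('http://example.com/ext/two')   # -> 'ext1'
nsmap.add('http://example.com/ext/one')   # -> 'ext0' (already mapped, reused)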
openid/python-openid | openid/yadis/parsehtml.py | findHTMLMeta | def findHTMLMeta(stream):
"""Look for a meta http-equiv tag with the YADIS header name.
@param stream: Source of the html text
@type stream: Object that implements a read() method that works
like file.read
@return: The URI from which to fetch the XRDS document
@rtype: str
@raises MetaNotFound: raised with the content that was
searched as the first parameter.
"""
parser = YadisHTMLParser()
chunks = []
while 1:
chunk = stream.read(CHUNK_SIZE)
if not chunk:
# End of file
break
chunks.append(chunk)
try:
parser.feed(chunk)
except HTMLParseError, why:
# HTML parse error, so bail
chunks.append(stream.read())
break
except ParseDone, why:
uri = why[0]
if uri is None:
# Parse finished, but we may need the rest of the file
chunks.append(stream.read())
break
else:
return uri
content = ''.join(chunks)
raise MetaNotFound(content) | python | def findHTMLMeta(stream):
"""Look for a meta http-equiv tag with the YADIS header name.
@param stream: Source of the html text
@type stream: Object that implements a read() method that works
like file.read
@return: The URI from which to fetch the XRDS document
@rtype: str
@raises MetaNotFound: raised with the content that was
searched as the first parameter.
"""
parser = YadisHTMLParser()
chunks = []
while 1:
chunk = stream.read(CHUNK_SIZE)
if not chunk:
# End of file
break
chunks.append(chunk)
try:
parser.feed(chunk)
except HTMLParseError, why:
# HTML parse error, so bail
chunks.append(stream.read())
break
except ParseDone, why:
uri = why[0]
if uri is None:
# Parse finished, but we may need the rest of the file
chunks.append(stream.read())
break
else:
return uri
content = ''.join(chunks)
raise MetaNotFound(content) | [
"def",
"findHTMLMeta",
"(",
"stream",
")",
":",
"parser",
"=",
"YadisHTMLParser",
"(",
")",
"chunks",
"=",
"[",
"]",
"while",
"1",
":",
"chunk",
"=",
"stream",
".",
"read",
"(",
"CHUNK_SIZE",
")",
"if",
"not",
"chunk",
":",
"# End of file",
"break",
"chunks",
".",
"append",
"(",
"chunk",
")",
"try",
":",
"parser",
".",
"feed",
"(",
"chunk",
")",
"except",
"HTMLParseError",
",",
"why",
":",
"# HTML parse error, so bail",
"chunks",
".",
"append",
"(",
"stream",
".",
"read",
"(",
")",
")",
"break",
"except",
"ParseDone",
",",
"why",
":",
"uri",
"=",
"why",
"[",
"0",
"]",
"if",
"uri",
"is",
"None",
":",
"# Parse finished, but we may need the rest of the file",
"chunks",
".",
"append",
"(",
"stream",
".",
"read",
"(",
")",
")",
"break",
"else",
":",
"return",
"uri",
"content",
"=",
"''",
".",
"join",
"(",
"chunks",
")",
"raise",
"MetaNotFound",
"(",
"content",
")"
] | Look for a meta http-equiv tag with the YADIS header name.
@param stream: Source of the html text
@type stream: Object that implements a read() method that works
like file.read
@return: The URI from which to fetch the XRDS document
@rtype: str
@raises MetaNotFound: raised with the content that was
searched as the first parameter. | [
"Look",
"for",
"a",
"meta",
"http",
"-",
"equiv",
"tag",
"with",
"the",
"YADIS",
"header",
"name",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/parsehtml.py#L158-L197 | train |
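A sketch using an in-memory stream (StringIO, since this codebase targets Python 2); X-XRDS-Location is the Yadis meta header this parser looks for:

from StringIO import StringIO
from openid.yadis.parsehtml import findHTMLMeta

html = ('<html><head>'
        '<meta http-equiv="X-XRDS-Location"'
        ' content="http://example.com/xrds">'
        '</head><body></body></html>')
findHTMLMeta(StringIO(html))   # -> 'http://example.com/xrds'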
openid/python-openid | openid/extensions/ax.py | toTypeURIs | def toTypeURIs(namespace_map, alias_list_s):
"""Given a namespace mapping and a string containing a
comma-separated list of namespace aliases, return a list of type
URIs that correspond to those aliases.
@param namespace_map: The mapping from namespace URI to alias
@type namespace_map: openid.message.NamespaceMap
@param alias_list_s: The string containing the comma-separated
list of aliases. May also be None for convenience.
@type alias_list_s: str or NoneType
@returns: The list of namespace URIs that corresponds to the
supplied list of aliases. If the string was zero-length or
None, an empty list will be returned.
@raise KeyError: If an alias is present in the list of aliases but
is not present in the namespace map.
"""
uris = []
if alias_list_s:
for alias in alias_list_s.split(','):
type_uri = namespace_map.getNamespaceURI(alias)
if type_uri is None:
raise KeyError(
'No type is defined for attribute name %r' % (alias,))
else:
uris.append(type_uri)
return uris | python | def toTypeURIs(namespace_map, alias_list_s):
"""Given a namespace mapping and a string containing a
comma-separated list of namespace aliases, return a list of type
URIs that correspond to those aliases.
@param namespace_map: The mapping from namespace URI to alias
@type namespace_map: openid.message.NamespaceMap
@param alias_list_s: The string containing the comma-separated
list of aliases. May also be None for convenience.
@type alias_list_s: str or NoneType
@returns: The list of namespace URIs that corresponds to the
supplied list of aliases. If the string was zero-length or
None, an empty list will be returned.
@raise KeyError: If an alias is present in the list of aliases but
is not present in the namespace map.
"""
uris = []
if alias_list_s:
for alias in alias_list_s.split(','):
type_uri = namespace_map.getNamespaceURI(alias)
if type_uri is None:
raise KeyError(
'No type is defined for attribute name %r' % (alias,))
else:
uris.append(type_uri)
return uris | [
"def",
"toTypeURIs",
"(",
"namespace_map",
",",
"alias_list_s",
")",
":",
"uris",
"=",
"[",
"]",
"if",
"alias_list_s",
":",
"for",
"alias",
"in",
"alias_list_s",
".",
"split",
"(",
"','",
")",
":",
"type_uri",
"=",
"namespace_map",
".",
"getNamespaceURI",
"(",
"alias",
")",
"if",
"type_uri",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"'No type is defined for attribute name %r'",
"%",
"(",
"alias",
",",
")",
")",
"else",
":",
"uris",
".",
"append",
"(",
"type_uri",
")",
"return",
"uris"
] | Given a namespace mapping and a string containing a
comma-separated list of namespace aliases, return a list of type
URIs that correspond to those aliases.
@param namespace_map: The mapping from namespace URI to alias
@type namespace_map: openid.message.NamespaceMap
@param alias_list_s: The string containing the comma-separated
list of aliases. May also be None for convenience.
@type alias_list_s: str or NoneType
@returns: The list of namespace URIs that corresponds to the
supplied list of aliases. If the string was zero-length or
None, an empty list will be returned.
@raise KeyError: If an alias is present in the list of aliases but
is not present in the namespace map. | [
"Given",
"a",
"namespace",
"mapping",
"and",
"a",
"string",
"containing",
"a",
"comma",
"-",
"separated",
"list",
"of",
"namespace",
"aliases",
"return",
"a",
"list",
"of",
"type",
"URIs",
"that",
"correspond",
"to",
"those",
"aliases",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/ax.py#L149-L179 | train |
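Behavior sketch; the attribute URI comes from the axschema.org vocabulary:

from openid.message import NamespaceMap
from openid.extensions.ax import toTypeURIs

nsmap = NamespaceMap()
nsmap.addAlias('http://axschema.org/contact/email', 'email')

toTypeURIs(nsmap, 'email')           # -> ['http://axschema.org/contact/email']
toTypeURIs(nsmap, None)              # -> []
toTypeURIs(nsmap, 'email,nickname')  # raises KeyError ('nickname' is unmapped)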
openid/python-openid | openid/extensions/ax.py | FetchRequest.add | def add(self, attribute):
"""Add an attribute to this attribute exchange request.
@param attribute: The attribute that is being requested
@type attribute: C{L{AttrInfo}}
@returns: None
@raise KeyError: when the requested attribute is already
present in this fetch request.
"""
if attribute.type_uri in self.requested_attributes:
raise KeyError('The attribute %r has already been requested'
% (attribute.type_uri,))
self.requested_attributes[attribute.type_uri] = attribute | python | def add(self, attribute):
"""Add an attribute to this attribute exchange request.
@param attribute: The attribute that is being requested
@type attribute: C{L{AttrInfo}}
@returns: None
@raise KeyError: when the requested attribute is already
present in this fetch request.
"""
if attribute.type_uri in self.requested_attributes:
raise KeyError('The attribute %r has already been requested'
% (attribute.type_uri,))
self.requested_attributes[attribute.type_uri] = attribute | [
"def",
"add",
"(",
"self",
",",
"attribute",
")",
":",
"if",
"attribute",
".",
"type_uri",
"in",
"self",
".",
"requested_attributes",
":",
"raise",
"KeyError",
"(",
"'The attribute %r has already been requested'",
"%",
"(",
"attribute",
".",
"type_uri",
",",
")",
")",
"self",
".",
"requested_attributes",
"[",
"attribute",
".",
"type_uri",
"]",
"=",
"attribute"
] | Add an attribute to this attribute exchange request.
@param attribute: The attribute that is being requested
@type attribute: C{L{AttrInfo}}
@returns: None
@raise KeyError: when the requested attribute is already
present in this fetch request. | [
"Add",
"an",
"attribute",
"to",
"this",
"attribute",
"exchange",
"request",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/ax.py#L202-L217 | train |
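Behavior sketch; AttrInfo is defined in this module:

from openid.extensions.ax import FetchRequest, AttrInfo

req = FetchRequest()
req.add(AttrInfo('http://axschema.org/contact/email', required=True))
req.add(AttrInfo('http://axschema.org/contact/email'))   # raises KeyError (duplicate)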
openid/python-openid | openid/extensions/ax.py | AXKeyValueMessage.addValue | def addValue(self, type_uri, value):
"""Add a single value for the given attribute type to the
message. If there are already values specified for this type,
this value will be sent in addition to the values already
specified.
@param type_uri: The URI for the attribute
@param value: The value to add to the response to the relying
party for this attribute
@type value: unicode
@returns: None
"""
try:
values = self.data[type_uri]
except KeyError:
values = self.data[type_uri] = []
values.append(value) | python | def addValue(self, type_uri, value):
"""Add a single value for the given attribute type to the
message. If there are already values specified for this type,
this value will be sent in addition to the values already
specified.
@param type_uri: The URI for the attribute
@param value: The value to add to the response to the relying
party for this attribute
@type value: unicode
@returns: None
"""
try:
values = self.data[type_uri]
except KeyError:
values = self.data[type_uri] = []
values.append(value) | [
"def",
"addValue",
"(",
"self",
",",
"type_uri",
",",
"value",
")",
":",
"try",
":",
"values",
"=",
"self",
".",
"data",
"[",
"type_uri",
"]",
"except",
"KeyError",
":",
"values",
"=",
"self",
".",
"data",
"[",
"type_uri",
"]",
"=",
"[",
"]",
"values",
".",
"append",
"(",
"value",
")"
] | Add a single value for the given attribute type to the
message. If there are already values specified for this type,
this value will be sent in addition to the values already
specified.
@param type_uri: The URI for the attribute
@param value: The value to add to the response to the relying
party for this attribute
@type value: unicode
@returns: None | [
"Add",
"a",
"single",
"value",
"for",
"the",
"given",
"attribute",
"type",
"to",
"the",
"message",
".",
"If",
"there",
"are",
"already",
"values",
"specified",
"for",
"this",
"type",
"this",
"value",
"will",
"be",
"sent",
"in",
"addition",
"to",
"the",
"values",
"already",
"specified",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/ax.py#L425-L444 | train |
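Behavior sketch showing value accumulation:

from openid.extensions.ax import FetchResponse

resp = FetchResponse()
email = 'http://axschema.org/contact/email'
resp.addValue(email, 'bob@example.com')
resp.addValue(email, 'bob@work.example')
resp.data[email]   # -> ['bob@example.com', 'bob@work.example']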
openid/python-openid | openid/extensions/ax.py | AXKeyValueMessage.getSingle | def getSingle(self, type_uri, default=None):
"""Get a single value for an attribute. If no value was sent
for this attribute, use the supplied default. If there is more
than one value for this attribute, this method will fail.
@type type_uri: str
@param type_uri: The URI for the attribute
@param default: The value to return if the attribute was not
sent in the fetch_response.
@returns: The value of the attribute in the fetch_response
message, or the default supplied
@rtype: unicode or NoneType
@raises AXError: If there is more than one value for this
parameter in the fetch_response message.
"""
values = self.data.get(type_uri)
if not values:
return default
elif len(values) == 1:
return values[0]
else:
raise AXError(
'More than one value present for %r' % (type_uri,)) | python | def getSingle(self, type_uri, default=None):
"""Get a single value for an attribute. If no value was sent
for this attribute, use the supplied default. If there is more
than one value for this attribute, this method will fail.
@type type_uri: str
@param type_uri: The URI for the attribute
@param default: The value to return if the attribute was not
sent in the fetch_response.
@returns: The value of the attribute in the fetch_response
message, or the default supplied
@rtype: unicode or NoneType
@raises AXError: If there is more than one value for this
parameter in the fetch_response message.
"""
values = self.data.get(type_uri)
if not values:
return default
elif len(values) == 1:
return values[0]
else:
raise AXError(
'More than one value present for %r' % (type_uri,)) | [
"def",
"getSingle",
"(",
"self",
",",
"type_uri",
",",
"default",
"=",
"None",
")",
":",
"values",
"=",
"self",
".",
"data",
".",
"get",
"(",
"type_uri",
")",
"if",
"not",
"values",
":",
"return",
"default",
"elif",
"len",
"(",
"values",
")",
"==",
"1",
":",
"return",
"values",
"[",
"0",
"]",
"else",
":",
"raise",
"AXError",
"(",
"'More than one value present for %r'",
"%",
"(",
"type_uri",
",",
")",
")"
] | Get a single value for an attribute. If no value was sent
for this attribute, use the supplied default. If there is more
than one value for this attribute, this method will fail.
@type type_uri: str
@param type_uri: The URI for the attribute
@param default: The value to return if the attribute was not
sent in the fetch_response.
@returns: The value of the attribute in the fetch_response
message, or the default supplied
@rtype: unicode or NoneType
@raises AXError: If there is more than one value for this
parameter in the fetch_response message. | [
"Get",
"a",
"single",
"value",
"for",
"an",
"attribute",
".",
"If",
"no",
"value",
"was",
"sent",
"for",
"this",
"attribute",
"use",
"the",
"supplied",
"default",
".",
"If",
"there",
"is",
"more",
"than",
"one",
"value",
"for",
"this",
"attribute",
"this",
"method",
"will",
"fail",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/ax.py#L529-L555 | train |
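Behavior sketch for the three outcomes (absent with default, single value, multiple values):

from openid.extensions.ax import FetchResponse

resp = FetchResponse()
lang = 'http://axschema.org/pref/language'
resp.getSingle(lang, 'en')   # -> 'en' (attribute absent, default returned)

resp.addValue(lang, 'en')
resp.getSingle(lang)         # -> 'en'

resp.addValue(lang, 'cs')
resp.getSingle(lang)         # raises AXError (more than one value present)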
openid/python-openid | openid/extensions/ax.py | FetchResponse.getExtensionArgs | def getExtensionArgs(self):
"""Serialize this object into arguments in the attribute
exchange namespace
@returns: The dictionary of unqualified attribute exchange
arguments that represent this fetch_response.
@rtype: {unicode;unicode}
"""
aliases = NamespaceMap()
zero_value_types = []
if self.request is not None:
# Validate the data in the context of the request (the
# same attributes should be present in each, and the
# counts in the response must be no more than the counts
# in the request)
for type_uri in self.data:
if type_uri not in self.request:
raise KeyError(
'Response attribute not present in request: %r'
% (type_uri,))
for attr_info in self.request.iterAttrs():
# Copy the aliases from the request so that reading
# the response in light of the request is easier
if attr_info.alias is None:
aliases.add(attr_info.type_uri)
else:
aliases.addAlias(attr_info.type_uri, attr_info.alias)
try:
values = self.data[attr_info.type_uri]
except KeyError:
values = []
zero_value_types.append(attr_info)
if (attr_info.count != UNLIMITED_VALUES) and \
(attr_info.count < len(values)):
raise AXError(
'More than the number of requested values were '
'specified for %r' % (attr_info.type_uri,))
kv_args = self._getExtensionKVArgs(aliases)
# Add the KV args into the response with the args that are
# unique to the fetch_response
ax_args = self._newArgs()
# For each requested attribute, put its type/alias and count
# into the response even if no data were returned.
for attr_info in zero_value_types:
alias = aliases.getAlias(attr_info.type_uri)
kv_args['type.' + alias] = attr_info.type_uri
kv_args['count.' + alias] = '0'
update_url = ((self.request and self.request.update_url)
or self.update_url)
if update_url:
ax_args['update_url'] = update_url
ax_args.update(kv_args)
return ax_args | python | def getExtensionArgs(self):
"""Serialize this object into arguments in the attribute
exchange namespace
@returns: The dictionary of unqualified attribute exchange
arguments that represent this fetch_response.
@rtype: {unicode;unicode}
"""
aliases = NamespaceMap()
zero_value_types = []
if self.request is not None:
# Validate the data in the context of the request (the
# same attributes should be present in each, and the
# counts in the response must be no more than the counts
# in the request)
for type_uri in self.data:
if type_uri not in self.request:
raise KeyError(
'Response attribute not present in request: %r'
% (type_uri,))
for attr_info in self.request.iterAttrs():
# Copy the aliases from the request so that reading
# the response in light of the request is easier
if attr_info.alias is None:
aliases.add(attr_info.type_uri)
else:
aliases.addAlias(attr_info.type_uri, attr_info.alias)
try:
values = self.data[attr_info.type_uri]
except KeyError:
values = []
zero_value_types.append(attr_info)
if (attr_info.count != UNLIMITED_VALUES) and \
(attr_info.count < len(values)):
raise AXError(
'More than the number of requested values were '
'specified for %r' % (attr_info.type_uri,))
kv_args = self._getExtensionKVArgs(aliases)
# Add the KV args into the response with the args that are
# unique to the fetch_response
ax_args = self._newArgs()
# For each requested attribute, put its type/alias and count
# into the response even if no data were returned.
for attr_info in zero_value_types:
alias = aliases.getAlias(attr_info.type_uri)
kv_args['type.' + alias] = attr_info.type_uri
kv_args['count.' + alias] = '0'
update_url = ((self.request and self.request.update_url)
or self.update_url)
if update_url:
ax_args['update_url'] = update_url
ax_args.update(kv_args)
return ax_args | [
"def",
"getExtensionArgs",
"(",
"self",
")",
":",
"aliases",
"=",
"NamespaceMap",
"(",
")",
"zero_value_types",
"=",
"[",
"]",
"if",
"self",
".",
"request",
"is",
"not",
"None",
":",
"# Validate the data in the context of the request (the",
"# same attributes should be present in each, and the",
"# counts in the response must be no more than the counts",
"# in the request)",
"for",
"type_uri",
"in",
"self",
".",
"data",
":",
"if",
"type_uri",
"not",
"in",
"self",
".",
"request",
":",
"raise",
"KeyError",
"(",
"'Response attribute not present in request: %r'",
"%",
"(",
"type_uri",
",",
")",
")",
"for",
"attr_info",
"in",
"self",
".",
"request",
".",
"iterAttrs",
"(",
")",
":",
"# Copy the aliases from the request so that reading",
"# the response in light of the request is easier",
"if",
"attr_info",
".",
"alias",
"is",
"None",
":",
"aliases",
".",
"add",
"(",
"attr_info",
".",
"type_uri",
")",
"else",
":",
"aliases",
".",
"addAlias",
"(",
"attr_info",
".",
"type_uri",
",",
"attr_info",
".",
"alias",
")",
"try",
":",
"values",
"=",
"self",
".",
"data",
"[",
"attr_info",
".",
"type_uri",
"]",
"except",
"KeyError",
":",
"values",
"=",
"[",
"]",
"zero_value_types",
".",
"append",
"(",
"attr_info",
")",
"if",
"(",
"attr_info",
".",
"count",
"!=",
"UNLIMITED_VALUES",
")",
"and",
"(",
"attr_info",
".",
"count",
"<",
"len",
"(",
"values",
")",
")",
":",
"raise",
"AXError",
"(",
"'More than the number of requested values were '",
"'specified for %r'",
"%",
"(",
"attr_info",
".",
"type_uri",
",",
")",
")",
"kv_args",
"=",
"self",
".",
"_getExtensionKVArgs",
"(",
"aliases",
")",
"# Add the KV args into the response with the args that are",
"# unique to the fetch_response",
"ax_args",
"=",
"self",
".",
"_newArgs",
"(",
")",
"# For each requested attribute, put its type/alias and count",
"# into the response even if no data were returned.",
"for",
"attr_info",
"in",
"zero_value_types",
":",
"alias",
"=",
"aliases",
".",
"getAlias",
"(",
"attr_info",
".",
"type_uri",
")",
"kv_args",
"[",
"'type.'",
"+",
"alias",
"]",
"=",
"attr_info",
".",
"type_uri",
"kv_args",
"[",
"'count.'",
"+",
"alias",
"]",
"=",
"'0'",
"update_url",
"=",
"(",
"(",
"self",
".",
"request",
"and",
"self",
".",
"request",
".",
"update_url",
")",
"or",
"self",
".",
"update_url",
")",
"if",
"update_url",
":",
"ax_args",
"[",
"'update_url'",
"]",
"=",
"update_url",
"ax_args",
".",
"update",
"(",
"kv_args",
")",
"return",
"ax_args"
] | Serialize this object into arguments in the attribute
exchange namespace
@returns: The dictionary of unqualified attribute exchange
arguments that represent this fetch_response.
@rtype: {unicode;unicode} | [
"Serialize",
"this",
"object",
"into",
"arguments",
"in",
"the",
"attribute",
"exchange",
"namespace"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/ax.py#L616-L682 | train |
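A serialization sketch for the request-less case; the key layout comes from _newArgs and _getExtensionKVArgs (not shown here), so treat the exact dictionary as indicative, and note the auto-generated alias may differ:

from openid.extensions.ax import FetchResponse

resp = FetchResponse()
resp.addValue('http://axschema.org/contact/email', 'bob@example.com')
resp.getExtensionArgs()
# roughly:
# {'mode': 'fetch_response',
#  'type.ext0': 'http://axschema.org/contact/email',
#  'count.ext0': '1',
#  'value.ext0.1': 'bob@example.com'}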
openid/python-openid | openid/extensions/ax.py | FetchResponse.fromSuccessResponse | def fromSuccessResponse(cls, success_response, signed=True):
"""Construct a FetchResponse object from an OpenID library
SuccessResponse object.
@param success_response: A successful id_res response object
@type success_response: openid.consumer.consumer.SuccessResponse
@param signed: Whether non-signed args should be
processed. If True (the default), only signed arguments
will be processed.
@type signed: bool
@returns: A FetchResponse containing the data from the OpenID
message, or None if the SuccessResponse did not contain AX
extension data.
@raises AXError: when the AX data cannot be parsed.
"""
self = cls()
ax_args = success_response.extensionResponse(self.ns_uri, signed)
try:
self.parseExtensionArgs(ax_args)
except NotAXMessage, err:
return None
else:
return self | python | def fromSuccessResponse(cls, success_response, signed=True):
"""Construct a FetchResponse object from an OpenID library
SuccessResponse object.
@param success_response: A successful id_res response object
@type success_response: openid.consumer.consumer.SuccessResponse
@param signed: Whether non-signed args should be
processed. If True (the default), only signed arguments
will be processed.
@type signed: bool
@returns: A FetchResponse containing the data from the OpenID
message, or None if the SuccessResponse did not contain AX
extension data.
@raises AXError: when the AX data cannot be parsed.
"""
self = cls()
ax_args = success_response.extensionResponse(self.ns_uri, signed)
try:
self.parseExtensionArgs(ax_args)
except NotAXMessage, err:
return None
else:
return self | [
"def",
"fromSuccessResponse",
"(",
"cls",
",",
"success_response",
",",
"signed",
"=",
"True",
")",
":",
"self",
"=",
"cls",
"(",
")",
"ax_args",
"=",
"success_response",
".",
"extensionResponse",
"(",
"self",
".",
"ns_uri",
",",
"signed",
")",
"try",
":",
"self",
".",
"parseExtensionArgs",
"(",
"ax_args",
")",
"except",
"NotAXMessage",
",",
"err",
":",
"return",
"None",
"else",
":",
"return",
"self"
] | Construct a FetchResponse object from an OpenID library
SuccessResponse object.
@param success_response: A successful id_res response object
@type success_response: openid.consumer.consumer.SuccessResponse
@param signed: Whether non-signed args should be
processed. If True (the default), only signed arguments
will be processed.
@type signed: bool
@returns: A FetchResponse containing the data from the OpenID
message, or None if the SuccessResponse did not contain AX
extension data.
@raises AXError: when the AX data cannot be parsed. | [
"Construct",
"a",
"FetchResponse",
"object",
"from",
"an",
"OpenID",
"library",
"SuccessResponse",
"object",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/ax.py#L689-L715 | train |
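Consumer-side sketch; success_response stands for an openid.consumer.consumer.SuccessResponse obtained after completing a checkid exchange, and is not constructed here:

from openid.extensions.ax import FetchResponse

ax_response = FetchResponse.fromSuccessResponse(success_response)
if ax_response is not None:   # None means the response carried no AX data
    email = ax_response.getSingle('http://axschema.org/contact/email')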
openid/python-openid | openid/extensions/sreg.py | SRegRequest.parseExtensionArgs | def parseExtensionArgs(self, args, strict=False):
"""Parse the unqualified simple registration request
parameters and add them to this object.
This method is essentially the inverse of
C{L{getExtensionArgs}}. This method restores the serialized simple
registration request fields.
If you are extracting arguments from a standard OpenID
checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
which will extract the sreg namespace and arguments from the
OpenID request. This method is intended for cases where the
OpenID server needs more control over how the arguments are
parsed than that method provides.
>>> args = message.getArgs(ns_uri)
>>> request.parseExtensionArgs(args)
@param args: The unqualified simple registration arguments
@type args: {str:str}
@param strict: If True, raise an error for fields that are not
defined in the simple registration specification; if False
(the default), such fields are tolerated (and ignored)
@type strict: bool
@returns: None; updates this object
"""
for list_name in ['required', 'optional']:
required = (list_name == 'required')
items = args.get(list_name)
if items:
for field_name in items.split(','):
try:
self.requestField(field_name, required, strict)
except ValueError:
if strict:
raise
self.policy_url = args.get('policy_url') | python | def parseExtensionArgs(self, args, strict=False):
"""Parse the unqualified simple registration request
parameters and add them to this object.
This method is essentially the inverse of
C{L{getExtensionArgs}}. This method restores the serialized simple
registration request fields.
If you are extracting arguments from a standard OpenID
checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
which will extract the sreg namespace and arguments from the
OpenID request. This method is intended for cases where the
OpenID server needs more control over how the arguments are
parsed than that method provides.
>>> args = message.getArgs(ns_uri)
>>> request.parseExtensionArgs(args)
@param args: The unqualified simple registration arguments
@type args: {str:str}
@param strict: Whether to raise ValueError when the request
        contains fields that are not defined in the simple
        registration specification. If False, such fields are
        tolerated and ignored.
@type strict: bool
@returns: None; updates this object
"""
for list_name in ['required', 'optional']:
required = (list_name == 'required')
items = args.get(list_name)
if items:
for field_name in items.split(','):
try:
self.requestField(field_name, required, strict)
except ValueError:
if strict:
raise
self.policy_url = args.get('policy_url') | [
"def",
"parseExtensionArgs",
"(",
"self",
",",
"args",
",",
"strict",
"=",
"False",
")",
":",
"for",
"list_name",
"in",
"[",
"'required'",
",",
"'optional'",
"]",
":",
"required",
"=",
"(",
"list_name",
"==",
"'required'",
")",
"items",
"=",
"args",
".",
"get",
"(",
"list_name",
")",
"if",
"items",
":",
"for",
"field_name",
"in",
"items",
".",
"split",
"(",
"','",
")",
":",
"try",
":",
"self",
".",
"requestField",
"(",
"field_name",
",",
"required",
",",
"strict",
")",
"except",
"ValueError",
":",
"if",
"strict",
":",
"raise",
"self",
".",
"policy_url",
"=",
"args",
".",
"get",
"(",
"'policy_url'",
")"
] | Parse the unqualified simple registration request
parameters and add them to this object.
This method is essentially the inverse of
C{L{getExtensionArgs}}. This method restores the serialized simple
registration request fields.
If you are extracting arguments from a standard OpenID
checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
which will extract the sreg namespace and arguments from the
OpenID request. This method is intended for cases where the
OpenID server needs more control over how the arguments are
parsed than that method provides.
>>> args = message.getArgs(ns_uri)
>>> request.parseExtensionArgs(args)
@param args: The unqualified simple registration arguments
@type args: {str:str}
@param strict: Whether to raise ValueError when the request
        contains fields that are not defined in the simple
        registration specification. If False, such fields are
        tolerated and ignored.
@type strict: bool
@returns: None; updates this object | [
"Parse",
"the",
"unqualified",
"simple",
"registration",
"request",
"parameters",
"and",
"add",
"them",
"to",
"this",
"object",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/sreg.py#L232-L271 | train |
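A short sketch of the parse step on the server side; the argument values and URLs are made up for illustration, and the dict stands in for what `message.getArgs(sreg.ns_uri)` would return for a real checkid_* request.

```python
from openid.extensions import sreg

request = sreg.SRegRequest()

# Unqualified sreg arguments, as message.getArgs(sreg.ns_uri) would
# return them on the server side of a checkid_* request.
args = {
    'required': 'email',
    'optional': 'nickname,fullname',
    'policy_url': 'http://example.com/policy',
}
request.parseExtensionArgs(args)

assert request.required == ['email']
assert request.optional == ['nickname', 'fullname']
assert request.policy_url == 'http://example.com/policy'
```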
openid/python-openid | openid/extensions/sreg.py | SRegRequest.requestField | def requestField(self, field_name, required=False, strict=False):
"""Request the specified field from the OpenID user
@param field_name: the unqualified simple registration field name
@type field_name: str
@param required: whether the given field should be presented
        to the user as being required to successfully complete
        the request
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when the field requested is not a simple
registration field or strict is set and the field was
requested more than once
"""
checkFieldName(field_name)
if strict:
if field_name in self.required or field_name in self.optional:
raise ValueError('That field has already been requested')
else:
if field_name in self.required:
return
if field_name in self.optional:
if required:
self.optional.remove(field_name)
else:
return
if required:
self.required.append(field_name)
else:
self.optional.append(field_name) | python | def requestField(self, field_name, required=False, strict=False):
"""Request the specified field from the OpenID user
@param field_name: the unqualified simple registration field name
@type field_name: str
@param required: whether the given field should be presented
        to the user as being required to successfully complete
        the request
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when the field requested is not a simple
registration field or strict is set and the field was
requested more than once
"""
checkFieldName(field_name)
if strict:
if field_name in self.required or field_name in self.optional:
raise ValueError('That field has already been requested')
else:
if field_name in self.required:
return
if field_name in self.optional:
if required:
self.optional.remove(field_name)
else:
return
if required:
self.required.append(field_name)
else:
self.optional.append(field_name) | [
"def",
"requestField",
"(",
"self",
",",
"field_name",
",",
"required",
"=",
"False",
",",
"strict",
"=",
"False",
")",
":",
"checkFieldName",
"(",
"field_name",
")",
"if",
"strict",
":",
"if",
"field_name",
"in",
"self",
".",
"required",
"or",
"field_name",
"in",
"self",
".",
"optional",
":",
"raise",
"ValueError",
"(",
"'That field has already been requested'",
")",
"else",
":",
"if",
"field_name",
"in",
"self",
".",
"required",
":",
"return",
"if",
"field_name",
"in",
"self",
".",
"optional",
":",
"if",
"required",
":",
"self",
".",
"optional",
".",
"remove",
"(",
"field_name",
")",
"else",
":",
"return",
"if",
"required",
":",
"self",
".",
"required",
".",
"append",
"(",
"field_name",
")",
"else",
":",
"self",
".",
"optional",
".",
"append",
"(",
"field_name",
")"
] | Request the specified field from the OpenID user
@param field_name: the unqualified simple registration field name
@type field_name: str
@param required: whether the given field should be presented
        to the user as being required to successfully complete
        the request
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when the field requested is not a simple
registration field or strict is set and the field was
requested more than once | [
"Request",
"the",
"specified",
"field",
"from",
"the",
"OpenID",
"user"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/sreg.py#L293-L328 | train |
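A sketch of the promotion behaviour this method implements: requesting an optional field again as required moves it to the required list, while strict mode rejects duplicates; the field names are examples.

```python
from openid.extensions import sreg

request = sreg.SRegRequest()
request.requestField('email')                  # optional by default
request.requestField('email', required=True)   # promoted to required

assert request.required == ['email']
assert request.optional == []

try:
    request.requestField('email', strict=True)
except ValueError:
    pass  # strict mode rejects a field that was already requested

try:
    request.requestField('shoe_size')
except ValueError:
    pass  # not a simple registration field name
```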
openid/python-openid | openid/extensions/sreg.py | SRegRequest.requestFields | def requestFields(self, field_names, required=False, strict=False):
"""Add the given list of fields to the request
@param field_names: The simple registration data fields to request
@type field_names: [str]
@param required: Whether these values should be presented to
the user as required
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when a field requested is not a simple
registration field or strict is set and a field was
requested more than once
"""
if isinstance(field_names, basestring):
raise TypeError('Fields should be passed as a list of '
'strings (not %r)' % (type(field_names),))
for field_name in field_names:
self.requestField(field_name, required, strict=strict) | python | def requestFields(self, field_names, required=False, strict=False):
"""Add the given list of fields to the request
@param field_names: The simple registration data fields to request
@type field_names: [str]
@param required: Whether these values should be presented to
the user as required
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when a field requested is not a simple
registration field or strict is set and a field was
requested more than once
"""
if isinstance(field_names, basestring):
raise TypeError('Fields should be passed as a list of '
'strings (not %r)' % (type(field_names),))
for field_name in field_names:
self.requestField(field_name, required, strict=strict) | [
"def",
"requestFields",
"(",
"self",
",",
"field_names",
",",
"required",
"=",
"False",
",",
"strict",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"field_names",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"'Fields should be passed as a list of '",
"'strings (not %r)'",
"%",
"(",
"type",
"(",
"field_names",
")",
",",
")",
")",
"for",
"field_name",
"in",
"field_names",
":",
"self",
".",
"requestField",
"(",
"field_name",
",",
"required",
",",
"strict",
"=",
"strict",
")"
] | Add the given list of fields to the request
@param field_names: The simple registration data fields to request
@type field_names: [str]
@param required: Whether these values should be presented to
the user as required
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when a field requested is not a simple
registration field or strict is set and a field was
requested more than once | [
"Add",
"the",
"given",
"list",
"of",
"fields",
"to",
"the",
"request"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/sreg.py#L330-L351 | train |
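A small sketch of the type guard and bulk request, assuming the library's Python 2 runtime (where `basestring` exists); the field names are illustrative.

```python
from openid.extensions import sreg

request = sreg.SRegRequest()

# A bare string is rejected, catching the common mistake of writing
# requestFields('email') instead of requestFields(['email']).
try:
    request.requestFields('email')
except TypeError:
    pass

request.requestFields(['email', 'nickname'], required=True)
assert request.required == ['email', 'nickname']
```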
openid/python-openid | openid/extensions/sreg.py | SRegRequest.getExtensionArgs | def getExtensionArgs(self):
"""Get a dictionary of unqualified simple registration
arguments representing this request.
This method is essentially the inverse of
C{L{parseExtensionArgs}}. This method serializes the simple
registration request fields.
@rtype: {str:str}
"""
args = {}
if self.required:
args['required'] = ','.join(self.required)
if self.optional:
args['optional'] = ','.join(self.optional)
if self.policy_url:
args['policy_url'] = self.policy_url
return args | python | def getExtensionArgs(self):
"""Get a dictionary of unqualified simple registration
arguments representing this request.
This method is essentially the inverse of
C{L{parseExtensionArgs}}. This method serializes the simple
registration request fields.
@rtype: {str:str}
"""
args = {}
if self.required:
args['required'] = ','.join(self.required)
if self.optional:
args['optional'] = ','.join(self.optional)
if self.policy_url:
args['policy_url'] = self.policy_url
return args | [
"def",
"getExtensionArgs",
"(",
"self",
")",
":",
"args",
"=",
"{",
"}",
"if",
"self",
".",
"required",
":",
"args",
"[",
"'required'",
"]",
"=",
"','",
".",
"join",
"(",
"self",
".",
"required",
")",
"if",
"self",
".",
"optional",
":",
"args",
"[",
"'optional'",
"]",
"=",
"','",
".",
"join",
"(",
"self",
".",
"optional",
")",
"if",
"self",
".",
"policy_url",
":",
"args",
"[",
"'policy_url'",
"]",
"=",
"self",
".",
"policy_url",
"return",
"args"
] | Get a dictionary of unqualified simple registration
arguments representing this request.
This method is essentially the inverse of
C{L{parseExtensionArgs}}. This method serializes the simple
registration request fields.
@rtype: {str:str} | [
"Get",
"a",
"dictionary",
"of",
"unqualified",
"simple",
"registration",
"arguments",
"representing",
"this",
"request",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/sreg.py#L353-L374 | train |
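A round-trip sketch pairing this method with `parseExtensionArgs`; the policy URL and field choices are made up for illustration.

```python
from openid.extensions import sreg

request = sreg.SRegRequest(policy_url='http://example.com/policy')
request.requestFields(['email'], required=True)
request.requestFields(['nickname'])

args = request.getExtensionArgs()
assert args == {
    'required': 'email',
    'optional': 'nickname',
    'policy_url': 'http://example.com/policy',
}

# parseExtensionArgs() is the inverse: it restores an equivalent request.
restored = sreg.SRegRequest()
restored.parseExtensionArgs(args)
assert restored.getExtensionArgs() == args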
openid/python-openid | openid/extensions/sreg.py | SRegResponse.get | def get(self, field_name, default=None):
"""Like dict.get, except that it checks that the field name is
defined by the simple registration specification"""
checkFieldName(field_name)
return self.data.get(field_name, default) | python | def get(self, field_name, default=None):
"""Like dict.get, except that it checks that the field name is
defined by the simple registration specification"""
checkFieldName(field_name)
return self.data.get(field_name, default) | [
"def",
"get",
"(",
"self",
",",
"field_name",
",",
"default",
"=",
"None",
")",
":",
"checkFieldName",
"(",
"field_name",
")",
"return",
"self",
".",
"data",
".",
"get",
"(",
"field_name",
",",
"default",
")"
] | Like dict.get, except that it checks that the field name is
defined by the simple registration specification | [
"Like",
"dict",
".",
"get",
"except",
"that",
"it",
"checks",
"that",
"the",
"field",
"name",
"is",
"defined",
"by",
"the",
"simple",
"registration",
"specification"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/sreg.py#L483-L487 | train |
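A minimal sketch of the checked lookup; the data values are made up.

```python
from openid.extensions import sreg

response = sreg.SRegResponse(data={'email': 'user@example.com'})

assert response.get('email') == 'user@example.com'
assert response.get('nickname', 'anonymous') == 'anonymous'

try:
    response.get('shoe_size')  # not defined by the sreg specification
except ValueError:
    pass
```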
openid/python-openid | openid/server/trustroot.py | returnToMatches | def returnToMatches(allowed_return_to_urls, return_to):
"""Is the return_to URL under one of the supplied allowed
return_to URLs?
@since: 2.1.0
"""
for allowed_return_to in allowed_return_to_urls:
# A return_to pattern works the same as a realm, except that
# it's not allowed to use a wildcard. We'll model this by
# parsing it as a realm, and not trying to match it if it has
# a wildcard.
return_realm = TrustRoot.parse(allowed_return_to)
if (# Parses as a trust root
return_realm is not None and
# Does not have a wildcard
not return_realm.wildcard and
# Matches the return_to that we passed in with it
return_realm.validateURL(return_to)
):
return True
# No URL in the list matched
return False | python | def returnToMatches(allowed_return_to_urls, return_to):
"""Is the return_to URL under one of the supplied allowed
return_to URLs?
@since: 2.1.0
"""
for allowed_return_to in allowed_return_to_urls:
# A return_to pattern works the same as a realm, except that
# it's not allowed to use a wildcard. We'll model this by
# parsing it as a realm, and not trying to match it if it has
# a wildcard.
return_realm = TrustRoot.parse(allowed_return_to)
if (# Parses as a trust root
return_realm is not None and
# Does not have a wildcard
not return_realm.wildcard and
# Matches the return_to that we passed in with it
return_realm.validateURL(return_to)
):
return True
# No URL in the list matched
return False | [
"def",
"returnToMatches",
"(",
"allowed_return_to_urls",
",",
"return_to",
")",
":",
"for",
"allowed_return_to",
"in",
"allowed_return_to_urls",
":",
"# A return_to pattern works the same as a realm, except that",
"# it's not allowed to use a wildcard. We'll model this by",
"# parsing it as a realm, and not trying to match it if it has",
"# a wildcard.",
"return_realm",
"=",
"TrustRoot",
".",
"parse",
"(",
"allowed_return_to",
")",
"if",
"(",
"# Parses as a trust root",
"return_realm",
"is",
"not",
"None",
"and",
"# Does not have a wildcard",
"not",
"return_realm",
".",
"wildcard",
"and",
"# Matches the return_to that we passed in with it",
"return_realm",
".",
"validateURL",
"(",
"return_to",
")",
")",
":",
"return",
"True",
"# No URL in the list matched",
"return",
"False"
] | Is the return_to URL under one of the supplied allowed
return_to URLs?
@since: 2.1.0 | [
"Is",
"the",
"return_to",
"URL",
"under",
"one",
"of",
"the",
"supplied",
"allowed",
"return_to",
"URLs?"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/trustroot.py#L381-L407 | train |
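A sketch of the matching rules described above, using made-up URLs: an exact match or a URL under an allowed prefix matches, while wildcard patterns in the allowed list are never matched.

```python
from openid.server.trustroot import returnToMatches

allowed = [
    'http://example.com/openid/return',
    'http://example.com/account/',
]

assert returnToMatches(allowed, 'http://example.com/openid/return')
assert returnToMatches(allowed, 'http://example.com/account/finish')
assert not returnToMatches(allowed, 'http://evil.example.net/account/')

# A wildcard in the allowed list is skipped rather than matched.
assert not returnToMatches(['http://*.example.com/'], 'http://www.example.com/')
```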
openid/python-openid | openid/server/trustroot.py | getAllowedReturnURLs | def getAllowedReturnURLs(relying_party_url):
"""Given a relying party discovery URL return a list of return_to URLs.
@since: 2.1.0
"""
(rp_url_after_redirects, return_to_urls) = services.getServiceEndpoints(
relying_party_url, _extractReturnURL)
if rp_url_after_redirects != relying_party_url:
# Verification caused a redirect
raise RealmVerificationRedirected(
relying_party_url, rp_url_after_redirects)
return return_to_urls | python | def getAllowedReturnURLs(relying_party_url):
"""Given a relying party discovery URL return a list of return_to URLs.
@since: 2.1.0
"""
(rp_url_after_redirects, return_to_urls) = services.getServiceEndpoints(
relying_party_url, _extractReturnURL)
if rp_url_after_redirects != relying_party_url:
# Verification caused a redirect
raise RealmVerificationRedirected(
relying_party_url, rp_url_after_redirects)
return return_to_urls | [
"def",
"getAllowedReturnURLs",
"(",
"relying_party_url",
")",
":",
"(",
"rp_url_after_redirects",
",",
"return_to_urls",
")",
"=",
"services",
".",
"getServiceEndpoints",
"(",
"relying_party_url",
",",
"_extractReturnURL",
")",
"if",
"rp_url_after_redirects",
"!=",
"relying_party_url",
":",
"# Verification caused a redirect",
"raise",
"RealmVerificationRedirected",
"(",
"relying_party_url",
",",
"rp_url_after_redirects",
")",
"return",
"return_to_urls"
] | Given a relying party discovery URL return a list of return_to URLs.
@since: 2.1.0 | [
"Given",
"a",
"relying",
"party",
"discovery",
"URL",
"return",
"a",
"list",
"of",
"return_to",
"URLs",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/trustroot.py#L409-L422 | train |
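A hedged usage sketch: this function performs Yadis discovery over the network, so callers should expect `DiscoveryFailure` as well as the redirect error raised in the body above; the wrapper function and its error policy are assumptions for illustration.

```python
from openid.server.trustroot import (
    RealmVerificationRedirected, getAllowedReturnURLs)
from openid.yadis.discover import DiscoveryFailure

def fetch_return_urls(rp_url):
    try:
        # Runs Yadis discovery over the network against rp_url.
        return getAllowedReturnURLs(rp_url)
    except DiscoveryFailure:
        return []  # the RP publishes no usable discovery document
    except RealmVerificationRedirected:
        return []  # the discovery URL redirected, which is not allowed
```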
openid/python-openid | openid/server/trustroot.py | verifyReturnTo | def verifyReturnTo(realm_str, return_to, _vrfy=getAllowedReturnURLs):
"""Verify that a return_to URL is valid for the given realm.
This function builds a discovery URL, performs Yadis discovery on
it, makes sure that the URL does not redirect, parses out the
return_to URLs, and finally checks whether the given return_to
    URL matches one of them.
@raises DiscoveryFailure: When Yadis discovery fails
@returns: True if the return_to URL is valid for the realm
@since: 2.1.0
"""
realm = TrustRoot.parse(realm_str)
if realm is None:
# The realm does not parse as a URL pattern
return False
try:
allowable_urls = _vrfy(realm.buildDiscoveryURL())
except RealmVerificationRedirected, err:
logging.exception(str(err))
return False
if returnToMatches(allowable_urls, return_to):
return True
else:
logging.error("Failed to validate return_to %r for realm %r, was not "
"in %s" % (return_to, realm_str, allowable_urls))
return False | python | def verifyReturnTo(realm_str, return_to, _vrfy=getAllowedReturnURLs):
"""Verify that a return_to URL is valid for the given realm.
This function builds a discovery URL, performs Yadis discovery on
it, makes sure that the URL does not redirect, parses out the
return_to URLs, and finally checks whether the given return_to
    URL matches one of them.
@raises DiscoveryFailure: When Yadis discovery fails
@returns: True if the return_to URL is valid for the realm
@since: 2.1.0
"""
realm = TrustRoot.parse(realm_str)
if realm is None:
# The realm does not parse as a URL pattern
return False
try:
allowable_urls = _vrfy(realm.buildDiscoveryURL())
except RealmVerificationRedirected, err:
logging.exception(str(err))
return False
if returnToMatches(allowable_urls, return_to):
return True
else:
logging.error("Failed to validate return_to %r for realm %r, was not "
"in %s" % (return_to, realm_str, allowable_urls))
return False | [
"def",
"verifyReturnTo",
"(",
"realm_str",
",",
"return_to",
",",
"_vrfy",
"=",
"getAllowedReturnURLs",
")",
":",
"realm",
"=",
"TrustRoot",
".",
"parse",
"(",
"realm_str",
")",
"if",
"realm",
"is",
"None",
":",
"# The realm does not parse as a URL pattern",
"return",
"False",
"try",
":",
"allowable_urls",
"=",
"_vrfy",
"(",
"realm",
".",
"buildDiscoveryURL",
"(",
")",
")",
"except",
"RealmVerificationRedirected",
",",
"err",
":",
"logging",
".",
"exception",
"(",
"str",
"(",
"err",
")",
")",
"return",
"False",
"if",
"returnToMatches",
"(",
"allowable_urls",
",",
"return_to",
")",
":",
"return",
"True",
"else",
":",
"logging",
".",
"error",
"(",
"\"Failed to validate return_to %r for realm %r, was not \"",
"\"in %s\"",
"%",
"(",
"return_to",
",",
"realm_str",
",",
"allowable_urls",
")",
")",
"return",
"False"
] | Verify that a return_to URL is valid for the given realm.
This function builds a discovery URL, performs Yadis discovery on
it, makes sure that the URL does not redirect, parses out the
return_to URLs, and finally checks whether the given return_to
    URL matches one of them.
@raises DiscoveryFailure: When Yadis discovery fails
@returns: True if the return_to URL is valid for the realm
@since: 2.1.0 | [
"Verify",
"that",
"a",
"return_to",
"URL",
"is",
"valid",
"for",
"the",
"given",
"realm",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/trustroot.py#L425-L454 | train |
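A server-side sketch of calling this function; note the docstring's warning that `DiscoveryFailure` propagates, so the wrapper below (an illustrative assumption, not library API) handles it explicitly.

```python
from openid.server.trustroot import verifyReturnTo
from openid.yadis.discover import DiscoveryFailure

def is_return_to_trusted(realm, return_to):
    # verifyReturnTo() performs network discovery, so DiscoveryFailure
    # must be handled by the caller.
    try:
        return verifyReturnTo(realm, return_to)
    except DiscoveryFailure:
        return False

# e.g. is_return_to_trusted('http://*.example.com/',
#                           'http://www.example.com/openid/finish')
```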
openid/python-openid | openid/server/trustroot.py | TrustRoot.validateURL | def validateURL(self, url):
"""
Validates a URL against this trust root.
@param url: The URL to check
@type url: C{str}
@return: Whether the given URL is within this trust root.
@rtype: C{bool}
"""
url_parts = _parseURL(url)
if url_parts is None:
return False
proto, host, port, path = url_parts
if proto != self.proto:
return False
if port != self.port:
return False
if '*' in host:
return False
if not self.wildcard:
if host != self.host:
return False
elif ((not host.endswith(self.host)) and
('.' + host) != self.host):
return False
if path != self.path:
path_len = len(self.path)
trust_prefix = self.path[:path_len]
url_prefix = path[:path_len]
# must be equal up to the length of the path, at least
if trust_prefix != url_prefix:
return False
# These characters must be on the boundary between the end
# of the trust root's path and the start of the URL's
# path.
if '?' in self.path:
allowed = '&'
else:
allowed = '?/'
return (self.path[-1] in allowed or
path[path_len] in allowed)
return True | python | def validateURL(self, url):
"""
Validates a URL against this trust root.
@param url: The URL to check
@type url: C{str}
@return: Whether the given URL is within this trust root.
@rtype: C{bool}
"""
url_parts = _parseURL(url)
if url_parts is None:
return False
proto, host, port, path = url_parts
if proto != self.proto:
return False
if port != self.port:
return False
if '*' in host:
return False
if not self.wildcard:
if host != self.host:
return False
elif ((not host.endswith(self.host)) and
('.' + host) != self.host):
return False
if path != self.path:
path_len = len(self.path)
trust_prefix = self.path[:path_len]
url_prefix = path[:path_len]
# must be equal up to the length of the path, at least
if trust_prefix != url_prefix:
return False
# These characters must be on the boundary between the end
# of the trust root's path and the start of the URL's
# path.
if '?' in self.path:
allowed = '&'
else:
allowed = '?/'
return (self.path[-1] in allowed or
path[path_len] in allowed)
return True | [
"def",
"validateURL",
"(",
"self",
",",
"url",
")",
":",
"url_parts",
"=",
"_parseURL",
"(",
"url",
")",
"if",
"url_parts",
"is",
"None",
":",
"return",
"False",
"proto",
",",
"host",
",",
"port",
",",
"path",
"=",
"url_parts",
"if",
"proto",
"!=",
"self",
".",
"proto",
":",
"return",
"False",
"if",
"port",
"!=",
"self",
".",
"port",
":",
"return",
"False",
"if",
"'*'",
"in",
"host",
":",
"return",
"False",
"if",
"not",
"self",
".",
"wildcard",
":",
"if",
"host",
"!=",
"self",
".",
"host",
":",
"return",
"False",
"elif",
"(",
"(",
"not",
"host",
".",
"endswith",
"(",
"self",
".",
"host",
")",
")",
"and",
"(",
"'.'",
"+",
"host",
")",
"!=",
"self",
".",
"host",
")",
":",
"return",
"False",
"if",
"path",
"!=",
"self",
".",
"path",
":",
"path_len",
"=",
"len",
"(",
"self",
".",
"path",
")",
"trust_prefix",
"=",
"self",
".",
"path",
"[",
":",
"path_len",
"]",
"url_prefix",
"=",
"path",
"[",
":",
"path_len",
"]",
"# must be equal up to the length of the path, at least",
"if",
"trust_prefix",
"!=",
"url_prefix",
":",
"return",
"False",
"# These characters must be on the boundary between the end",
"# of the trust root's path and the start of the URL's",
"# path.",
"if",
"'?'",
"in",
"self",
".",
"path",
":",
"allowed",
"=",
"'&'",
"else",
":",
"allowed",
"=",
"'?/'",
"return",
"(",
"self",
".",
"path",
"[",
"-",
"1",
"]",
"in",
"allowed",
"or",
"path",
"[",
"path_len",
"]",
"in",
"allowed",
")",
"return",
"True"
] | Validates a URL against this trust root.
@param url: The URL to check
@type url: C{str}
@return: Whether the given URL is within this trust root.
@rtype: C{bool} | [
"Validates",
"a",
"URL",
"against",
"this",
"trust",
"root",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/trustroot.py#L190-L247 | train |
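A sketch of the host and path rules implemented above, against a made-up wildcard realm: subdomains match, paths must align on a `/` or `?` boundary, and the scheme must agree.

```python
from openid.server.trustroot import TrustRoot

realm = TrustRoot.parse('http://*.example.com/path')

assert realm.validateURL('http://www.example.com/path')           # subdomain ok
assert realm.validateURL('http://www.example.com/path/deeper')    # '/' boundary
assert not realm.validateURL('http://www.example.com/pathology')  # bad boundary
assert not realm.validateURL('https://www.example.com/path')      # scheme differs
```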
openid/python-openid | openid/server/trustroot.py | TrustRoot.checkSanity | def checkSanity(cls, trust_root_string):
"""str -> bool
is this a sane trust root?
"""
trust_root = cls.parse(trust_root_string)
if trust_root is None:
return False
else:
return trust_root.isSane() | python | def checkSanity(cls, trust_root_string):
"""str -> bool
is this a sane trust root?
"""
trust_root = cls.parse(trust_root_string)
if trust_root is None:
return False
else:
return trust_root.isSane() | [
"def",
"checkSanity",
"(",
"cls",
",",
"trust_root_string",
")",
":",
"trust_root",
"=",
"cls",
".",
"parse",
"(",
"trust_root_string",
")",
"if",
"trust_root",
"is",
"None",
":",
"return",
"False",
"else",
":",
"return",
"trust_root",
".",
"isSane",
"(",
")"
] | str -> bool
is this a sane trust root? | [
"str",
"-",
">",
"bool"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/trustroot.py#L303-L312 | train |
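A short sketch of the convenience check, as I read `isSane()`: realms must parse and must be specific enough to identify a site, so a wildcard over a bare top-level domain fails.

```python
from openid.server.trustroot import TrustRoot

assert TrustRoot.checkSanity('http://www.example.com/')
assert not TrustRoot.checkSanity('http://*.com/')  # wildcard over a whole TLD
assert not TrustRoot.checkSanity('not a url')      # fails to parse at all
```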
openid/python-openid | openid/server/trustroot.py | TrustRoot.checkURL | def checkURL(cls, trust_root, url):
"""quick func for validating a url against a trust root. See the
TrustRoot class if you need more control."""
tr = cls.parse(trust_root)
return tr is not None and tr.validateURL(url) | python | def checkURL(cls, trust_root, url):
"""quick func for validating a url against a trust root. See the
TrustRoot class if you need more control."""
tr = cls.parse(trust_root)
return tr is not None and tr.validateURL(url) | [
"def",
"checkURL",
"(",
"cls",
",",
"trust_root",
",",
"url",
")",
":",
"tr",
"=",
"cls",
".",
"parse",
"(",
"trust_root",
")",
"return",
"tr",
"is",
"not",
"None",
"and",
"tr",
".",
"validateURL",
"(",
"url",
")"
] | quick func for validating a url against a trust root. See the
TrustRoot class if you need more control. | [
"quick",
"func",
"for",
"validating",
"a",
"url",
"against",
"a",
"trust",
"root",
".",
"See",
"the",
"TrustRoot",
"class",
"if",
"you",
"need",
"more",
"control",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/trustroot.py#L316-L320 | train |
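A one-line-per-case sketch of the shortcut, with made-up URLs.

```python
from openid.server.trustroot import TrustRoot

# One-shot check without keeping the parsed TrustRoot around.
assert TrustRoot.checkURL('http://*.example.com/', 'http://www.example.com/login')
assert not TrustRoot.checkURL('http://example.com/', 'http://example.net/')
```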
openid/python-openid | openid/server/trustroot.py | TrustRoot.buildDiscoveryURL | def buildDiscoveryURL(self):
"""Return a discovery URL for this realm.
This function does not check to make sure that the realm is
valid. Its behaviour on invalid inputs is undefined.
@rtype: str
@returns: The URL upon which relying party discovery should be run
in order to verify the return_to URL
@since: 2.1.0
"""
if self.wildcard:
# Use "www." in place of the star
assert self.host.startswith('.'), self.host
www_domain = 'www' + self.host
return '%s://%s%s' % (self.proto, www_domain, self.path)
else:
return self.unparsed | python | def buildDiscoveryURL(self):
"""Return a discovery URL for this realm.
This function does not check to make sure that the realm is
valid. Its behaviour on invalid inputs is undefined.
@rtype: str
@returns: The URL upon which relying party discovery should be run
in order to verify the return_to URL
@since: 2.1.0
"""
if self.wildcard:
# Use "www." in place of the star
assert self.host.startswith('.'), self.host
www_domain = 'www' + self.host
return '%s://%s%s' % (self.proto, www_domain, self.path)
else:
return self.unparsed | [
"def",
"buildDiscoveryURL",
"(",
"self",
")",
":",
"if",
"self",
".",
"wildcard",
":",
"# Use \"www.\" in place of the star",
"assert",
"self",
".",
"host",
".",
"startswith",
"(",
"'.'",
")",
",",
"self",
".",
"host",
"www_domain",
"=",
"'www'",
"+",
"self",
".",
"host",
"return",
"'%s://%s%s'",
"%",
"(",
"self",
".",
"proto",
",",
"www_domain",
",",
"self",
".",
"path",
")",
"else",
":",
"return",
"self",
".",
"unparsed"
] | Return a discovery URL for this realm.
This function does not check to make sure that the realm is
valid. Its behaviour on invalid inputs is undefined.
@rtype: str
@returns: The URL upon which relying party discovery should be run
in order to verify the return_to URL
@since: 2.1.0 | [
"Return",
"a",
"discovery",
"URL",
"for",
"this",
"realm",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/trustroot.py#L324-L343 | train |
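A sketch of both branches, assuming `urinorm` leaves these already-normalized example URLs unchanged so `unparsed` equals the input string.

```python
from openid.server.trustroot import TrustRoot

# A wildcard realm substitutes "www" for the "*" before discovery.
realm = TrustRoot.parse('http://*.example.com/app/')
assert realm.buildDiscoveryURL() == 'http://www.example.com/app/'

# A non-wildcard realm is used verbatim.
exact = TrustRoot.parse('http://example.com/app/')
assert exact.buildDiscoveryURL() == 'http://example.com/app/'
```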
openid/python-openid | openid/yadis/manager.py | YadisServiceManager.next | def next(self):
"""Return the next service
self.current() will continue to return that service until the
next call to this method."""
try:
self._current = self.services.pop(0)
except IndexError:
raise StopIteration
else:
return self._current | python | def next(self):
"""Return the next service
self.current() will continue to return that service until the
next call to this method."""
try:
self._current = self.services.pop(0)
except IndexError:
raise StopIteration
else:
return self._current | [
"def",
"next",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_current",
"=",
"self",
".",
"services",
".",
"pop",
"(",
"0",
")",
"except",
"IndexError",
":",
"raise",
"StopIteration",
"else",
":",
"return",
"self",
".",
"_current"
] | Return the next service
self.current() will continue to return that service until the
next call to this method. | [
"Return",
"the",
"next",
"service"
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/manager.py#L27-L37 | train |
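A sketch of the iteration contract, with placeholder strings standing in for service endpoint objects; the constructor argument order shown (starting_url, yadis_url, services, session_key) is my reading of the class and should be treated as an assumption.

```python
from openid.yadis.manager import YadisServiceManager

manager = YadisServiceManager(
    'http://user.example.com/',        # starting_url
    'http://user.example.com/yadis',   # yadis_url
    ['service-1', 'service-2'],        # placeholder service objects
    '_yadis_services',                 # session_key
)

assert manager.next() == 'service-1'
assert manager.current() == 'service-1'  # stable until the next next() call
assert manager.next() == 'service-2'

try:
    manager.next()
except StopIteration:
    pass  # the service list is exhausted
```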
openid/python-openid | openid/yadis/manager.py | Discovery.getNextService | def getNextService(self, discover):
"""Return the next authentication service for the pair of
user_input and session. This function handles fallback.
@param discover: a callable that takes a URL and returns a
list of services
@type discover: str -> [service]
@return: the next available service
"""
manager = self.getManager()
if manager is not None and not manager:
self.destroyManager()
if not manager:
yadis_url, services = discover(self.url)
manager = self.createManager(services, yadis_url)
if manager:
service = manager.next()
manager.store(self.session)
else:
service = None
return service | python | def getNextService(self, discover):
"""Return the next authentication service for the pair of
user_input and session. This function handles fallback.
@param discover: a callable that takes a URL and returns a
list of services
@type discover: str -> [service]
@return: the next available service
"""
manager = self.getManager()
if manager is not None and not manager:
self.destroyManager()
if not manager:
yadis_url, services = discover(self.url)
manager = self.createManager(services, yadis_url)
if manager:
service = manager.next()
manager.store(self.session)
else:
service = None
return service | [
"def",
"getNextService",
"(",
"self",
",",
"discover",
")",
":",
"manager",
"=",
"self",
".",
"getManager",
"(",
")",
"if",
"manager",
"is",
"not",
"None",
"and",
"not",
"manager",
":",
"self",
".",
"destroyManager",
"(",
")",
"if",
"not",
"manager",
":",
"yadis_url",
",",
"services",
"=",
"discover",
"(",
"self",
".",
"url",
")",
"manager",
"=",
"self",
".",
"createManager",
"(",
"services",
",",
"yadis_url",
")",
"if",
"manager",
":",
"service",
"=",
"manager",
".",
"next",
"(",
")",
"manager",
".",
"store",
"(",
"self",
".",
"session",
")",
"else",
":",
"service",
"=",
"None",
"return",
"service"
] | Return the next authentication service for the pair of
user_input and session. This function handles fallback.
@param discover: a callable that takes a URL and returns a
list of services
@type discover: str -> [service]
@return: the next available service | [
"Return",
"the",
"next",
"authentication",
"service",
"for",
"the",
"pair",
"of",
"user_input",
"and",
"session",
".",
"This",
"function",
"handles",
"fallback",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/manager.py#L87-L114 | train |
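A consumer-side sketch of the fallback loop, assuming `openid.consumer.discover.discover` as the callable (it returns the pair this method unpacks) and a plain dict as the session; real use would perform network discovery.

```python
from openid.consumer.discover import discover
from openid.yadis.manager import Discovery

session = {}  # any dict-like per-user session object
disco = Discovery(session, 'http://user.example.com/',
                  session_key_suffix='auth')

# The first call runs discovery (over the network) and stores a manager
# in the session; subsequent calls fall back to the next service.
service = disco.getNextService(discover)
if service is None:
    pass  # no (more) services were discovered for this identifier
```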
openid/python-openid | openid/yadis/manager.py | Discovery.cleanup | def cleanup(self, force=False):
"""Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service
"""
manager = self.getManager(force=force)
if manager is not None:
service = manager.current()
self.destroyManager(force=force)
else:
service = None
return service | python | def cleanup(self, force=False):
"""Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service
"""
manager = self.getManager(force=force)
if manager is not None:
service = manager.current()
self.destroyManager(force=force)
else:
service = None
return service | [
"def",
"cleanup",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"manager",
"=",
"self",
".",
"getManager",
"(",
"force",
"=",
"force",
")",
"if",
"manager",
"is",
"not",
"None",
":",
"service",
"=",
"manager",
".",
"current",
"(",
")",
"self",
".",
"destroyManager",
"(",
"force",
"=",
"force",
")",
"else",
":",
"service",
"=",
"None",
"return",
"service"
] | Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service | [
"Clean",
"up",
"Yadis",
"-",
"related",
"services",
"in",
"the",
"session",
"and",
"return",
"the",
"most",
"-",
"recently",
"-",
"attempted",
"service",
"from",
"the",
"manager",
"if",
"one",
"exists",
"."
] | f7e13536f0d1828d3cef5ae7a7b55cabadff37fc | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/manager.py#L116-L134 | train |
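A final sketch tying `cleanup` into an authentication flow; the wrapper function and its session handling are illustrative assumptions.

```python
from openid.yadis.manager import Discovery

def finish_attempt(session, claimed_url):
    # After an authentication attempt, fetch the endpoint that was being
    # tried and destroy the manager so the next attempt rediscovers.
    disco = Discovery(session, claimed_url, session_key_suffix='auth')
    return disco.cleanup()  # None when no discovery was in progress
```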