# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""

import functools
import inspect
import textwrap
import threading
import types
import warnings
from inspect import signature

from .exceptions import (AstropyDeprecationWarning, AstropyUserWarning,
                         AstropyPendingDeprecationWarning)


__all__ = ['classproperty', 'deprecated', 'deprecated_attribute',
           'deprecated_renamed_argument', 'format_doc', 'lazyproperty',
           'sharedmethod']

_NotFound = object()


def deprecated(since, message='', name='', alternative='', pending=False,
               obj_type=None, warning_type=AstropyDeprecationWarning):
    """
    Used to mark a function or class as deprecated.

    To mark an attribute as deprecated, use `deprecated_attribute`.

    Parameters
    ----------
    since : str
        The release at which this API became deprecated.  This is
        required.

    message : str, optional
        Override the default deprecation message.  The format
        specifier ``func`` may be used for the name of the function,
        and ``alternative`` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        function.  ``obj_type`` may be used to insert a friendly name
        for the type of object being deprecated.

    name : str, optional
        The name of the deprecated function or class; if not provided
        the name is automatically determined from the passed in
        function or class, though this is useful in the case of
        renamed functions, where the new function is just assigned to
        the name of the deprecated function.  For example::

            def new_function():
                ...
            oldFunction = new_function

    alternative : str, optional
        An alternative function or class name that the user may use in
        place of the deprecated object.  The deprecation warning will
        tell the user about this alternative if provided.

    pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of a
        ``warning_type``.

    obj_type : str, optional
        The type of this object, if the automatically determined one
        needs to be overridden.

    warning_type : Warning
        Warning to be issued.
        Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
    """

    method_types = (classmethod, staticmethod, types.MethodType)

    def deprecate_doc(old_doc, message):
        """
        Returns a given docstring with a deprecation message prepended
        to it.
        """
        if not old_doc:
            old_doc = ''
        old_doc = textwrap.dedent(old_doc).strip('\n')
        new_doc = (('\n.. deprecated:: {since}'
                    '\n    {message}\n\n'.format(
                        **{'since': since, 'message': message.strip()})) +
                   old_doc)
        if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
            # docutils when the original docstring was blank.
            new_doc += r'\ '
        return new_doc

    def get_function(func):
        """
        Given a function or classmethod (or other function wrapper type), get
        the function object.
        """
        if isinstance(func, method_types):
            func = func.__func__
        return func

    def deprecate_function(func, message, warning_type=warning_type):
        """
        Returns a wrapped function that displays ``warning_type``
        when it is called.
        """

        if isinstance(func, method_types):
            func_wrapper = type(func)
        else:
            func_wrapper = lambda f: f  # noqa: E731

        func = get_function(func)

        def deprecated_func(*args, **kwargs):
            if pending:
                category = AstropyPendingDeprecationWarning
            else:
                category = warning_type

            warnings.warn(message, category, stacklevel=2)

            return func(*args, **kwargs)

        # If this is an extension function, we can't call
        # functools.wraps on it, but we normally don't care.
        # This crazy way to get the type of a wrapper descriptor is
        # straight out of the Python 3.3 inspect module docs.
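

# Illustrative sketch (not part of the original module): applying the
# ``deprecated`` decorator defined above.  The names ``old_mean`` and
# ``new_mean`` are hypothetical examples, not astropy API.
#
#     @deprecated('1.0', alternative='new_mean')
#     def old_mean(values):
#         return sum(values) / len(values)
#
# Calling ``old_mean([1, 2, 3])`` would then emit an
# AstropyDeprecationWarning pointing the user at ``new_mean``, and the
# docstring of ``old_mean`` gains a ``.. deprecated:: 1.0`` note.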
        if type(func) is not type(str.__dict__['__add__']):  # noqa: E721
            deprecated_func = functools.wraps(func)(deprecated_func)

        deprecated_func.__doc__ = deprecate_doc(
            deprecated_func.__doc__, message)

        return func_wrapper(deprecated_func)

    def deprecate_class(cls, message, warning_type=warning_type):
        """
        Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
        if the class or any of the bases overrides ``__new__``) so it will give
        a deprecation warning when an instance is created.

        This won't work for extension classes because these can't be modified
        in-place and the alternatives don't work in the general case:

        - Using a new class that looks and behaves like the original doesn't
          work because the __new__ method of extension types usually makes
          sure that it's the same class or a subclass.
        - Subclassing the class and returning the subclass can lead to
          problems with pickle and will look weird in the Sphinx docs.
        """
        cls.__doc__ = deprecate_doc(cls.__doc__, message)
        if cls.__new__ is object.__new__:
            cls.__init__ = deprecate_function(get_function(cls.__init__),
                                              message, warning_type)
        else:
            cls.__new__ = deprecate_function(get_function(cls.__new__),
                                             message, warning_type)
        return cls

    def deprecate(obj, message=message, name=name, alternative=alternative,
                  pending=pending, warning_type=warning_type):
        if obj_type is None:
            if isinstance(obj, type):
                obj_type_name = 'class'
            elif inspect.isfunction(obj):
                obj_type_name = 'function'
            elif inspect.ismethod(obj) or isinstance(obj, method_types):
                obj_type_name = 'method'
            else:
                obj_type_name = 'object'
        else:
            obj_type_name = obj_type

        if not name:
            name = get_function(obj).__name__

        altmessage = ''
        if not message or type(message) is type(deprecate):
            if pending:
                message = ('The {func} {obj_type} will be deprecated in a '
                           'future version.')
            else:
                message = ('The {func} {obj_type} is deprecated and may '
                           'be removed in a future version.')

            if alternative:
                altmessage = f'\n        Use {alternative} instead.'

        message = ((message.format(**{
            'func': name,
            'name': name,
            'alternative': alternative,
            'obj_type': obj_type_name})) + altmessage)

        if isinstance(obj, type):
            return deprecate_class(obj, message, warning_type)
        else:
            return deprecate_function(obj, message, warning_type)

    if type(message) is type(deprecate):
        return deprecate(message)

    return deprecate


def deprecated_attribute(name, since, message=None, alternative=None,
                         pending=False,
                         warning_type=AstropyDeprecationWarning):
    """
    Used to mark a public attribute as deprecated.  This creates a
    property that will warn when the given attribute name is accessed.
    To prevent the warning (i.e. for internal code), use the private
    name for the attribute by prepending an underscore
    (i.e. ``self._name``).

    Parameters
    ----------
    name : str
        The name of the deprecated attribute.

    since : str
        The release at which this API became deprecated.  This is
        required.

    message : str, optional
        Override the default deprecation message.  The format
        specifier ``name`` may be used for the name of the attribute,
        and ``alternative`` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        attribute.

    alternative : str, optional
        An alternative attribute that the user may use in place of the
        deprecated attribute.  The deprecation warning will tell the
        user about this alternative if provided.

    pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of
        ``warning_type``.

    warning_type : Warning
        Warning to be issued.
        Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
    Examples
    --------
    ::

        class MyClass:
            # Mark the old_name as deprecated
            old_name = misc.deprecated_attribute('old_name', '0.1')

            def method(self):
                self._old_name = 42
    """
    private_name = '_' + name

    specific_deprecated = deprecated(since, name=name, obj_type='attribute',
                                     message=message, alternative=alternative,
                                     pending=pending,
                                     warning_type=warning_type)

    @specific_deprecated
    def get(self):
        return getattr(self, private_name)

    @specific_deprecated
    def set(self, val):
        setattr(self, private_name, val)

    @specific_deprecated
    def delete(self):
        delattr(self, private_name)

    return property(get, set, delete)


def deprecated_renamed_argument(old_name, new_name, since,
                                arg_in_kwargs=False, relax=False,
                                pending=False,
                                warning_type=AstropyDeprecationWarning,
                                alternative='', message=''):
    """Deprecate a _renamed_ or _removed_ function argument.

    The decorator assumes that the argument with the ``old_name`` was removed
    from the function signature and the ``new_name`` replaced it at the
    **same position** in the signature.  If the ``old_name`` argument is
    given when calling the decorated function the decorator will catch it and
    issue a deprecation warning and pass it on as the ``new_name`` argument.

    Parameters
    ----------
    old_name : str or sequence of str
        The old name of the argument.

    new_name : str or sequence of str or None
        The new name of the argument.  Set this to `None` to remove the
        argument ``old_name`` instead of renaming it.

    since : str or number or sequence of str or number
        The release at which the old argument became deprecated.

    arg_in_kwargs : bool or sequence of bool, optional
        If the argument is not a named argument (for example it
        was meant to be consumed by ``**kwargs``) set this to ``True``.
        Otherwise the decorator will throw an Exception if the ``new_name``
        cannot be found in the signature of the decorated function.
        Default is ``False``.

    relax : bool or sequence of bool, optional
        If ``False`` a ``TypeError`` is raised if both ``new_name`` and
        ``old_name`` are given.  If ``True`` the value for ``new_name`` is
        used and a Warning is issued.
        Default is ``False``.

    pending : bool or sequence of bool, optional
        If ``True`` this will hide the deprecation warning and ignore the
        corresponding ``relax`` parameter value.
        Default is ``False``.

    warning_type : Warning
        Warning to be issued.
        Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.

    alternative : str, optional
        An alternative function or class name that the user may use in
        place of the deprecated object if ``new_name`` is None.  The
        deprecation warning will tell the user about this alternative if
        provided.

    message : str, optional
        A custom warning message.  If provided then the ``since`` and
        ``alternative`` options will have no effect.

    Raises
    ------
    TypeError
        If the new argument name cannot be found in the function
        signature and ``arg_in_kwargs`` was False, or if it is used to
        deprecate the name of the ``*args``- or ``**kwargs``-like
        arguments.  At runtime such an Error is raised if both the
        new_name and old_name were specified when calling the function
        and "relax=False".

    Notes
    -----
    The decorator should be applied to a function where the **name**
    of an argument was changed but the function otherwise applies the
    same logic.

    .. warning::
        If ``old_name`` is a list or tuple the ``new_name`` and ``since``
        must also be a list or tuple with the same number of entries.
        ``relax`` and ``arg_in_kwargs`` can be a single bool (applied to
        all) or also a list/tuple with the same number of entries as
        ``new_name``, etc.

    Examples
    --------
    The deprecation warnings are not shown in the following examples.

    To deprecate a positional or keyword argument::

        >>> from astropy.utils.decorators import deprecated_renamed_argument
        >>> @deprecated_renamed_argument('sig', 'sigma', '1.0')
        ... def test(sigma):
        ...     return sigma

        >>> test(2)
        2
        >>> test(sigma=2)
        2
        >>> test(sig=2)  # doctest: +SKIP
        2

    To deprecate an argument caught inside the ``**kwargs`` the
    ``arg_in_kwargs`` has to be set::

        >>> @deprecated_renamed_argument('sig', 'sigma', '1.0',
        ...                              arg_in_kwargs=True)
        ... def test(**kwargs):
        ...     return kwargs['sigma']

        >>> test(sigma=2)
        2
        >>> test(sig=2)  # doctest: +SKIP
        2

    By default providing the new and old keyword will lead to an Exception.
    If a Warning is desired set the ``relax`` argument::

        >>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True)
        ... def test(sigma):
        ...     return sigma

        >>> test(sig=2)  # doctest: +SKIP
        2

    It is also possible to replace multiple arguments.  The ``old_name``,
    ``new_name`` and ``since`` have to be `tuple` or `list` and contain the
    same number of entries::

        >>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'],
        ...                              ['1.0', 1.2])
        ... def test(alpha, beta):
        ...     return alpha, beta

        >>> test(a=2, b=3)  # doctest: +SKIP
        (2, 3)

    In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which
    is applied to all renamed arguments) or must also be a `tuple` or `list`
    with values for each of the arguments.
    """
    cls_iter = (list, tuple)
    if isinstance(old_name, cls_iter):
        n = len(old_name)
        # Assume that new_name and since are correct (tuple/list with the
        # appropriate length) in the spirit of the "consenting adults".
        # But the optional parameters may not be set, so if these are not
        # iterables wrap them.
        if not isinstance(arg_in_kwargs, cls_iter):
            arg_in_kwargs = [arg_in_kwargs] * n
        if not isinstance(relax, cls_iter):
            relax = [relax] * n
        if not isinstance(pending, cls_iter):
            pending = [pending] * n
        if not isinstance(message, cls_iter):
            message = [message] * n
    else:
        # To allow a uniform approach later on, wrap all arguments in lists.
        n = 1
        old_name = [old_name]
        new_name = [new_name]
        since = [since]
        arg_in_kwargs = [arg_in_kwargs]
        relax = [relax]
        pending = [pending]
        message = [message]

    def decorator(function):
        # The named arguments of the function.
        arguments = signature(function).parameters
        keys = list(arguments.keys())
        position = [None] * n

        for i in range(n):
            # Determine the position of the argument.
            if arg_in_kwargs[i]:
                pass
            else:
                if new_name[i] is None:
                    param = arguments[old_name[i]]
                elif new_name[i] in arguments:
                    param = arguments[new_name[i]]
                # In case the argument is not found in the list of arguments
                # the only remaining possibility is that it should be caught
                # by some kind of **kwargs argument.
                # This case has to be explicitly specified, otherwise throw
                # an exception!
                else:
                    raise TypeError(
                        f'"{new_name[i]}" was not specified in the function '
                        'signature. If it was meant to be part of '
                        '"**kwargs" then set "arg_in_kwargs" to "True"')

                # There are several possibilities now:

                # 1.) Positional or keyword argument:
                if param.kind == param.POSITIONAL_OR_KEYWORD:
                    if new_name[i] is None:
                        position[i] = keys.index(old_name[i])
                    else:
                        position[i] = keys.index(new_name[i])

                # 2.) Keyword only argument:
                elif param.kind == param.KEYWORD_ONLY:
                    # These cannot be specified by position.
                    position[i] = None

                # 3.) positional-only argument, varargs, varkwargs or some
                #     unknown type:
                else:
                    raise TypeError(f'cannot replace argument "{new_name[i]}" '
                                    f'of kind {repr(param.kind)}.')

        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            for i in range(n):
                msg = message[i] or (f'"{old_name[i]}" was deprecated in '
                                     f'version {since[i]} and will be removed '
                                     'in a future version. ')
                # The only way to have oldkeyword inside the function is
                # that it is passed as kwarg because the oldkeyword
                # parameter was renamed to newkeyword.
                if old_name[i] in kwargs:
                    value = kwargs.pop(old_name[i])
                    # Display the deprecation warning only when it's not
                    # pending.
                    if not pending[i]:
                        if not message[i]:
                            if new_name[i] is not None:
                                msg += f'Use argument "{new_name[i]}" instead.'
                            elif alternative:
                                msg += f'\n        Use {alternative} instead.'
                        warnings.warn(msg, warning_type, stacklevel=2)

                    # Check if the newkeyword was given as well.
                    newarg_in_args = (position[i] is not None and
                                      len(args) > position[i])
                    newarg_in_kwargs = new_name[i] in kwargs

                    if newarg_in_args or newarg_in_kwargs:
                        if not pending[i]:
                            # If both are given print a Warning if relax is
                            # True or raise an Exception if relax is False.
                            if relax[i]:
                                warnings.warn(
                                    f'"{old_name[i]}" and "{new_name[i]}" '
                                    'keywords were set. '
                                    f'Using the value of "{new_name[i]}".',
                                    AstropyUserWarning)
                            else:
                                raise TypeError(
                                    f'cannot specify both "{old_name[i]}" and '
                                    f'"{new_name[i]}".')
                    else:
                        # Pass the value of the old argument with the
                        # name of the new argument to the function
                        if new_name[i] is not None:
                            kwargs[new_name[i]] = value
                        # If old argument has no replacement, cast it back.
                        # https://github.com/astropy/astropy/issues/9914
                        else:
                            kwargs[old_name[i]] = value

                # Deprecated keyword without replacement is given as
                # positional argument.
                elif (not pending[i] and not new_name[i] and position[i] and
                        len(args) > position[i]):
                    if alternative and not message[i]:
                        msg += f'\n        Use {alternative} instead.'
                    warnings.warn(msg, warning_type, stacklevel=2)

            return function(*args, **kwargs)

        return wrapper

    return decorator


# TODO: This can still be made to work for setters by implementing an
# accompanying metaclass that supports it; we just don't need that right
# this second
class classproperty(property):
    """
    Similar to `property`, but allows class-level properties.  That is,
    a property whose getter is like a `classmethod`.

    The wrapped method may explicitly use the `classmethod` decorator (which
    must come before this decorator), or the `classmethod` may be omitted
    (it is implicit through use of this decorator).

    .. note::

        classproperty only works for *read-only* properties.  It does not
        currently allow writeable/deletable properties, due to subtleties of
        how Python descriptors work.  In order to implement such properties
        on a class a metaclass for that class must be implemented.

    Parameters
    ----------
    fget : callable
        The function that computes the value of this property (in particular,
        the function when this is used as a decorator) a la `property`.

    doc : str, optional
        The docstring for the property--by default inherited from the getter
        function.

    lazy : bool, optional
        If True, caches the value returned by the first call to the getter
        function, so that it is only called once (used for lazy evaluation
        of an attribute).  This is analogous to `lazyproperty`.  The ``lazy``
        argument can also be used when `classproperty` is used as a decorator
        (see the third example below).  When used in the decorator syntax
        this *must* be passed in as a keyword argument.

    Examples
    --------
    ::

        >>> class Foo:
        ...     _bar_internal = 1
        ...     @classproperty
        ...     def bar(cls):
        ...         return cls._bar_internal + 1
        ...
        >>> Foo.bar
        2
        >>> foo_instance = Foo()
        >>> foo_instance.bar
        2
        >>> foo_instance._bar_internal = 2
        >>> foo_instance.bar  # Ignores instance attributes
        2

    As previously noted, a `classproperty` is limited to implementing
    read-only attributes::

        >>> class Foo:
        ...     _bar_internal = 1
        ...     @classproperty
        ...     def bar(cls):
        ...         return cls._bar_internal
        ...     @bar.setter
        ...     def bar(cls, value):
        ...         cls._bar_internal = value
        ...
        Traceback (most recent call last):
        ...
        NotImplementedError: classproperty can only be read-only; use a
        metaclass to implement modifiable class-level properties

    When the ``lazy`` option is used, the getter is only called once::

        >>> class Foo:
        ...     @classproperty(lazy=True)
        ...     def bar(cls):
        ...         print("Performing complicated calculation")
        ...         return 1
        ...
        >>> Foo.bar
        Performing complicated calculation
        1
        >>> Foo.bar
        1

    If a subclass inherits a lazy `classproperty` the property is still
    re-evaluated for the subclass::

        >>> class FooSub(Foo):
        ...     pass
        ...
        >>> FooSub.bar
        Performing complicated calculation
        1
        >>> FooSub.bar
        1
    """

    def __new__(cls, fget=None, doc=None, lazy=False):
        if fget is None:
            # Being used as a decorator--return a wrapper that implements
            # decorator syntax
            def wrapper(func):
                return cls(func, lazy=lazy)

            return wrapper

        return super().__new__(cls)

    def __init__(self, fget, doc=None, lazy=False):
        self._lazy = lazy
        if lazy:
            self._lock = threading.RLock()   # Protects _cache
            self._cache = {}
        fget = self._wrap_fget(fget)

        super().__init__(fget=fget, doc=doc)

        # There is a buglet in Python where self.__doc__ doesn't
        # get set properly on instances of property subclasses if
        # the doc argument was used rather than taking the docstring
        # from fget
        # Related Python issue: https://bugs.python.org/issue24766
        if doc is not None:
            self.__doc__ = doc

    def __get__(self, obj, objtype):
        if self._lazy:
            val = self._cache.get(objtype, _NotFound)
            if val is _NotFound:
                with self._lock:
                    # Check if another thread initialised before we locked.
                    val = self._cache.get(objtype, _NotFound)
                    if val is _NotFound:
                        val = self.fget.__wrapped__(objtype)
                        self._cache[objtype] = val
        else:
            # The base property.__get__ will just return self here;
            # instead we pass objtype through to the original wrapped
            # function (which takes the class as its sole argument)
            val = self.fget.__wrapped__(objtype)

        return val

    def getter(self, fget):
        return super().getter(self._wrap_fget(fget))

    def setter(self, fset):
        raise NotImplementedError(
            "classproperty can only be read-only; use a metaclass to "
            "implement modifiable class-level properties")

    def deleter(self, fdel):
        raise NotImplementedError(
            "classproperty can only be read-only; use a metaclass to "
            "implement modifiable class-level properties")

    @staticmethod
    def _wrap_fget(orig_fget):
        if isinstance(orig_fget, classmethod):
            orig_fget = orig_fget.__func__

        # Using stock functools.wraps instead of the fancier version
        # found later in this module, which is overkill for this purpose
        @functools.wraps(orig_fget)
        def fget(obj):
            return orig_fget(obj.__class__)

        return fget


# Adapted from the recipe at
# http://code.activestate.com/recipes/363602-lazy-property-evaluation
class lazyproperty(property):
    """
    Works similarly to property(), but computes the value only once.

    This essentially memoizes the value of the property by storing the result
    of its computation in the ``__dict__`` of the object instance.  This is
    useful for computing the value of some property that should otherwise be
    invariant.  For example::

        >>> class LazyTest:
        ...     @lazyproperty
        ...     def complicated_property(self):
        ...         print('Computing the value for complicated_property...')
        ...         return 42
        ...
        >>> lt = LazyTest()
        >>> lt.complicated_property
        Computing the value for complicated_property...
        42
        >>> lt.complicated_property
        42

    As the example shows, the second time ``complicated_property`` is
    accessed, the ``print`` statement is not executed.  Only the return value
    from the first access of ``complicated_property`` is returned.

    By default, a setter and deleter are used which simply overwrite and
    delete, respectively, the value stored in ``__dict__``. Any user-specified
    setter or deleter is executed before executing these default actions.
    The one exception is that the default setter is not run if the user setter
    already sets the new value in ``__dict__`` and returns that value and the
    returned value is not ``None``.
    """

    def __init__(self, fget, fset=None, fdel=None, doc=None):
        super().__init__(fget, fset, fdel, doc)
        self._key = self.fget.__name__
        self._lock = threading.RLock()

    def __get__(self, obj, owner=None):
        try:
            obj_dict = obj.__dict__
            val = obj_dict.get(self._key, _NotFound)
            if val is _NotFound:
                with self._lock:
                    # Check if another thread beat us to it.
                    val = obj_dict.get(self._key, _NotFound)
                    if val is _NotFound:
                        val = self.fget(obj)
                        obj_dict[self._key] = val
            return val
        except AttributeError:
            if obj is None:
                return self
            raise

    def __set__(self, obj, val):
        obj_dict = obj.__dict__
        if self.fset:
            ret = self.fset(obj, val)
            if ret is not None and obj_dict.get(self._key) is ret:
                # By returning the value set the setter signals that it
                # took over setting the value in obj.__dict__; this
                # mechanism allows it to override the input value
                return
        obj_dict[self._key] = val

    def __delete__(self, obj):
        if self.fdel:
            self.fdel(obj)
        obj.__dict__.pop(self._key, None)    # Delete if present


class sharedmethod(classmethod):
    """
    This is a method decorator that allows both an instancemethod and a
    `classmethod` to share the same name.

    When using `sharedmethod` on a method defined in a class's body, it
    may be called on an instance, or on a class.  In the former case it
    behaves like a normal instance method (a reference to the instance is
    automatically passed as the first ``self`` argument of the method)::

        >>> class Example:
        ...     @sharedmethod
        ...     def identify(self, *args):
        ...         print('self was', self)
        ...         print('additional args were', args)
        ...
        >>> ex = Example()
        >>> ex.identify(1, 2)
        self was <astropy.utils.decorators.Example object at 0x...>
        additional args were (1, 2)

    In the latter case, when the `sharedmethod` is called directly from a
    class, it behaves like a `classmethod`::

        >>> Example.identify(3, 4)
        self was <class 'astropy.utils.decorators.Example'>
        additional args were (3, 4)

    This also supports a more advanced usage, where the `classmethod`
    implementation can be written separately.  If the class's *metaclass*
    has a method of the same name as the `sharedmethod`, the version on
    the metaclass is delegated to::

        >>> class ExampleMeta(type):
        ...     def identify(self):
        ...         print('this implements the {0}.identify '
        ...               'classmethod'.format(self.__name__))
        ...
        >>> class Example(metaclass=ExampleMeta):
        ...     @sharedmethod
        ...     def identify(self):
        ...         print('this implements the instancemethod')
        ...
        >>> Example().identify()
        this implements the instancemethod
        >>> Example.identify()
        this implements the Example.identify classmethod
    """

    def __get__(self, obj, objtype=None):
        if obj is None:
            mcls = type(objtype)
            clsmeth = getattr(mcls, self.__func__.__name__, None)
            if callable(clsmeth):
                func = clsmeth
            else:
                func = self.__func__
            return self._make_method(func, objtype)
        else:
            return self._make_method(self.__func__, obj)

    @staticmethod
    def _make_method(func, instance):
        return types.MethodType(func, instance)


def format_doc(docstring, *args, **kwargs):
    """
    Replaces the docstring of the decorated object and then formats it.

    The formatting works like :meth:`str.format` and if the decorated object
    already has a docstring this docstring can be included in the new
    documentation if you use the ``{__doc__}`` placeholder.
    Its primary use is for reusing a *long* docstring in multiple functions
    when it is the same or only slightly different between them.

    Parameters
    ----------
    docstring : str or object or None
        The docstring that will replace the docstring of the decorated
        object.  If it is an object like a function or class it will
        take the docstring of this object.  If it is a string it will use the
        string itself.  One special case is if the string is ``None`` then
        it will use the decorated function's docstring and format it.

    args :
        passed to :meth:`str.format`.

    kwargs :
        passed to :meth:`str.format`.  If the function has a (not empty)
        docstring the original docstring is added to the kwargs with the
        keyword ``'__doc__'``.

    Raises
    ------
    ValueError
        If the ``docstring`` (or interpreted docstring if it was ``None``
        or not a string) is empty.

    IndexError, KeyError
        If a placeholder in the (interpreted) ``docstring`` was not filled;
        see :meth:`str.format` for more information.

    Notes
    -----
    Using this decorator allows, for example, Sphinx to parse the
    correct docstring.

    Examples
    --------

    Replacing the current docstring is very easy::

        >>> from astropy.utils.decorators import format_doc
        >>> @format_doc('''Perform num1 + num2''')
        ... def add(num1, num2):
        ...     return num1+num2
        ...
        >>> help(add)  # doctest: +SKIP
        Help on function add in module __main__:
        <BLANKLINE>
        add(num1, num2)
            Perform num1 + num2

    Sometimes instead of replacing you only want to add to it::

        >>> doc = '''
        ...       {__doc__}
        ...       Parameters
        ...       ----------
        ...       num1, num2 : Numbers
        ...       Returns
        ...       -------
        ...       result: Number
        ...       '''
        >>> @format_doc(doc)
        ... def add(num1, num2):
        ...     '''Perform addition.'''
        ...     return num1+num2
        ...
        >>> help(add)  # doctest: +SKIP
        Help on function add in module __main__:
        <BLANKLINE>
        add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number

    In case one might want to format it further::

        >>> doc = '''
        ...       Perform {0}.
        ...       Parameters
        ...       ----------
        ...       num1, num2 : Numbers
        ...       Returns
        ...       -------
        ...       result: Number
        ...           result of num1 {op} num2
        ...       {__doc__}
        ...       '''
        >>> @format_doc(doc, 'addition', op='+')
        ... def add(num1, num2):
        ...     return num1+num2
        ...
        >>> @format_doc(doc, 'subtraction', op='-')
        ... def subtract(num1, num2):
        ...     '''Notes: This one has additional notes.'''
        ...     return num1-num2
        ...
        >>> help(add)  # doctest: +SKIP
        Help on function add in module __main__:
        <BLANKLINE>
        add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 + num2
        >>> help(subtract)  # doctest: +SKIP
        Help on function subtract in module __main__:
        <BLANKLINE>
        subtract(num1, num2)
            Perform subtraction.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 - num2
            Notes : This one has additional notes.

    These methods can be combined, and even taking the docstring from another
    object is possible as docstring attribute.  You just have to specify the
    object::

        >>> @format_doc(add)
        ... def another_add(num1, num2):
        ...     return num1 + num2
        ...
        >>> help(another_add)  # doctest: +SKIP
        Help on function another_add in module __main__:
        <BLANKLINE>
        another_add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 + num2

    But be aware that this decorator *only* formats the given docstring, not
    the strings passed as ``args`` or ``kwargs`` (not even the original
    docstring)::

        >>> @format_doc(doc, 'addition', op='+')
        ... def yet_another_add(num1, num2):
        ...     '''This one is good for {0}.'''
        ...     return num1 + num2
        ...
        >>> help(yet_another_add)  # doctest: +SKIP
        Help on function yet_another_add in module __main__:
        <BLANKLINE>
        yet_another_add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 + num2
            This one is good for {0}.

    To work around it you could specify the docstring to be ``None``::

        >>> @format_doc(None, 'addition')
        ... def last_add_i_swear(num1, num2):
        ...     '''This one is good for {0}.'''
        ...     return num1 + num2
        ...
        >>> help(last_add_i_swear)  # doctest: +SKIP
        Help on function last_add_i_swear in module __main__:
        <BLANKLINE>
        last_add_i_swear(num1, num2)
            This one is good for addition.

    Using it with ``None`` as docstring allows using the decorator twice
    on an object to first parse the new docstring and then to parse the
    original docstring or the ``args`` and ``kwargs``.
    """
    def set_docstring(obj):
        if docstring is None:
            # None means: use the objects __doc__
            doc = obj.__doc__
            # Delete documentation in this case so we don't end up with
            # awkwardly self-inserted docs.
            obj.__doc__ = None
        elif isinstance(docstring, str):
            # String: use the string that was given
            doc = docstring
        else:
            # Something else: Use the __doc__ of this
            doc = docstring.__doc__

        if not doc:
            # In case the docstring is empty it's probably not what was
            # wanted.
            raise ValueError('docstring must be a string or contain a '
                             'docstring that is not empty.')

        # If the original has a not-empty docstring append it to the format
        # kwargs.
        kwargs['__doc__'] = obj.__doc__ or ''
        obj.__doc__ = doc.format(*args, **kwargs)
        return obj

    return set_docstring


# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for generating new Python code at runtime."""


import inspect
import itertools
import keyword
import os
import re
import textwrap

from .introspection import find_current_module


__all__ = ['make_function_with_signature']


_ARGNAME_RE = re.compile(r'^[A-Za-z][A-Za-z_]*')
"""
Regular expression used by make_func which limits the allowed argument
names for the created function.  Only valid Python variable names in
the ASCII range and not beginning with '_' are allowed, currently.
"""


def make_function_with_signature(func, args=(), kwargs={}, varargs=None,
                                 varkwargs=None, name=None):
    """
    Make a new function from an existing function but with the desired
    signature.

    The desired signature must of course be compatible with the arguments
    actually accepted by the input function.

    The ``args`` are strings that should be the names of the positional
    arguments.  ``kwargs`` can map names of keyword arguments to their
    default values.  It may be either a ``dict`` or a list of ``(keyword,
    default)`` tuples.

    If ``varargs`` is a string it is added to the positional arguments as
    ``*<varargs>``.  Likewise ``varkwargs`` can be the name for a variable
    keyword argument placeholder like ``**<varkwargs>``.

    If not specified the name of the new function is taken from the original
    function.  Otherwise, the ``name`` argument can be used to specify a new
    name.

    Note, the names may only be valid Python variable names.
    """
    pos_args = []
    key_args = []

    if isinstance(kwargs, dict):
        iter_kwargs = kwargs.items()
    else:
        iter_kwargs = iter(kwargs)

    # Check that all the argument names are valid
    for item in itertools.chain(args, iter_kwargs):
        if isinstance(item, tuple):
            argname = item[0]
            key_args.append(item)
        else:
            argname = item
            pos_args.append(item)

        if keyword.iskeyword(argname) or not _ARGNAME_RE.match(argname):
            raise SyntaxError(f'invalid argument name: {argname}')

    for item in (varargs, varkwargs):
        if item is not None:
            if keyword.iskeyword(item) or not _ARGNAME_RE.match(item):
                raise SyntaxError(f'invalid argument name: {item}')

    def_signature = [', '.join(pos_args)]

    if varargs:
        def_signature.append(f', *{varargs}')

    call_signature = def_signature[:]

    if name is None:
        name = func.__name__

    global_vars = {f'__{name}__func': func}
    local_vars = {}
    # Make local variables to handle setting the default args
    for idx, item in enumerate(key_args):
        key, value = item
        default_var = f'_kwargs{idx}'
        local_vars[default_var] = value
        def_signature.append(f', {key}={default_var}')
        call_signature.append(', {0}={0}'.format(key))

    if varkwargs:
        def_signature.append(f', **{varkwargs}')
        call_signature.append(f', **{varkwargs}')

    def_signature = ''.join(def_signature).lstrip(', ')
    call_signature = ''.join(call_signature).lstrip(', ')

    mod = find_current_module(2)
    frm = inspect.currentframe().f_back

    if mod:
        filename = mod.__file__
        modname = mod.__name__
        if filename.endswith('.pyc'):
            filename = os.path.splitext(filename)[0] + '.py'
    else:
        filename = '<string>'
        modname = '__main__'

    # Subtract 2 from the line number since the length of the template itself
    # is two lines.  Therefore we have to subtract those off in order for the
    # pointer in tracebacks from __{name}__func to point to the right spot.
    lineno = frm.f_lineno - 2

    # The lstrip is in case there were *no* positional arguments (a rare case)
    # in any context this will actually be used...
    template = textwrap.dedent("""{0}\
    def {name}({sig1}):
        return __{name}__func({sig2})
    """.format('\n' * lineno, name=name, sig1=def_signature,
               sig2=call_signature))

    code = compile(template, filename, 'single')

    eval(code, global_vars, local_vars)

    new_func = local_vars[name]

    new_func.__module__ = modname
    new_func.__doc__ = func.__doc__

    return new_func
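

# Illustrative sketch (not part of the original module): building a wrapper
# with an explicit signature around a generic ``*args, **kwargs`` function.
# The names ``_generic`` and ``scaled_sum`` are hypothetical.
#
#     def _generic(*args, **kwargs):
#         return sum(args) * kwargs.get('scale', 1)
#
#     scaled_sum = make_function_with_signature(
#         _generic, args=('a', 'b'), kwargs={'scale': 2}, name='scaled_sum')
#
# ``scaled_sum(1, 2)`` would then return 6, and
# ``inspect.signature(scaled_sum)`` would report ``(a, b, scale=2)`` rather
# than ``(*args, **kwargs)``.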


# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for console input and output.
"""

import codecs
import locale
import re
import math
import multiprocessing
import os
import struct
import sys
import threading
import time

from concurrent.futures import ProcessPoolExecutor, as_completed

try:
    import fcntl
    import termios
    import signal
    _CAN_RESIZE_TERMINAL = True
except ImportError:
    _CAN_RESIZE_TERMINAL = False

from astropy import conf

from .misc import isiterable
from .decorators import classproperty


__all__ = [
    'isatty', 'color_print', 'human_time', 'human_file_size',
    'ProgressBar', 'Spinner', 'print_code_line', 'ProgressBarOrSpinner',
    'terminal_size']

_DEFAULT_ENCODING = 'utf-8'


class _IPython:
    """Singleton class given access to IPython streams, etc."""

    @classproperty
    def get_ipython(cls):
        try:
            from IPython import get_ipython
        except ImportError:
            pass
        return get_ipython

    @classproperty
    def OutStream(cls):
        if not hasattr(cls, '_OutStream'):
            cls._OutStream = None
            try:
                cls.get_ipython()
            except NameError:
                return None

            try:
                from ipykernel.iostream import OutStream
            except ImportError:
                try:
                    from IPython.zmq.iostream import OutStream
                except ImportError:
                    from IPython import version_info
                    if version_info[0] >= 4:
                        return None

                    try:
                        from IPython.kernel.zmq.iostream import OutStream
                    except ImportError:
                        return None

            cls._OutStream = OutStream

        return cls._OutStream

    @classproperty
    def ipyio(cls):
        if not hasattr(cls, '_ipyio'):
            try:
                from IPython.utils import io
            except ImportError:
                cls._ipyio = None
            else:
                cls._ipyio = io
        return cls._ipyio

    @classmethod
    def get_stream(cls, stream):
        return getattr(cls.ipyio, stream)


def _get_stdout(stderr=False):
    """
    This utility function contains the logic to determine what streams to use
    by default for standard out/err.

    Typically this will just return `sys.stdout`, but it contains additional
    logic for use in IPython on Windows to determine the correct stream to use
    (usually ``IPython.util.io.stdout`` but only if sys.stdout is a TTY).
    """
    if stderr:
        stream = 'stderr'
    else:
        stream = 'stdout'

    sys_stream = getattr(sys, stream)
    return sys_stream


def isatty(file):
    """
    Returns `True` if ``file`` is a tty.

    Most built-in Python file-like objects have an `isatty` member,
    but some user-defined types may not, so this assumes those are not
    ttys.
    """
    if (multiprocessing.current_process().name != 'MainProcess' or
            threading.current_thread().name != 'MainThread'):
        return False

    if hasattr(file, 'isatty'):
        return file.isatty()

    if _IPython.OutStream is None or (not isinstance(file,
                                                     _IPython.OutStream)):
        return False

    # File is an IPython OutStream.  Check whether:
    # - File name is 'stdout'; or
    # - File wraps a Console
    if getattr(file, 'name', None) == 'stdout':
        return True

    if hasattr(file, 'stream'):
        # FIXME: pyreadline has not had a new release since 2015, drop it
        # when the IPython minversion is 5.x.
        # On Windows, in IPython 2 the standard I/O streams will wrap
        # pyreadline.Console objects if pyreadline is available; this should
        # be considered a TTY.
        try:
            from pyreadline.console import Console as PyreadlineConsole
        except ImportError:
            return False

        return isinstance(file.stream, PyreadlineConsole)

    return False


def terminal_size(file=None):
    """
    Returns a tuple (height, width) containing the height and width of
    the terminal.

    This function will look for the width and height in multiple areas
    before falling back on the width and height in astropy's
    configuration.
    """
    if file is None:
        file = _get_stdout()

    try:
        s = struct.pack("HHHH", 0, 0, 0, 0)
        x = fcntl.ioctl(file, termios.TIOCGWINSZ, s)
        (lines, width, xpixels, ypixels) = struct.unpack("HHHH", x)
        if lines > 12:
            lines -= 6
        if width > 10:
            width -= 1
        if lines <= 0 or width <= 0:
            raise Exception('unable to get terminal size')
        return (lines, width)
    except Exception:
        try:
            # see if POSIX standard variables will work
            return (int(os.environ.get('LINES')),
                    int(os.environ.get('COLUMNS')))
        except TypeError:
            # fall back on configuration variables, or if not
            # set, (25, 80)
            lines = conf.max_lines
            width = conf.max_width
            if lines is None:
                lines = 25
            if width is None:
                width = 80
            return lines, width
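

# Illustrative sketch (not part of the original module): ``terminal_size``
# can be used to adapt output width to the current console.
#
#     height, width = terminal_size()
#     print('-' * width)   # a horizontal rule spanning the terminal
#
# On a non-tty stream the ioctl fails and the function falls back to the
# LINES/COLUMNS environment variables, then to astropy's configuration.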
""" if file is None: file = _get_stdout() try: s = struct.pack("HHHH", 0, 0, 0, 0) x = fcntl.ioctl(file, termios.TIOCGWINSZ, s) (lines, width, xpixels, ypixels) = struct.unpack("HHHH", x) if lines > 12: lines -= 6 if width > 10: width -= 1 if lines <= 0 or width <= 0: raise Exception('unable to get terminal size') return (lines, width) except Exception: try: # see if POSIX standard variables will work return (int(os.environ.get('LINES')), int(os.environ.get('COLUMNS'))) except TypeError: # fall back on configuration variables, or if not # set, (25, 80) lines = conf.max_lines width = conf.max_width if lines is None: lines = 25 if width is None: width = 80 return lines, width def _color_text(text, color): """ Returns a string wrapped in ANSI color codes for coloring the text in a terminal:: colored_text = color_text('Here is a message', 'blue') This won't actually effect the text until it is printed to the terminal. Parameters ---------- text : str The string to return, bounded by the color codes. color : str An ANSI terminal color name. Must be one of: black, red, green, brown, blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen, yellow, lightblue, lightmagenta, lightcyan, white, or '' (the empty string). """ color_mapping = { 'black': '0;30', 'red': '0;31', 'green': '0;32', 'brown': '0;33', 'blue': '0;34', 'magenta': '0;35', 'cyan': '0;36', 'lightgrey': '0;37', 'default': '0;39', 'darkgrey': '1;30', 'lightred': '1;31', 'lightgreen': '1;32', 'yellow': '1;33', 'lightblue': '1;34', 'lightmagenta': '1;35', 'lightcyan': '1;36', 'white': '1;37'} if sys.platform == 'win32' and _IPython.OutStream is None: # On Windows do not colorize text unless in IPython return text color_code = color_mapping.get(color, '0;39') return f'\033[{color_code}m{text}\033[0m' def _decode_preferred_encoding(s): """Decode the supplied byte string using the preferred encoding for the locale (`locale.getpreferredencoding`) or, if the default encoding is invalid, fall back first on utf-8, then on latin-1 if the message cannot be decoded with utf-8. """ enc = locale.getpreferredencoding() try: try: return s.decode(enc) except LookupError: enc = _DEFAULT_ENCODING return s.decode(enc) except UnicodeDecodeError: return s.decode('latin-1') def _write_with_fallback(s, write, fileobj): """Write the supplied string with the given write function like ``write(s)``, but use a writer for the locale's preferred encoding in case of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or 'latin-1'. """ try: write(s) return write except UnicodeEncodeError: # Let's try the next approach... pass enc = locale.getpreferredencoding() try: Writer = codecs.getwriter(enc) except LookupError: Writer = codecs.getwriter(_DEFAULT_ENCODING) f = Writer(fileobj) write = f.write try: write(s) return write except UnicodeEncodeError: Writer = codecs.getwriter('latin-1') f = Writer(fileobj) write = f.write # If this doesn't work let the exception bubble up; I'm out of ideas write(s) return write def color_print(*args, end='\n', **kwargs): """ Prints colors and styles to the terminal uses ANSI escape sequences. :: color_print('This is the color ', 'default', 'GREEN', 'green') Parameters ---------- positional args : str The positional arguments come in pairs (*msg*, *color*), where *msg* is the string to display and *color* is the color to display it in. *color* is an ANSI terminal color name. 


def strip_ansi_codes(s):
    """
    Remove ANSI color codes from the string.
    """
    return re.sub('\033\\[([0-9]+)(;[0-9]+)*m', '', s)


def human_time(seconds):
    """
    Returns a human-friendly time string that is always exactly 6
    characters long.

    Depending on the number of seconds given, can be one of::

        1w 3d
        2d 4h
        1h 5m
        1m 4s
          15s

    Will be in color if console coloring is turned on.

    Parameters
    ----------
    seconds : int
        The number of seconds to represent

    Returns
    -------
    time : str
        A human-friendly representation of the given number of seconds
        that is always exactly 6 characters.
    """
    units = [
        ('y', 60 * 60 * 24 * 7 * 52),
        ('w', 60 * 60 * 24 * 7),
        ('d', 60 * 60 * 24),
        ('h', 60 * 60),
        ('m', 60),
        ('s', 1),
    ]

    seconds = int(seconds)

    if seconds < 60:
        return f'   {seconds:2d}s'
    for i in range(len(units) - 1):
        unit1, limit1 = units[i]
        unit2, limit2 = units[i + 1]
        if seconds >= limit1:
            return '{:2d}{}{:2d}{}'.format(
                seconds // limit1, unit1,
                (seconds % limit1) // limit2, unit2)
    return '  ~inf'


def human_file_size(size):
    """
    Returns a human-friendly string representing a file size
    that is 2-4 characters long.

    For example, depending on the number of bytes given, can be one
    of::

        256b
        64k
        1.1G

    Parameters
    ----------
    size : int
        The size of the file (in bytes)

    Returns
    -------
    size : str
        A human-friendly representation of the size of the file
    """
    if hasattr(size, 'unit'):
        # Import units only if necessary because the import takes a
        # significant time [#4649]
        from astropy import units as u
        size = u.Quantity(size, u.byte).value

    suffixes = ' kMGTPEZY'
    if size == 0:
        num_scale = 0
    else:
        num_scale = int(math.floor(math.log(size) / math.log(1000)))
    if num_scale > 7:
        suffix = '?'
    else:
        suffix = suffixes[num_scale]
    num_scale = int(math.pow(1000, num_scale))
    value = size / num_scale
    str_value = str(value)
    if suffix == ' ':
        str_value = str_value[:str_value.index('.')]
    elif str_value[2] == '.':
        str_value = str_value[:2]
    else:
        str_value = str_value[:3]
    return f"{str_value:>3s}{suffix}"
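

# Illustrative sketch (not part of the original module): sample outputs of
# the two helpers above.
#
#     human_time(3700)            # -> ' 1h 1m' (always 6 characters)
#     human_file_size(1_100_000)  # -> '1.1M'
#
# ``human_file_size`` also accepts an astropy ``Quantity`` with a byte-like
# unit, since it checks for a ``unit`` attribute before converting.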


class _mapfunc(object):
    """
    A function wrapper to support ProgressBar.map().
    """

    def __init__(self, func):
        self._func = func

    def __call__(self, i_arg):
        i, arg = i_arg
        return i, self._func(arg)


class ProgressBar:
    """
    A class to display a progress bar in the terminal.

    It is designed to be used either with the ``with`` statement::

        with ProgressBar(len(items)) as bar:
            for item in enumerate(items):
                bar.update()

    or as a generator::

        for item in ProgressBar(items):
            item.process()
    """

    def __init__(self, total_or_items, ipython_widget=False, file=None):
        """
        Parameters
        ----------
        total_or_items : int or sequence
            If an int, the number of increments in the process being
            tracked.  If a sequence, the items to iterate over.

        ipython_widget : bool, optional
            If `True`, the progress bar will display as an IPython
            notebook widget.

        file : writable file-like, optional
            The file to write the progress bar to.  Defaults to
            `sys.stdout`.  If ``file`` is not a tty (as determined by
            calling its `isatty` member, if any, or special case hacks
            to detect the IPython console), the progress bar will be
            completely silent.
        """
        if file is None:
            file = _get_stdout()

        if not ipython_widget and not isatty(file):
            self.update = self._silent_update
            self._silent = True
        else:
            self._silent = False

        if isiterable(total_or_items):
            self._items = iter(total_or_items)
            self._total = len(total_or_items)
        else:
            try:
                self._total = int(total_or_items)
            except TypeError:
                raise TypeError("First argument must be int or sequence")
            else:
                self._items = iter(range(self._total))

        self._file = file
        self._start_time = time.time()
        self._human_total = human_file_size(self._total)
        self._ipython_widget = ipython_widget

        self._signal_set = False
        if not ipython_widget:
            self._should_handle_resize = (
                _CAN_RESIZE_TERMINAL and self._file.isatty())
            self._handle_resize()
            if self._should_handle_resize:
                signal.signal(signal.SIGWINCH, self._handle_resize)
                self._signal_set = True

        self.update(0)

    def _handle_resize(self, signum=None, frame=None):
        terminal_width = terminal_size(self._file)[1]
        self._bar_length = terminal_width - 37

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self._silent:
            if exc_type is None:
                self.update(self._total)
            self._file.write('\n')
            self._file.flush()
            if self._signal_set:
                signal.signal(signal.SIGWINCH, signal.SIG_DFL)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            rv = next(self._items)
        except StopIteration:
            self.__exit__(None, None, None)
            raise
        else:
            self.update()
            return rv

    def update(self, value=None):
        """
        Update progress bar via the console or notebook accordingly.
        """
        # Update self.value
        if value is None:
            value = self._current_value + 1
        self._current_value = value

        # Choose the appropriate environment
        if self._ipython_widget:
            self._update_ipython_widget(value)
        else:
            self._update_console(value)
""" if self._total == 0: frac = 1.0 else: frac = float(value) / float(self._total) file = self._file write = file.write if frac > 1: bar_fill = int(self._bar_length) else: bar_fill = int(float(self._bar_length) * frac) write('\r|') color_print('=' * bar_fill, 'blue', file=file, end='') if bar_fill < self._bar_length: color_print('>', 'green', file=file, end='') write('-' * (self._bar_length - bar_fill - 1)) write('|') if value >= self._total: t = time.time() - self._start_time prefix = ' ' elif value <= 0: t = None prefix = '' else: t = ((time.time() - self._start_time) * (1.0 - frac)) / frac prefix = ' ETA ' write(f' {human_file_size(value):>4s}/{self._human_total:>4s}') write(f' ({frac:>6.2%})') write(prefix) if t is not None: write(human_time(t)) self._file.flush() def _update_ipython_widget(self, value=None): """ Update the progress bar to the given value (out of a total given to the constructor). This method is for use in the IPython notebook 2+. """ # Create and display an empty progress bar widget, # if none exists. if not hasattr(self, '_widget'): # Import only if an IPython widget, i.e., widget in iPython NB from IPython import version_info if version_info[0] < 4: from IPython.html import widgets self._widget = widgets.FloatProgressWidget() else: _IPython.get_ipython() from ipywidgets import widgets self._widget = widgets.FloatProgress() from IPython.display import display display(self._widget) self._widget.value = 0 # Calculate percent completion, and update progress bar frac = (value/self._total) self._widget.value = frac * 100 self._widget.description = f' ({frac:>6.2%})' def _silent_update(self, value=None): pass @classmethod def map(cls, function, items, multiprocess=False, file=None, step=100, ipython_widget=False, multiprocessing_start_method=None): """Map function over items while displaying a progress bar with percentage complete. The map operation may run in arbitrary order on the items, but the results are returned in sequential order. :: def work(i): print(i) ProgressBar.map(work, range(50)) Parameters ---------- function : function Function to call for each step items : sequence Sequence where each element is a tuple of arguments to pass to *function*. multiprocess : bool, int, optional If `True`, use the `multiprocessing` module to distribute each task to a different processor core. If a number greater than 1, then use that number of cores. ipython_widget : bool, optional If `True`, the progress bar will display as an IPython notebook widget. file : writable file-like, optional The file to write the progress bar to. Defaults to `sys.stdout`. If ``file`` is not a tty (as determined by calling its `isatty` member, if any), the scrollbar will be completely silent. step : int, optional Update the progress bar at least every *step* steps (default: 100). If ``multiprocess`` is `True`, this will affect the size of the chunks of ``items`` that are submitted as separate tasks to the process pool. A large step size may make the job complete faster if ``items`` is very long. multiprocessing_start_method : str, optional Useful primarily for testing; if in doubt leave it as the default. When using multiprocessing, certain anomalies occur when starting processes with the "spawn" method (the only option on Windows); other anomalies occur with the "fork" method (the default on Linux). 
""" if multiprocess: function = _mapfunc(function) items = list(enumerate(items)) results = cls.map_unordered( function, items, multiprocess=multiprocess, file=file, step=step, ipython_widget=ipython_widget, multiprocessing_start_method=multiprocessing_start_method) if multiprocess: _, results = zip(*sorted(results)) results = list(results) return results @classmethod def map_unordered(cls, function, items, multiprocess=False, file=None, step=100, ipython_widget=False, multiprocessing_start_method=None): """Map function over items, reporting the progress. Does a `map` operation while displaying a progress bar with percentage complete. The map operation may run on arbitrary order on the items, and the results may be returned in arbitrary order. :: def work(i): print(i) ProgressBar.map(work, range(50)) Parameters ---------- function : function Function to call for each step items : sequence Sequence where each element is a tuple of arguments to pass to *function*. multiprocess : bool, int, optional If `True`, use the `multiprocessing` module to distribute each task to a different processor core. If a number greater than 1, then use that number of cores. ipython_widget : bool, optional If `True`, the progress bar will display as an IPython notebook widget. file : writable file-like, optional The file to write the progress bar to. Defaults to `sys.stdout`. If ``file`` is not a tty (as determined by calling its `isatty` member, if any), the scrollbar will be completely silent. step : int, optional Update the progress bar at least every *step* steps (default: 100). If ``multiprocess`` is `True`, this will affect the size of the chunks of ``items`` that are submitted as separate tasks to the process pool. A large step size may make the job complete faster if ``items`` is very long. multiprocessing_start_method : str, optional Useful primarily for testing; if in doubt leave it as the default. When using multiprocessing, certain anomalies occur when starting processes with the "spawn" method (the only option on Windows); other anomalies occur with the "fork" method (the default on Linux). """ results = [] if file is None: file = _get_stdout() with cls(len(items), ipython_widget=ipython_widget, file=file) as bar: if bar._ipython_widget: chunksize = step else: default_step = max(int(float(len(items)) / bar._bar_length), 1) chunksize = min(default_step, step) if not multiprocess or multiprocess < 1: for i, item in enumerate(items): results.append(function(item)) if (i % chunksize) == 0: bar.update(i) else: ctx = multiprocessing.get_context(multiprocessing_start_method) kwargs = dict(mp_context=ctx) with ProcessPoolExecutor( max_workers=(int(multiprocess) if multiprocess is not True else None), **kwargs) as p: for i, f in enumerate( as_completed( p.submit(function, item) for item in items)): bar.update(i) results.append(f.result()) return results class Spinner: """ A class to display a spinner in the terminal. It is designed to be used with the ``with`` statement:: with Spinner("Reticulating splines", "green") as s: for item in enumerate(items): s.update() """ _default_unicode_chars = "◓◑◒◐" _default_ascii_chars = "-/|\\" def __init__(self, msg, color='default', file=None, step=1, chars=None): """ Parameters ---------- msg : str The message to print color : str, optional An ANSI terminal color name. Must be one of: black, red, green, brown, blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen, yellow, lightblue, lightmagenta, lightcyan, white. 

        file : writable file-like, optional
            The file to write the spinner to.  Defaults to `sys.stdout`.
            If ``file`` is not a tty (as determined by calling its
            `isatty` member, if any, or special case hacks to detect the
            IPython console), the spinner will be completely silent.

        step : int, optional
            Only update the spinner every *step* steps

        chars : str, optional
            The character sequence to use for the spinner
        """
        if file is None:
            file = _get_stdout()

        self._msg = msg
        self._color = color
        self._file = file
        self._step = step
        if chars is None:
            if conf.unicode_output:
                chars = self._default_unicode_chars
            else:
                chars = self._default_ascii_chars
        self._chars = chars

        self._silent = not isatty(file)

        if self._silent:
            self._iter = self._silent_iterator()
        else:
            self._iter = self._iterator()

    def _iterator(self):
        chars = self._chars
        index = 0
        file = self._file
        write = file.write
        flush = file.flush
        try_fallback = True

        while True:
            write('\r')
            color_print(self._msg, self._color, file=file, end='')
            write(' ')
            try:
                if try_fallback:
                    write = _write_with_fallback(chars[index], write, file)
                else:
                    write(chars[index])
            except UnicodeError:
                # If even _write_with_fallback failed for any reason just
                # give up on trying to use the unicode characters
                chars = self._default_ascii_chars
                write(chars[index])
                try_fallback = False  # No good will come of using this again
            flush()
            yield

            for i in range(self._step):
                yield

            index = (index + 1) % len(chars)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        file = self._file
        write = file.write
        flush = file.flush

        if not self._silent:
            write('\r')
            color_print(self._msg, self._color, file=file, end='')
        if exc_type is None:
            color_print(' [Done]', 'green', file=file)
        else:
            color_print(' [Failed]', 'red', file=file)
        flush()

    def __iter__(self):
        return self

    def __next__(self):
        next(self._iter)

    def update(self, value=None):
        """Update the spin wheel in the terminal.

        Parameters
        ----------
        value : int, optional
            Ignored (present just for compatibility with
            `ProgressBar.update`).
        """
        next(self)

    def _silent_iterator(self):
        color_print(self._msg, self._color, file=self._file, end='')
        self._file.flush()

        while True:
            yield


class ProgressBarOrSpinner:
    """
    A class that displays either a `ProgressBar` or `Spinner`
    depending on whether the total size of the operation is
    known or not.

    It is designed to be used with the ``with`` statement::

        if file.has_length():
            length = file.get_length()
        else:
            length = None
        bytes_read = 0
        with ProgressBarOrSpinner(length) as bar:
            while file.read(blocksize):
                bytes_read += blocksize
                bar.update(bytes_read)
    """

    def __init__(self, total, msg, color='default', file=None):
        """
        Parameters
        ----------
        total : int or None
            If an int, the number of increments in the process being
            tracked and a `ProgressBar` is displayed.  If `None`, a
            `Spinner` is displayed.

        msg : str
            The message to display above the `ProgressBar` or
            alongside the `Spinner`.

        color : str, optional
            The color of ``msg``, if any.  Must be an ANSI terminal
            color name.  Must be one of: black, red, green, brown,
            blue, magenta, cyan, lightgrey, default, darkgrey,
            lightred, lightgreen, yellow, lightblue, lightmagenta,
            lightcyan, white.

        file : writable file-like, optional
            The file to write to.  Defaults to `sys.stdout`.  If
            ``file`` is not a tty (as determined by calling its `isatty`
            member, if any), only ``msg`` will be displayed: the
            `ProgressBar` or `Spinner` will be silent.
""" if file is None: file = _get_stdout() if total is None or not isatty(file): self._is_spinner = True self._obj = Spinner(msg, color=color, file=file) else: self._is_spinner = False color_print(msg, color, file=file) self._obj = ProgressBar(total, file=file) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): return self._obj.__exit__(exc_type, exc_value, traceback) def update(self, value): """ Update the progress bar to the given value (out of the total given to the constructor. """ self._obj.update(value) def print_code_line(line, col=None, file=None, tabwidth=8, width=70): """ Prints a line of source code, highlighting a particular character position in the line. Useful for displaying the context of error messages. If the line is more than ``width`` characters, the line is truncated accordingly and '…' characters are inserted at the front and/or end. It looks like this:: there_is_a_syntax_error_here : ^ Parameters ---------- line : unicode The line of code to display col : int, optional The character in the line to highlight. ``col`` must be less than ``len(line)``. file : writable file-like, optional Where to write to. Defaults to `sys.stdout`. tabwidth : int, optional The number of spaces per tab (``'\\t'``) character. Default is 8. All tabs will be converted to spaces to ensure that the caret lines up with the correct column. width : int, optional The width of the display, beyond which the line will be truncated. Defaults to 70 (this matches the default in the standard library's `textwrap` module). """ if file is None: file = _get_stdout() if conf.unicode_output: ellipsis = '…' else: ellipsis = '...' write = file.write if col is not None: if col >= len(line): raise ValueError('col must be less the the line length.') ntabs = line[:col].count('\t') col += ntabs * (tabwidth - 1) line = line.rstrip('\n') line = line.replace('\t', ' ' * tabwidth) if col is not None and col > width: new_col = min(width // 2, len(line) - col) offset = col - new_col line = line[offset + len(ellipsis):] width -= len(ellipsis) new_col = col col -= offset color_print(ellipsis, 'darkgrey', file=file, end='') if len(line) > width: write(line[:width - len(ellipsis)]) color_print(ellipsis, 'darkgrey', file=file) else: write(line) write('\n') if col is not None: write(' ' * col) color_print('^', 'red', file=file) # The following four Getch* classes implement unbuffered character reading from # stdin on Windows, linux, MacOSX. This is taken directly from ActiveState # Code Recipes: # http://code.activestate.com/recipes/134892-getch-like-unbuffered-character-reading-from-stdin/ # class Getch: """Get a single character from standard input without screen echo. 
Returns ------- char : str (one character) """ def __init__(self): try: self.impl = _GetchWindows() except ImportError: try: self.impl = _GetchMacCarbon() except (ImportError, AttributeError): self.impl = _GetchUnix() def __call__(self): return self.impl() class _GetchUnix: def __init__(self): import tty # pylint: disable=W0611 import sys # pylint: disable=W0611 # import termios now or else you'll get the Unix # version on the Mac import termios # pylint: disable=W0611 def __call__(self): import sys import tty import termios fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch class _GetchWindows: def __init__(self): import msvcrt # pylint: disable=W0611 def __call__(self): import msvcrt return msvcrt.getch() class _GetchMacCarbon: """ A function which returns the current ASCII key that is down; if no ASCII key is down, the null string is returned. The page http://www.mactech.com/macintosh-c/chap02-1.html was very helpful in figuring out how to do this. """ def __init__(self): import Carbon Carbon.Evt # see if it has this (in Unix, it doesn't) def __call__(self): import Carbon if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask return '' else: # # The event contains the following info: # (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1] # # The message (msg) contains the ASCII char which is # extracted with the 0x000000FF charCodeMask; this # number is converted to an ASCII character with chr() and # returned # (what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1] return chr(msg & 0x000000FF)
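
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of this module's API): how the
# progress/spinner helpers above fit together.  The ``_demo_console_helpers``
# name and the toy workload are hypothetical.

def _demo_console_helpers():
    import time

    def work(i):
        time.sleep(0.01)
        return i * i

    # ``ProgressBar.map`` preserves input order; a bar is drawn on a tty.
    squares = ProgressBar.map(work, range(50), multiprocess=False)
    assert squares[7] == 49

    # ``Spinner`` suits operations of unknown length; it is silent when the
    # output file is not a tty.
    with Spinner('Reticulating splines', 'green') as s:
        for _ in range(20):
            time.sleep(0.01)
            s.update()

    # ``ProgressBarOrSpinner`` picks the widget based on whether the total
    # is known (an int) or not (None).
    with ProgressBarOrSpinner(None, 'Downloading') as bar:
        for n in range(5):
            bar.update(n)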
""" A simple class to manage a piece of global science state. See :ref:`astropy:config-developer` for more details. """ __all__ = ['ScienceState'] class ScienceState: """ Science state subclasses are used to manage global items that can affect science results. Subclasses will generally override `validate` to convert from any of the acceptable inputs (such as strings) to the appropriate internal objects, and set an initial value to the ``_value`` member so it has a default. Examples -------- :: class MyState(ScienceState): @classmethod def validate(cls, value): if value not in ('A', 'B', 'C'): raise ValueError("Must be one of A, B, C") return value """ def __init__(self): raise RuntimeError( "This class is a singleton. Do not instantiate.") @classmethod def get(cls): """ Get the current science state value. """ return cls.validate(cls._value) @classmethod def set(cls, value): """ Set the current science state value. """ class _Context: def __init__(self, parent, value): self._value = value self._parent = parent def __enter__(self): pass def __exit__(self, type, value, tb): self._parent._value = self._value def __repr__(self): # Ensure we have a single-line repr, just in case our # value is not something simple like a string. value_repr, lb, _ = repr(self._parent._value).partition('\n') if lb: value_repr += '...' return (f'<ScienceState {self._parent.__name__}: {value_repr}>') ctx = _Context(cls, cls._value) value = cls.validate(value) cls._value = value return ctx @classmethod def validate(cls, value): """ Validate the value and convert it to its native type, if necessary. """ return value
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A module containing specialized collection classes.
"""


class HomogeneousList(list):
    """
    A subclass of list that contains only elements of a given type or
    types.  If an item that is not of the specified type is added to
    the list, a `TypeError` is raised.
    """

    def __init__(self, types, values=()):
        """
        Parameters
        ----------
        types : sequence of types
            The types to accept.

        values : sequence, optional
            An initial set of values.
        """
        self._types = types
        super().__init__()
        self.extend(values)

    def _assert(self, x):
        if not isinstance(x, self._types):
            raise TypeError(
                f"homogeneous list must contain only objects of type '{self._types}'")

    def __iadd__(self, other):
        self.extend(other)
        return self

    def __setitem__(self, idx, value):
        if isinstance(idx, slice):
            value = list(value)
            for item in value:
                self._assert(item)
        else:
            self._assert(value)
        return super().__setitem__(idx, value)

    def append(self, x):
        self._assert(x)
        return super().append(x)

    def insert(self, i, x):
        self._assert(x)
        return super().insert(i, x)

    def extend(self, x):
        for item in x:
            self._assert(item)
            super().append(item)
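
# Usage sketch (illustrative only, not part of this module's API):
# HomogeneousList enforces the element type on every mutation path it
# overrides -- append, insert, extend, ``+=``, and item/slice assignment.

def _demo_homogeneous_list():
    hl = HomogeneousList(int, values=[1, 2, 3])
    hl.append(4)
    hl += [5, 6]
    hl[0] = 0

    try:
        hl.append('not an int')
    except TypeError:
        pass  # rejected: only int elements are allowed

    # ``types`` may also be a tuple of acceptable types.
    mixed = HomogeneousList((int, float))
    mixed.extend([1, 2.5])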
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions related to Python runtime introspection."""


import collections
import inspect
import os
import sys
import types
import importlib
from importlib import metadata

from packaging.version import Version

from astropy.utils.decorators import deprecated_renamed_argument

__all__ = ['resolve_name', 'minversion', 'find_current_module',
           'isinstancemethod']

__doctest_skip__ = ['find_current_module']

if sys.version_info[:2] >= (3, 10):
    from importlib.metadata import packages_distributions
else:
    def packages_distributions():
        """
        Return a mapping of top-level packages to their distributions.
        Note: copied from https://github.com/python/importlib_metadata/pull/287
        """
        pkg_to_dist = collections.defaultdict(list)
        for dist in metadata.distributions():
            for pkg in (dist.read_text('top_level.txt') or '').split():
                pkg_to_dist[pkg].append(dist.metadata['Name'])
        return dict(pkg_to_dist)


def resolve_name(name, *additional_parts):
    """Resolve a name like ``module.object`` to an object and return it.

    This ends up working like ``from module import object`` but is
    easier to deal with than the `__import__` builtin and supports digging
    into submodules.

    Parameters
    ----------
    name : `str`
        A dotted path to a Python object--that is, the name of a function,
        class, or other object in a module with the full path to that module,
        including parent modules, separated by dots.  Also known as the fully
        qualified name of the object.

    additional_parts : iterable, optional
        If more than one positional argument is given, those arguments are
        automatically dotted together with ``name``.

    Examples
    --------
    >>> resolve_name('astropy.utils.introspection.resolve_name')
    <function resolve_name at 0x...>
    >>> resolve_name('astropy', 'utils', 'introspection', 'resolve_name')
    <function resolve_name at 0x...>

    Raises
    ------
    `ImportError`
        If the module or named object is not found.
    """
    additional_parts = '.'.join(additional_parts)

    if additional_parts:
        name = name + '.' + additional_parts

    parts = name.split('.')

    if len(parts) == 1:
        # No dots in the name--just a straight up module import
        cursor = 1
        fromlist = []
    else:
        cursor = len(parts) - 1
        fromlist = [parts[-1]]

    module_name = parts[:cursor]

    while cursor > 0:
        try:
            ret = __import__('.'.join(module_name), fromlist=fromlist)
            break
        except ImportError:
            if cursor == 0:
                raise
            cursor -= 1
            module_name = parts[:cursor]
            fromlist = [parts[cursor]]
            ret = ''

    for part in parts[cursor:]:
        try:
            ret = getattr(ret, part)
        except AttributeError:
            raise ImportError(name)

    return ret


@deprecated_renamed_argument('version_path', None, '5.0')
def minversion(module, version, inclusive=True, version_path='__version__'):
    """
    Returns `True` if the specified Python module satisfies a minimum version
    requirement, and `False` if not.

    .. deprecated::
        ``version_path`` is not used anymore and is deprecated in
        ``astropy`` 5.0.

    Parameters
    ----------
    module : module or `str`
        An imported module of which to check the version, or the name of
        that module (in which case an import of that module is attempted--
        if this fails `False` is returned).

    version : `str`
        The version as a string that this module must have at a minimum (e.g.
        ``'0.12'``).

    inclusive : `bool`
        The specified version meets the requirement inclusively (i.e. ``>=``)
        as opposed to strictly greater than (default: `True`).

    Examples
    --------
    >>> import astropy
    >>> minversion(astropy, '0.4.4')
    True
    """
    if isinstance(module, types.ModuleType):
        module_name = module.__name__
        module_version = getattr(module, '__version__', None)
    elif isinstance(module, str):
        module_name = module
        module_version = None
        try:
            module = resolve_name(module_name)
        except ImportError:
            return False
    else:
        raise ValueError('module argument must be an actual imported '
                         'module, or the import name of the module; '
                         f'got {repr(module)}')

    if module_version is None:
        try:
            module_version = metadata.version(module_name)
        except metadata.PackageNotFoundError:
            # Maybe the distribution name is different from package name.
            # Calling packages_distributions is costly so we do it only
            # if necessary, as only a few packages don't have the same
            # distribution name.
            dist_names = packages_distributions()
            module_version = metadata.version(dist_names[module_name][0])

    if inclusive:
        return Version(module_version) >= Version(version)
    else:
        return Version(module_version) > Version(version)


def find_current_module(depth=1, finddiff=False):
    """
    Determines the module/package from which this function is called.

    This function has two modes, determined by the ``finddiff`` option.  It
    will either simply go the requested number of frames up the call stack
    (if ``finddiff`` is False), or it will go up the call stack until it
    reaches a module that is *not* in a specified set.

    Parameters
    ----------
    depth : int
        Specifies how far back to go in the call stack (0-indexed, so that
        passing in 0 gives back `astropy.utils.misc`).
    finddiff : bool or list
        If False, the returned ``mod`` will just be ``depth`` frames up from
        the current frame. Otherwise, the function will start at a frame
        ``depth`` up from current, and continue up the call stack to the
        first module that is *different* from those in the provided list.
        In this case, ``finddiff`` can be a list of modules or module
        names. Alternatively, it can be True, which will use the module
        ``depth`` call stack frames up as the module the returned module
        must be different from.

    Returns
    -------
    mod : module or None
        The module object or None if the package cannot be found. The name of
        the module is available as the ``__name__`` attribute of the returned
        object (if it isn't None).

    Raises
    ------
    ValueError
        If ``finddiff`` is a list with an invalid entry.

    Examples
    --------
    The examples below assume that there are two modules in a package named
    ``pkg``.

    ``mod1.py``::

        def find1():
            from astropy.utils import find_current_module
            print(find_current_module(1).__name__)

        def find2():
            from astropy.utils import find_current_module
            cmod = find_current_module(2)
            if cmod is None:
                print('None')
            else:
                print(cmod.__name__)

        def find_diff():
            from astropy.utils import find_current_module
            print(find_current_module(0, True).__name__)

    ``mod2.py``::

        def find():
            from .mod1 import find2
            find2()

    With these modules in place, the following occurs::

        >>> from pkg import mod1, mod2
        >>> from astropy.utils import find_current_module
        >>> mod1.find1()
        pkg.mod1
        >>> mod1.find2()
        None
        >>> mod2.find()
        pkg.mod2
        >>> find_current_module(0)
        <module 'astropy.utils.misc' from 'astropy/utils/misc.py'>
        >>> mod1.find_diff()
        pkg.mod1

    """
    frm = inspect.currentframe()
    for i in range(depth):
        frm = frm.f_back
        if frm is None:
            return None

    if finddiff:
        currmod = _get_module_from_frame(frm)
        if finddiff is True:
            diffmods = [currmod]
        else:
            diffmods = []
            for fd in finddiff:
                if inspect.ismodule(fd):
                    diffmods.append(fd)
                elif isinstance(fd, str):
                    diffmods.append(importlib.import_module(fd))
                elif fd is True:
                    diffmods.append(currmod)
                else:
                    raise ValueError('invalid entry in finddiff')

        while frm:
            frmb = frm.f_back
            modb = _get_module_from_frame(frmb)
            if modb not in diffmods:
                return modb
            frm = frmb
    else:
        return _get_module_from_frame(frm)


def _get_module_from_frame(frm):
    """Uses inspect.getmodule() to get the module that the current frame's
    code is running in.

    However, this does not work reliably for code imported from a zip file,
    so this provides a fallback mechanism for that case which is less
    reliable in general, but more reliable than inspect.getmodule() for this
    particular case.
    """
    mod = inspect.getmodule(frm)
    if mod is not None:
        return mod

    # Check to see if we're importing from a bundle file. First ensure that
    # __file__ is available in globals; this is cheap to check to bail out
    # immediately if this fails
    if '__file__' in frm.f_globals and '__name__' in frm.f_globals:
        filename = frm.f_globals['__file__']

        # Using __file__ from the frame's globals and getting it into the form
        # of an absolute path name with .py at the end works pretty well for
        # looking up the module using the same means as inspect.getmodule
        if filename[-4:].lower() in ('.pyc', '.pyo'):
            filename = filename[:-4] + '.py'

        filename = os.path.realpath(os.path.abspath(filename))
        if filename in inspect.modulesbyfile:
            return sys.modules.get(inspect.modulesbyfile[filename])

        # On Windows, inspect.modulesbyfile appears to have filenames stored
        # in lowercase, so we check for this case too.
        if filename.lower() in inspect.modulesbyfile:
            return sys.modules.get(inspect.modulesbyfile[filename.lower()])

    # Otherwise there are still some even trickier things that might be
    # possible to track down the module, but we'll leave those out unless we
    # find a case where it's really necessary.  So return None if the module
    # is not found.
    return None


def find_mod_objs(modname, onlylocals=False):
    """ Returns all the public attributes of a module referenced by name.

    .. note::
        The returned list does *not* include subpackages or modules of
        ``modname``, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).

    Parameters
    ----------
    modname : str
        The name of the module to search.
    onlylocals : bool or list of str
        If `True`, only attributes that are either members of ``modname`` OR
        one of its modules or subpackages will be included. If it is a list
        of strings, those specify the possible packages that will be
        considered "local".

    Returns
    -------
    localnames : list of str
        A list of the names of the attributes as they are named in the
        module ``modname``.
    fqnames : list of str
        A list of the fully qualified names of the attributes (e.g.,
        ``astropy.utils.introspection.find_mod_objs``). For attributes that
        are simple variables, this is based on the local name, but for
        functions or classes it can be different if they are actually
        defined elsewhere and just referenced in ``modname``.
    objs : list of objects
        A list of the actual attributes themselves (in the same order as
        the other arguments)

    """
    mod = resolve_name(modname)

    if hasattr(mod, '__all__'):
        pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
    else:
        pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']

    # filter out modules and pull the names and objs out
    ismodule = inspect.ismodule
    localnames = [k for k, v in pkgitems if not ismodule(v)]
    objs = [v for k, v in pkgitems if not ismodule(v)]

    # fully qualified names can be determined from the object's module
    fqnames = []
    for obj, lnm in zip(objs, localnames):
        if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
            fqnames.append(obj.__module__ + '.' + obj.__name__)
        else:
            fqnames.append(modname + '.' + lnm)

    if onlylocals:
        if onlylocals is True:
            onlylocals = [modname]
        valids = [any(fqn.startswith(nm) for nm in onlylocals) for fqn in fqnames]
        localnames = [e for i, e in enumerate(localnames) if valids[i]]
        fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
        objs = [e for i, e in enumerate(objs) if valids[i]]

    return localnames, fqnames, objs


# Note: I would have preferred to call this is_instancemethod, but this
# naming is for consistency with other functions in the `inspect` module
def isinstancemethod(cls, obj):
    """
    Returns `True` if the given object is an instance method of the class
    it is defined on (as opposed to a `staticmethod` or a `classmethod`).

    This requires both the class the object is a member of as well as the
    object itself in order to make this determination.

    Parameters
    ----------
    cls : `type`
        The class on which this method was defined.
    obj : `object`
        A member of the provided class (the membership is not checked directly,
        but this function will always return `False` if the given object is not
        a member of the given class).

    Examples
    --------
    >>> class MetaClass(type):
    ...     def a_classmethod(cls): pass
    ...
    >>> class MyClass(metaclass=MetaClass):
    ...     def an_instancemethod(self): pass
    ...
    ...     @classmethod
    ...     def another_classmethod(cls): pass
    ...
    ...     @staticmethod
    ...     def a_staticmethod(): pass
    ...
    >>> isinstancemethod(MyClass, MyClass.a_classmethod)
    False
    >>> isinstancemethod(MyClass, MyClass.another_classmethod)
    False
    >>> isinstancemethod(MyClass, MyClass.a_staticmethod)
    False
    >>> isinstancemethod(MyClass, MyClass.an_instancemethod)
    True
    """
    return _isinstancemethod(cls, obj)


def _isinstancemethod(cls, obj):
    if not isinstance(obj, types.FunctionType):
        return False

    # Unfortunately it seems the easiest way to get to the original
    # staticmethod object is to look in the class's __dict__, though we
    # also need to look up the MRO in case the method is not in the given
    # class's dict
    name = obj.__name__
    for basecls in cls.mro():  # This includes cls
        if name in basecls.__dict__:
            return not isinstance(basecls.__dict__[name], staticmethod)

    # This shouldn't happen, though this is the most sensible response if
    # it does.
    raise AttributeError(name)
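
# Usage sketch (illustrative only, not part of this module's API) for the main
# entry points above, assuming this module is importable as
# ``astropy.utils.introspection``.

def _demo_introspection():
    import astropy

    # Resolve a dotted name to the object it refers to.
    func = resolve_name('astropy.utils.introspection.minversion')
    assert func is minversion

    # Minimum-version checks accept a module object or an import name; an
    # unimportable name simply yields False.
    assert minversion(astropy, '0.4.4')
    assert not minversion('no_such_module_hopefully', '1.0')

    # List the public, non-module attributes of a module.
    localnames, fqnames, objs = find_mod_objs(
        'astropy.utils.introspection', onlylocals=True)
    assert 'minversion' in localnames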
import difflib import functools import sys import numbers import numpy as np from .misc import indent __all__ = ['fixed_width_indent', 'diff_values', 'report_diff_values', 'where_not_allclose'] # Smaller default shift-width for indent fixed_width_indent = functools.partial(indent, width=2) def diff_values(a, b, rtol=0.0, atol=0.0): """ Diff two scalar values. If both values are floats, they are compared to within the given absolute and relative tolerance. Parameters ---------- a, b : int, float, str Scalar values to compare. rtol, atol : float Relative and absolute tolerances as accepted by :func:`numpy.allclose`. Returns ------- is_different : bool `True` if they are different, else `False`. """ if isinstance(a, float) and isinstance(b, float): if np.isnan(a) and np.isnan(b): return False return not np.allclose(a, b, rtol=rtol, atol=atol) else: return a != b def report_diff_values(a, b, fileobj=sys.stdout, indent_width=0): """ Write a diff report between two values to the specified file-like object. Parameters ---------- a, b Values to compare. Anything that can be turned into strings and compared using :py:mod:`difflib` should work. fileobj : object File-like object to write to. The default is ``sys.stdout``, which writes to terminal. indent_width : int Character column(s) to indent. Returns ------- identical : bool `True` if no diff, else `False`. """ if isinstance(a, np.ndarray) and isinstance(b, np.ndarray): if a.shape != b.shape: fileobj.write( fixed_width_indent(' Different array shapes:\n', indent_width)) report_diff_values(str(a.shape), str(b.shape), fileobj=fileobj, indent_width=indent_width + 1) return False diff_indices = np.transpose(np.where(a != b)) num_diffs = diff_indices.shape[0] for idx in diff_indices[:3]: lidx = idx.tolist() fileobj.write( fixed_width_indent(f' at {lidx!r}:\n', indent_width)) report_diff_values(a[tuple(idx)], b[tuple(idx)], fileobj=fileobj, indent_width=indent_width + 1) if num_diffs > 3: fileobj.write(fixed_width_indent( f' ...and at {num_diffs - 3:d} more indices.\n', indent_width)) return False return num_diffs == 0 typea = type(a) typeb = type(b) if typea == typeb: lnpad = ' ' sign_a = 'a>' sign_b = 'b>' if isinstance(a, numbers.Number): a = repr(a) b = repr(b) else: a = str(a) b = str(b) else: padding = max(len(typea.__name__), len(typeb.__name__)) + 3 lnpad = (padding + 1) * ' ' sign_a = ('(' + typea.__name__ + ') ').rjust(padding) + 'a>' sign_b = ('(' + typeb.__name__ + ') ').rjust(padding) + 'b>' is_a_str = isinstance(a, str) is_b_str = isinstance(b, str) a = (repr(a) if ((is_a_str and not is_b_str) or (not is_a_str and isinstance(a, numbers.Number))) else str(a)) b = (repr(b) if ((is_b_str and not is_a_str) or (not is_b_str and isinstance(b, numbers.Number))) else str(b)) identical = True for line in difflib.ndiff(a.splitlines(), b.splitlines()): if line[0] == '-': identical = False line = sign_a + line[1:] elif line[0] == '+': identical = False line = sign_b + line[1:] else: line = lnpad + line fileobj.write(fixed_width_indent( ' {}\n'.format(line.rstrip('\n')), indent_width)) return identical def where_not_allclose(a, b, rtol=1e-5, atol=1e-8): """ A version of :func:`numpy.allclose` that returns the indices where the two arrays differ, instead of just a boolean value. Parameters ---------- a, b : array-like Input arrays to compare. rtol, atol : float Relative and absolute tolerances as accepted by :func:`numpy.allclose`. Returns ------- idx : tuple of array Indices where the two arrays differ. 
""" # Create fixed mask arrays to handle INF and NaN; currently INF and NaN # are handled as equivalent if not np.all(np.isfinite(a)): a = np.ma.fix_invalid(a).data if not np.all(np.isfinite(b)): b = np.ma.fix_invalid(b).data if atol == 0.0 and rtol == 0.0: # Use a faster comparison for the most simple (and common) case return np.where(a != b) return np.where(np.abs(a - b) > (atol + rtol * np.abs(b)))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains helper functions and classes for handling metadata.
"""

from functools import wraps
import warnings

from collections import OrderedDict
from collections.abc import Mapping
from copy import deepcopy

import numpy as np

from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import dtype_bytes_or_chars


__all__ = ['MergeConflictError', 'MergeConflictWarning', 'MERGE_STRATEGIES',
           'common_dtype', 'MergePlus', 'MergeNpConcatenate', 'MergeStrategy',
           'MergeStrategyMeta', 'enable_merge_strategies', 'merge', 'MetaData',
           'MetaAttribute']


class MergeConflictError(TypeError):
    pass


class MergeConflictWarning(AstropyWarning):
    pass


MERGE_STRATEGIES = []


def common_dtype(arrs):
    """
    Use numpy to find the common dtype for a list of ndarrays.

    Only allow arrays within the following fundamental numpy data types:
    ``np.bool_``, ``np.object_``, ``np.number``, ``np.character``, ``np.void``

    Parameters
    ----------
    arrs : list of ndarray
        Arrays for which to find the common dtype

    Returns
    -------
    dtype_str : str
        String representation of dtype (dtype ``str`` attribute)
    """
    def dtype(arr):
        return getattr(arr, 'dtype', np.dtype('O'))

    np_types = (np.bool_, np.object_, np.number,
                np.character, np.void)
    uniq_types = set(tuple(issubclass(dtype(arr).type, np_type)
                           for np_type in np_types)
                     for arr in arrs)
    if len(uniq_types) > 1:
        # Embed into the exception the actual list of incompatible types.
        incompat_types = [dtype(arr).name for arr in arrs]
        tme = MergeConflictError(f'Arrays have incompatible types {incompat_types}')
        tme._incompat_types = incompat_types
        raise tme

    arrs = [np.empty(1, dtype=dtype(arr)) for arr in arrs]

    # For string-type arrays we need to explicitly fill in non-zero
    # values or the final arr_common = .. step is unpredictable.
    for i, arr in enumerate(arrs):
        if arr.dtype.kind in ('S', 'U'):
            arrs[i] = [('0' if arr.dtype.kind == 'U' else b'0') *
                       dtype_bytes_or_chars(arr.dtype)]

    arr_common = np.array([arr[0] for arr in arrs])
    return arr_common.dtype.str


class MergeStrategyMeta(type):
    """
    Metaclass that registers MergeStrategy subclasses into the
    MERGE_STRATEGIES registry.
    """

    def __new__(mcls, name, bases, members):
        cls = super().__new__(mcls, name, bases, members)

        # Wrap ``merge`` classmethod to catch any exception and re-raise as
        # MergeConflictError.
        if 'merge' in members and isinstance(members['merge'], classmethod):
            orig_merge = members['merge'].__func__

            @wraps(orig_merge)
            def merge(cls, left, right):
                try:
                    return orig_merge(cls, left, right)
                except Exception as err:
                    raise MergeConflictError(err)

            cls.merge = classmethod(merge)

        # Register merging class (except for base MergeStrategy class)
        if 'types' in members:
            types = members['types']
            if isinstance(types, tuple):
                types = [types]
            for left, right in reversed(types):
                MERGE_STRATEGIES.insert(0, (left, right, cls))

        return cls


class MergeStrategy(metaclass=MergeStrategyMeta):
    """
    Base class for defining a strategy for merging metadata from two
    sources, left and right, into a single output.

    The primary functionality for the class is the ``merge(cls, left, right)``
    class method.  This takes ``left`` and ``right`` side arguments and
    returns a single merged output.

    The first class attribute is ``types``.  This is defined as a list of
    (left_types, right_types) tuples that indicate for which input types the
    merge strategy applies.
In determining whether to apply this merge strategy to a pair of (left, right) objects, a test is done: ``isinstance(left, left_types) and isinstance(right, right_types)``. For example:: types = [(np.ndarray, np.ndarray), # Two ndarrays (np.ndarray, (list, tuple)), # ndarray and (list or tuple) ((list, tuple), np.ndarray)] # (list or tuple) and ndarray As a convenience, ``types`` can be defined as a single two-tuple instead of a list of two-tuples, e.g. ``types = (np.ndarray, np.ndarray)``. The other class attribute is ``enabled``, which defaults to ``False`` in the base class. By defining a subclass of ``MergeStrategy`` the new merge strategy is automatically registered to be available for use in merging. However, by default the new merge strategy is *not enabled*. This prevents inadvertently changing the behavior of unrelated code that is performing metadata merge operations. In most cases (particularly in library code that others might use) it is recommended to leave custom strategies disabled and use the `~astropy.utils.metadata.enable_merge_strategies` context manager to locally enable the desired strategies. However, if one is confident that the new strategy will not produce unexpected behavior, then one can globally enable it by setting the ``enabled`` class attribute to ``True``. Examples -------- Here we define a custom merge strategy that takes an int or float on the left and right sides and returns a list with the two values. >>> from astropy.utils.metadata import MergeStrategy >>> class MergeNumbersAsList(MergeStrategy): ... types = ((int, float), (int, float)) # (left_types, right_types) ... ... @classmethod ... def merge(cls, left, right): ... return [left, right] """ # Set ``enabled = True`` to globally enable applying this merge strategy. # This is not generally recommended. enabled = False # types = [(left_types, right_types), ...] class MergePlus(MergeStrategy): """ Merge ``left`` and ``right`` objects using the plus operator. This merge strategy is globally enabled by default. """ types = [(list, list), (tuple, tuple)] enabled = True @classmethod def merge(cls, left, right): return left + right class MergeNpConcatenate(MergeStrategy): """ Merge ``left`` and ``right`` objects using np.concatenate. This merge strategy is globally enabled by default. This will upcast a list or tuple to np.ndarray and the output is always ndarray. """ types = [(np.ndarray, np.ndarray), (np.ndarray, (list, tuple)), ((list, tuple), np.ndarray)] enabled = True @classmethod def merge(cls, left, right): left, right = np.asanyarray(left), np.asanyarray(right) common_dtype([left, right]) # Ensure left and right have compatible dtype return np.concatenate([left, right]) def _both_isinstance(left, right, cls): return isinstance(left, cls) and isinstance(right, cls) def _not_equal(left, right): try: return bool(left != right) except Exception: return True class _EnableMergeStrategies: def __init__(self, *merge_strategies): self.merge_strategies = merge_strategies self.orig_enabled = {} for left_type, right_type, merge_strategy in MERGE_STRATEGIES: if issubclass(merge_strategy, merge_strategies): self.orig_enabled[merge_strategy] = merge_strategy.enabled merge_strategy.enabled = True def __enter__(self): pass def __exit__(self, type, value, tb): for merge_strategy, enabled in self.orig_enabled.items(): merge_strategy.enabled = enabled def enable_merge_strategies(*merge_strategies): """ Context manager to temporarily enable one or more custom metadata merge strategies. 
Examples -------- Here we define a custom merge strategy that takes an int or float on the left and right sides and returns a list with the two values. >>> from astropy.utils.metadata import MergeStrategy >>> class MergeNumbersAsList(MergeStrategy): ... types = ((int, float), # left side types ... (int, float)) # right side types ... @classmethod ... def merge(cls, left, right): ... return [left, right] By defining this class the merge strategy is automatically registered to be available for use in merging. However, by default new merge strategies are *not enabled*. This prevents inadvertently changing the behavior of unrelated code that is performing metadata merge operations. In order to use the new merge strategy, use this context manager as in the following example:: >>> from astropy.table import Table, vstack >>> from astropy.utils.metadata import enable_merge_strategies >>> t1 = Table([[1]], names=['a']) >>> t2 = Table([[2]], names=['a']) >>> t1.meta = {'m': 1} >>> t2.meta = {'m': 2} >>> with enable_merge_strategies(MergeNumbersAsList): ... t12 = vstack([t1, t2]) >>> t12.meta['m'] [1, 2] One can supply further merge strategies as additional arguments to the context manager. As a convenience, the enabling operation is actually done by checking whether the registered strategies are subclasses of the context manager arguments. This means one can define a related set of merge strategies and then enable them all at once by enabling the base class. As a trivial example, *all* registered merge strategies can be enabled with:: >>> with enable_merge_strategies(MergeStrategy): ... t12 = vstack([t1, t2]) Parameters ---------- *merge_strategies : `~astropy.utils.metadata.MergeStrategy` Merge strategies that will be enabled. """ return _EnableMergeStrategies(*merge_strategies) def _warn_str_func(key, left, right): out = ('Cannot merge meta key {0!r} types {1!r}' ' and {2!r}, choosing {0}={3!r}' .format(key, type(left), type(right), right)) return out def _error_str_func(key, left, right): out = f'Cannot merge meta key {key!r} types {type(left)!r} and {type(right)!r}' return out def merge(left, right, merge_func=None, metadata_conflicts='warn', warn_str_func=_warn_str_func, error_str_func=_error_str_func): """ Merge the ``left`` and ``right`` metadata objects. This is a simplistic and limited implementation at this point. """ if not _both_isinstance(left, right, dict): raise MergeConflictError('Can only merge two dict-based objects') out = deepcopy(left) for key, val in right.items(): # If no conflict then insert val into out dict and continue if key not in out: out[key] = deepcopy(val) continue # There is a conflict that must be resolved if _both_isinstance(left[key], right[key], dict): out[key] = merge(left[key], right[key], merge_func, metadata_conflicts=metadata_conflicts) else: try: if merge_func is None: for left_type, right_type, merge_cls in MERGE_STRATEGIES: if not merge_cls.enabled: continue if (isinstance(left[key], left_type) and isinstance(right[key], right_type)): out[key] = merge_cls.merge(left[key], right[key]) break else: raise MergeConflictError else: out[key] = merge_func(left[key], right[key]) except MergeConflictError: # Pick the metadata item that is not None, or they are both not # None, then if they are equal, there is no conflict, and if # they are different, there is a conflict and we pick the one # on the right (or raise an error). 
                    if left[key] is None:
                        # This may not seem necessary since out[key] gets set to
                        # right[key], but not all objects support != which is
                        # needed for one of the if clauses.
                        out[key] = right[key]
                    elif right[key] is None:
                        out[key] = left[key]
                    elif _not_equal(left[key], right[key]):
                        if metadata_conflicts == 'warn':
                            warnings.warn(warn_str_func(key, left[key], right[key]),
                                          MergeConflictWarning)
                        elif metadata_conflicts == 'error':
                            raise MergeConflictError(
                                error_str_func(key, left[key], right[key]))
                        elif metadata_conflicts != 'silent':
                            raise ValueError('metadata_conflicts argument must be one '
                                             'of "silent", "warn", or "error"')
                        out[key] = right[key]
                    else:
                        out[key] = right[key]

    return out


class MetaData:
    """
    A descriptor for classes that have a ``meta`` property.

    This can be set to any valid `~collections.abc.Mapping`.

    Parameters
    ----------
    doc : `str`, optional
        Documentation for the attribute of the class.
        Default is ``""``.

        .. versionadded:: 1.2

    copy : `bool`, optional
        If ``True`` the value is deepcopied before setting, otherwise it
        is saved as a reference.
        Default is ``True``.

        .. versionadded:: 1.2
    """

    def __init__(self, doc="", copy=True):
        self.__doc__ = doc
        self.copy = copy

    def __get__(self, instance, owner):
        if instance is None:
            return self
        if not hasattr(instance, '_meta'):
            instance._meta = OrderedDict()
        return instance._meta

    def __set__(self, instance, value):
        if value is None:
            instance._meta = OrderedDict()
        else:
            if isinstance(value, Mapping):
                if self.copy:
                    instance._meta = deepcopy(value)
                else:
                    instance._meta = value
            else:
                raise TypeError("meta attribute must be dict-like")


class MetaAttribute:
    """
    Descriptor to define a custom attribute which gets stored in the object
    ``meta`` dict and can have a defined default.

    This descriptor is intended to provide a convenient way to add attributes
    to a subclass of a complex class such as ``Table`` or ``NDData``.

    This requires that the object has an attribute ``meta`` which is a
    dict-like object.  The value of the MetaAttribute will be stored in a
    new dict meta['__attributes__'] that is created when required.

    Classes that define MetaAttributes are encouraged to support initializing
    the attributes via the class ``__init__``.  For example::

        for attr in list(kwargs):
            descr = getattr(self.__class__, attr, None)
            if isinstance(descr, MetaAttribute):
                setattr(self, attr, kwargs.pop(attr))

    The name of a ``MetaAttribute`` cannot be the same as any of the following:

    - Keyword argument in the owner class ``__init__``
    - Method or attribute of the "parent class", where the parent class is
      taken to be ``owner.__mro__[1]``.

    :param default: default value
    """

    def __init__(self, default=None):
        self.default = default

    def __get__(self, instance, owner):
        # When called without an instance, return self to allow access
        # to descriptor attributes.
        if instance is None:
            return self

        # If default is None and value has not been set already then return
        # None without touching meta['__attributes__'] at all. This helps
        # e.g. with the Table._hidden_columns attribute so it doesn't
        # auto-create meta['__attributes__'] always.
        if (self.default is None
                and self.name not in instance.meta.get('__attributes__', {})):
            return None

        # Get the __attributes__ dict and create if not there already.
attributes = instance.meta.setdefault('__attributes__', {}) try: value = attributes[self.name] except KeyError: if self.default is not None: attributes[self.name] = deepcopy(self.default) # Return either specified default or None value = attributes.get(self.name) return value def __set__(self, instance, value): # Get the __attributes__ dict and create if not there already. attributes = instance.meta.setdefault('__attributes__', {}) attributes[self.name] = value def __delete__(self, instance): # Remove this attribute from meta['__attributes__'] if it exists. if '__attributes__' in instance.meta: attrs = instance.meta['__attributes__'] if self.name in attrs: del attrs[self.name] # If this was the last attribute then remove the meta key as well if not attrs: del instance.meta['__attributes__'] def __set_name__(self, owner, name): import inspect params = [param.name for param in inspect.signature(owner).parameters.values() if param.kind not in (inspect.Parameter.VAR_KEYWORD, inspect.Parameter.VAR_POSITIONAL)] # Reject names from existing params or best guess at parent class if name in params or hasattr(owner.__mro__[1], name): raise ValueError(f'{name} not allowed as {self.__class__.__name__}') self.name = name def __repr__(self): return f'<{self.__class__.__name__} name={self.name} default={self.default}>'
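
# Usage sketch (illustrative only, not part of this module's API): merging two
# meta dicts, then temporarily enabling a custom strategy.  ``MergeSum`` is a
# hypothetical example class; note that merely defining it registers it (in a
# disabled state) via MergeStrategyMeta.

def _demo_metadata_merge():
    left = {'name': 'obs1', 'history': ['step1'], 'exposure': 30}
    right = {'history': ['step2'], 'exposure': 30}

    # Lists merge via MergePlus (enabled by default); equal scalar values
    # pass through without conflict.
    out = merge(left, right)
    assert out['history'] == ['step1', 'step2']
    assert out['exposure'] == 30

    class MergeSum(MergeStrategy):
        types = ((int, float), (int, float))

        @classmethod
        def merge(cls, left, right):
            return left + right

    with enable_merge_strategies(MergeSum):
        out = merge({'exposure': 30}, {'exposure': 45})
    assert out['exposure'] == 75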
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains developer-oriented utilities used by Astropy. Public functions and classes in this subpackage are safe to be used by other packages, but this subpackage is for utilities that are primarily of use for developers or to implement python hacks. This subpackage also includes the ``astropy.utils.compat`` package, which houses utilities that provide compatibility and bugfixes across all versions of Python that Astropy supports. However, the content of this module is solely for internal use of ``astropy`` and subject to changes without deprecations. Do not use it in external packages or code. """ from .codegen import * # noqa from .decorators import * # noqa from .introspection import * # noqa from .misc import * # noqa from .shapes import * # noqa
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains errors/exceptions and warnings of general use for astropy. Exceptions that are specific to a given subpackage should *not* be here, but rather in the particular subpackage. """ # TODO: deprecate these. This cannot be trivially done with # astropy.utils.decorators.deprecate, since that module needs the exceptions # here, leading to circular import problems. from erfa import ErfaError, ErfaWarning # noqa __all__ = [ 'AstropyWarning', 'AstropyUserWarning', 'AstropyDeprecationWarning', 'AstropyPendingDeprecationWarning', 'AstropyBackwardsIncompatibleChangeWarning', 'DuplicateRepresentationWarning', 'NoValue'] class AstropyWarning(Warning): """ The base warning class from which all Astropy warnings should inherit. Any warning inheriting from this class is handled by the Astropy logger. """ class AstropyUserWarning(UserWarning, AstropyWarning): """ The primary warning class for Astropy. Use this if you do not need a specific sub-class. """ class AstropyDeprecationWarning(AstropyWarning): """ A warning class to indicate a deprecated feature. """ class AstropyPendingDeprecationWarning(PendingDeprecationWarning, AstropyWarning): """ A warning class to indicate a soon-to-be deprecated feature. """ class AstropyBackwardsIncompatibleChangeWarning(AstropyWarning): """ A warning class indicating a change in astropy that is incompatible with previous versions. The suggested procedure is to issue this warning for the version in which the change occurs, and remove it for all following versions. """ class DuplicateRepresentationWarning(AstropyWarning): """ A warning class indicating a representation name was already registered """ class _NoValue: """Special keyword value. This class may be used as the default value assigned to a deprecated keyword in order to check if it has been given a user defined value. """ def __repr__(self): return 'astropy.utils.exceptions.NoValue' NoValue = _NoValue()
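
# Usage sketch (illustrative only, not part of this module's API): because the
# classes above all inherit from AstropyWarning, one filter can address every
# astropy-originated warning at once.

def _demo_warning_filtering():
    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        warnings.warn('something suspicious', AstropyUserWarning)
        assert issubclass(caught[-1].category, AstropyWarning)

    # Silence all astropy warnings (including subclasses) in one line.
    warnings.simplefilter('ignore', category=AstropyWarning)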
# Licensed under a 3-clause BSD style license - see LICENSE.rst from setuptools import Extension from os.path import dirname, join, relpath ASTROPY_UTILS_ROOT = dirname(__file__) def get_extensions(): return [ Extension('astropy.utils._compiler', [relpath(join(ASTROPY_UTILS_ROOT, 'src', 'compiler.c'))]) ]
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains functions and methods that relate to the DataInfo class
which provides a container for informational attributes as well as summary info
methods.

A DataInfo object is attached to the Quantity, SkyCoord, and Time classes in
astropy.  Here it allows those classes to be used in Tables and uniformly carry
table column attributes such as name, format, dtype, meta, and description.
"""

# Note: these functions and classes are tested extensively in astropy table
# tests via their use in providing mixin column info, and in
# astropy/tests/test_info for providing table and column info summary data.


import os
import re
import sys
import weakref
import warnings
from io import StringIO
from copy import deepcopy
from functools import partial
from collections import OrderedDict
from contextlib import contextmanager

import numpy as np

from . import metadata

__all__ = ['data_info_factory', 'dtype_info_name', 'BaseColumnInfo',
           'DataInfo', 'MixinInfo', 'ParentDtypeInfo']

# Tuple of filterwarnings kwargs to ignore when calling info
IGNORE_WARNINGS = (dict(category=RuntimeWarning, message='All-NaN|'
                        'Mean of empty slice|Degrees of freedom <= 0|'
                        'invalid value encountered in sqrt'),)


@contextmanager
def serialize_context_as(context):
    """Set context for serialization.

    This will allow downstream code to understand the context in which a column
    is being serialized.  Objects like Time or SkyCoord will have different
    default serialization representations depending on context.

    Parameters
    ----------
    context : str
        Context name, e.g. 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml'
    """
    old_context = BaseColumnInfo._serialize_context
    BaseColumnInfo._serialize_context = context
    try:
        yield
    finally:
        BaseColumnInfo._serialize_context = old_context


def dtype_info_name(dtype):
    """Return a human-oriented string name of the ``dtype`` arg.
    This can be used by astropy methods that present type information about
    a data object.

    The output is mostly equivalent to ``dtype.name`` which takes the form
    <type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an
    optional number of bits which gets included only for numeric types.

    The output is shown below for ``bytes`` and ``str`` types, with <N> being
    the number of characters. This representation corresponds to the Python
    type that matches the dtype::

      Numpy          S<N>      U<N>
      Python      bytes<N>   str<N>

    Parameters
    ----------
    dtype : str, `~numpy.dtype`, type
        Input as an object that can be converted via :class:`numpy.dtype`.

    Returns
    -------
    dtype_info_name : str
        String name of ``dtype``
    """
    dtype = np.dtype(dtype)
    if dtype.kind in ('S', 'U'):
        type_name = 'bytes' if dtype.kind == 'S' else 'str'
        length = re.search(r'(\d+)', dtype.str).group(1)
        out = type_name + length
    else:
        out = dtype.name

    return out


def data_info_factory(names, funcs):
    """
    Factory to create a function that can be used as an ``option``
    for outputting data object summary information.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.utils.data_info import data_info_factory
    >>> from astropy.table import Column
    >>> c = Column([4., 3., 2., 1.])
    >>> mystats = data_info_factory(names=['min', 'median', 'max'],
    ...                             funcs=[np.min, np.median, np.max])
    >>> c.info(option=mystats)
    min = 1
    median = 2.5
    max = 4
    n_bad = 0
    length = 4

    Parameters
    ----------
    names : list
        List of information attribute names
    funcs : list
        List of functions that compute the corresponding information attribute

    Returns
    -------
    func : function
        Function that can be used as a data info option
    """
    def func(dat):
        outs = []
        for name, func in zip(names, funcs):
            try:
                if isinstance(func, str):
                    out = getattr(dat, func)()
                else:
                    out = func(dat)
            except Exception:
                outs.append('--')
            else:
                try:
                    outs.append(f'{out:g}')
                except (TypeError, ValueError):
                    outs.append(str(out))

        return OrderedDict(zip(names, outs))

    return func


def _get_obj_attrs_map(obj, attrs):
    """
    Get the values for object ``attrs`` and return as a dict.  This
    ignores any attributes that are None.  In the context of serializing
    the supported core astropy classes this conversion will succeed and
    results in more succinct and less Python-specific YAML.
    """
    out = {}
    for attr in attrs:
        val = getattr(obj, attr, None)

        if val is not None:
            out[attr] = val
    return out


def _get_data_attribute(dat, attr=None):
    """
    Get a data object attribute for the ``attributes`` info summary method
    """
    if attr == 'class':
        val = type(dat).__name__
    elif attr == 'dtype':
        val = dtype_info_name(dat.info.dtype)
    elif attr == 'shape':
        datshape = dat.shape[1:]
        val = datshape if datshape else ''
    else:
        val = getattr(dat.info, attr)
    if val is None:
        val = ''
    return str(val)


class InfoAttribute:
    def __init__(self, attr, default=None):
        self.attr = attr
        self.default = default

    def __get__(self, instance, owner_cls):
        if instance is None:
            return self

        return instance._attrs.get(self.attr, self.default)

    def __set__(self, instance, value):
        if instance is None:
            # This is an unbound descriptor on the class
            raise ValueError('cannot set unbound descriptor')

        instance._attrs[self.attr] = value


class ParentAttribute:
    def __init__(self, attr):
        self.attr = attr

    def __get__(self, instance, owner_cls):
        if instance is None:
            return self

        return getattr(instance._parent, self.attr)

    def __set__(self, instance, value):
        if instance is None:
            # This is an unbound descriptor on the class
            raise ValueError('cannot set unbound descriptor')

        setattr(instance._parent, self.attr, value)


class DataInfoMeta(type):
    def __new__(mcls, name, bases, dct):
        # Ensure that we do not gain a __dict__, which would mean
        # arbitrary attributes could be set.
        dct.setdefault('__slots__', [])
        return super().__new__(mcls, name, bases, dct)

    def __init__(cls, name, bases, dct):
        super().__init__(name, bases, dct)

        # Define default getters/setters for attributes, if needed.
        for attr in cls.attr_names:
            if attr not in dct:
                # If not defined explicitly for this class, did any of
                # its superclasses define it, and, if so, was this an
                # automatically defined look-up-on-parent attribute?
                cls_attr = getattr(cls, attr, None)
                if attr in cls.attrs_from_parent:
                    # If the attribute is supposed to be stored on the parent,
                    # and that is stated by this class yet it was not the case
                    # on the superclass, override it.
if 'attrs_from_parent' in dct and not isinstance(cls_attr, ParentAttribute): setattr(cls, attr, ParentAttribute(attr)) elif not cls_attr or isinstance(cls_attr, ParentAttribute): # If the attribute is not meant to be stored on the parent, # and if it was not defined already or was previously defined # as an attribute on the parent, define a regular # look-up-on-info attribute setattr(cls, attr, InfoAttribute(attr, cls._attr_defaults.get(attr))) class DataInfo(metaclass=DataInfoMeta): """ Descriptor that data classes use to add an ``info`` attribute for storing data attributes in a uniform and portable way. Note that it *must* be called ``info`` so that the DataInfo() object can be stored in the ``instance`` using the ``info`` key. Because owner_cls.x is a descriptor, Python doesn't use __dict__['x'] normally, and the descriptor can safely store stuff there. Thanks to https://nbviewer.jupyter.org/urls/gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb for this trick that works for non-hashable classes. Parameters ---------- bound : bool If True this is a descriptor attribute in a class definition, else it is a DataInfo() object that is bound to a data object instance. Default is False. """ _stats = ['mean', 'std', 'min', 'max'] attrs_from_parent = set() attr_names = set(['name', 'unit', 'dtype', 'format', 'description', 'meta']) _attr_defaults = {'dtype': np.dtype('O')} _attrs_no_copy = set() _info_summary_attrs = ('dtype', 'shape', 'unit', 'format', 'description', 'class') __slots__ = ['_parent_cls', '_parent_ref', '_attrs'] # This specifies the list of object attributes which must be stored in # order to re-create the object after serialization. This is independent # of normal `info` attributes like name or description. Subclasses will # generally either define this statically (QuantityInfo) or dynamically # (SkyCoordInfo). These attributes may be scalars or arrays. If arrays # that match the object length they will be serialized as an independent # column. _represent_as_dict_attrs = () # This specifies attributes which are to be provided to the class # initializer as ordered args instead of keyword args. This is needed # for Quantity subclasses where the keyword for data varies (e.g. # between Quantity and Angle). _construct_from_dict_args = () # This specifies the name of an attribute which is the "primary" data. # Then when representing as columns # (table.serialize._represent_mixin_as_column) the output for this # attribute will be written with the just name of the mixin instead of the # usual "<name>.<attr>". _represent_as_dict_primary_data = None def __init__(self, bound=False): # If bound to a data object instance then create the dict of attributes # which stores the info attribute values. Default of None for "unset" # except for dtype where the default is object. if bound: self._attrs = {} @property def _parent(self): try: parent = self._parent_ref() except AttributeError: return None if parent is None: raise AttributeError("""\ failed to access "info" attribute on a temporary object. It looks like you have done something like ``col[3:5].info`` or ``col.quantity.info``, i.e. you accessed ``info`` from a temporary slice object that only exists momentarily. This has failed because the reference to that temporary object is now lost. Instead force a permanent reference (e.g. 
``c = col[3:5]`` followed by ``c.info``).""")

        return parent

    def __get__(self, instance, owner_cls):
        if instance is None:
            # This is an unbound descriptor on the class
            self._parent_cls = owner_cls
            return self

        info = instance.__dict__.get('info')
        if info is None:
            info = instance.__dict__['info'] = self.__class__(bound=True)
        # We set _parent_ref on every call, since if one makes copies of
        # instances, 'info' will be copied as well, which will lose the
        # reference.
        info._parent_ref = weakref.ref(instance)
        return info

    def __set__(self, instance, value):
        if instance is None:
            # This is an unbound descriptor on the class
            raise ValueError('cannot set unbound descriptor')

        if isinstance(value, DataInfo):
            info = instance.__dict__['info'] = self.__class__(bound=True)
            attr_names = info.attr_names
            if value.__class__ is self.__class__:
                # For same class, attributes are guaranteed to be stored in
                # _attrs, so we can speed things up by not accessing defaults.
                # Doing this before difference in for loop helps speed.
                attr_names = attr_names & set(value._attrs)  # NOT in-place!
            else:
                # For different classes, copy over the attributes in common.
                attr_names = attr_names & (value.attr_names - value._attrs_no_copy)

            for attr in attr_names - info.attrs_from_parent - info._attrs_no_copy:
                info._attrs[attr] = deepcopy(getattr(value, attr))

        else:
            raise TypeError('info must be set with a DataInfo instance')

    def __getstate__(self):
        return self._attrs

    def __setstate__(self, state):
        self._attrs = state

    def _represent_as_dict(self, attrs=None):
        """Get the values for the parent ``attrs`` and return as a dict.

        By default, uses '_represent_as_dict_attrs'.
        """
        if attrs is None:
            attrs = self._represent_as_dict_attrs
        return _get_obj_attrs_map(self._parent, attrs)

    def _construct_from_dict(self, map):
        args = [map.pop(attr) for attr in self._construct_from_dict_args]
        return self._parent_cls(*args, **map)

    info_summary_attributes = staticmethod(
        data_info_factory(names=_info_summary_attrs,
                          funcs=[partial(_get_data_attribute, attr=attr)
                                 for attr in _info_summary_attrs]))

    # No nan* methods in numpy < 1.8
    info_summary_stats = staticmethod(
        data_info_factory(names=_stats,
                          funcs=[getattr(np, 'nan' + stat)
                                 for stat in _stats]))

    def __call__(self, option='attributes', out=''):
        """
        Write summary information about data object to the ``out`` filehandle.
        By default this prints to standard output via sys.stdout.

        The ``option`` argument specifies what type of information
        to include.  This can be a string, a function, or a list of
        strings or functions.  Built-in options are:

        - ``attributes``: data object attributes like ``dtype`` and ``format``
        - ``stats``: basic statistics: mean, std, min, and max

        If a function is specified then that function will be called with the
        data object as its single argument.  The function must return an
        OrderedDict containing the information attributes.

        If a list is provided then the information attributes will be
        appended for each of the options, in order.

        Examples
        --------
        >>> from astropy.table import Column
        >>> c = Column([1, 2], unit='m', dtype='int32')
        >>> c.info()
        dtype = int32
        unit = m
        class = Column
        n_bad = 0
        length = 2

        >>> c.info(['attributes', 'stats'])
        dtype = int32
        unit = m
        class = Column
        mean = 1.5
        std = 0.5
        min = 1
        max = 2
        n_bad = 0
        length = 2

        Parameters
        ----------
        option : str, callable, list of (str or callable)
            Info option, defaults to 'attributes'.
        out : file-like, None
            Output destination, defaults to sys.stdout.  If None then the
            OrderedDict with information attributes is returned

        Returns
        -------
        info : `~collections.OrderedDict` or None
            `~collections.OrderedDict` if out==None else None
        """
        if out == '':
            out = sys.stdout

        dat = self._parent
        info = OrderedDict()
        name = dat.info.name
        if name is not None:
            info['name'] = name

        options = option if isinstance(option, (list, tuple)) else [option]
        for option in options:
            if isinstance(option, str):
                if hasattr(self, 'info_summary_' + option):
                    option = getattr(self, 'info_summary_' + option)
                else:
                    raise ValueError('option={} is not an allowed information type'
                                     .format(option))

            with warnings.catch_warnings():
                for ignore_kwargs in IGNORE_WARNINGS:
                    warnings.filterwarnings('ignore', **ignore_kwargs)
                info.update(option(dat))

        if hasattr(dat, 'mask'):
            n_bad = np.count_nonzero(dat.mask)
        else:
            try:
                n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat))
            except Exception:
                n_bad = 0
        info['n_bad'] = n_bad

        try:
            info['length'] = len(dat)
        except (TypeError, IndexError):
            pass

        if out is None:
            return info

        for key, val in info.items():
            if val != '':
                out.write(f'{key} = {val}' + os.linesep)

    def __repr__(self):
        if self._parent is None:
            return super().__repr__()

        out = StringIO()
        self.__call__(out=out)
        return out.getvalue()


class BaseColumnInfo(DataInfo):
    """
    Base info class for anything that can be a column in an astropy
    Table.  There are at least two classes that inherit from this:

      ColumnInfo: for native astropy Column / MaskedColumn objects
      MixinInfo: for mixin column objects

    Note that this class is defined here so that mixins can use it
    without importing the table package.
    """
    attr_names = DataInfo.attr_names.union(['parent_table', 'indices'])
    _attrs_no_copy = set(['parent_table', 'indices'])

    # Context for serialization.  This can be set temporarily via
    # ``serialize_context_as(context)`` context manager to allow downstream
    # code to understand the context in which a column is being serialized.
    # Typical values are 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml'.  Objects
    # like Time or SkyCoord will have different default serialization
    # representations depending on context.
    _serialize_context = None
    __slots__ = ['_format_funcs', '_copy_indices']

    @property
    def parent_table(self):
        value = self._attrs.get('parent_table')
        if callable(value):
            value = value()
        return value

    @parent_table.setter
    def parent_table(self, parent_table):
        if parent_table is None:
            self._attrs.pop('parent_table', None)
        else:
            parent_table = weakref.ref(parent_table)
            self._attrs['parent_table'] = parent_table

    def __init__(self, bound=False):
        super().__init__(bound=bound)

        # If bound to a data object instance then add a _format_funcs dict
        # for caching functions for print formatting.
        if bound:
            self._format_funcs = {}

    def __set__(self, instance, value):
        # For Table columns do not set `info` when the instance is a scalar.
        try:
            if not instance.shape:
                return
        except AttributeError:
            pass

        super().__set__(instance, value)

    def iter_str_vals(self):
        """
        This is a mixin-safe version of Column.iter_str_vals.
""" col = self._parent if self.parent_table is None: from astropy.table.column import FORMATTER as formatter else: formatter = self.parent_table.formatter _pformat_col_iter = formatter._pformat_col_iter for str_val in _pformat_col_iter(col, -1, False, False, {}): yield str_val @property def indices(self): # Implementation note: the auto-generation as an InfoAttribute cannot # be used here, since on access, one should not just return the # default (empty list is this case), but set _attrs['indices'] so that # if the list is appended to, it is registered here. return self._attrs.setdefault('indices', []) @indices.setter def indices(self, indices): self._attrs['indices'] = indices def adjust_indices(self, index, value, col_len): ''' Adjust info indices after column modification. Parameters ---------- index : slice, int, list, or ndarray Element(s) of column to modify. This parameter can be a single row number, a list of row numbers, an ndarray of row numbers, a boolean ndarray (a mask), or a column slice. value : int, list, or ndarray New value(s) to insert col_len : int Length of the column ''' if not self.indices: return if isinstance(index, slice): # run through each key in slice t = index.indices(col_len) keys = list(range(*t)) elif isinstance(index, np.ndarray) and index.dtype.kind == 'b': # boolean mask keys = np.where(index)[0] else: # single int keys = [index] value = np.atleast_1d(value) # turn array(x) into array([x]) if value.size == 1: # repeat single value value = list(value) * len(keys) for key, val in zip(keys, value): for col_index in self.indices: col_index.replace(key, self.name, val) def slice_indices(self, col_slice, item, col_len): ''' Given a sliced object, modify its indices to correctly represent the slice. Parameters ---------- col_slice : `~astropy.table.Column` or mixin Sliced object. If not a column, it must be a valid mixin, see https://docs.astropy.org/en/stable/table/mixin_columns.html item : slice, list, or ndarray Slice used to create col_slice col_len : int Length of original object ''' from astropy.table.sorted_array import SortedArray if not getattr(self, '_copy_indices', True): # Necessary because MaskedArray will perform a shallow copy col_slice.info.indices = [] return col_slice elif isinstance(item, slice): col_slice.info.indices = [x[item] for x in self.indices] elif self.indices: if isinstance(item, np.ndarray) and item.dtype.kind == 'b': # boolean mask item = np.where(item)[0] # Empirical testing suggests that recreating a BST/RBT index is # more effective than relabelling when less than ~60% of # the total number of rows are involved, and is in general # more effective for SortedArray. small = len(item) <= 0.6 * col_len col_slice.info.indices = [] for index in self.indices: if small or isinstance(index, SortedArray): new_index = index.get_slice(col_slice, item) else: new_index = deepcopy(index) new_index.replace_rows(item) col_slice.info.indices.append(new_index) return col_slice @staticmethod def merge_cols_attributes(cols, metadata_conflicts, name, attrs): """ Utility method to merge and validate the attributes ``attrs`` for the input table columns ``cols``. Note that ``dtype`` and ``shape`` attributes are handled specially. These should not be passed in ``attrs`` but will always be in the returned dict of merged attributes. 
Parameters ---------- cols : list List of input Table column objects metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name attrs : list List of attribute names to be merged Returns ------- attrs : dict Of merged attributes. """ from astropy.table.np_utils import TableMergeError def warn_str_func(key, left, right): out = ("In merged column '{}' the '{}' attribute does not match " "({} != {}). Using {} for merged output" .format(name, key, left, right, right)) return out def getattrs(col): return {attr: getattr(col.info, attr) for attr in attrs if getattr(col.info, attr, None) is not None} out = getattrs(cols[0]) for col in cols[1:]: out = metadata.merge(out, getattrs(col), metadata_conflicts=metadata_conflicts, warn_str_func=warn_str_func) # Output dtype is the superset of all dtypes in in_cols out['dtype'] = metadata.common_dtype(cols) # Make sure all input shapes are the same uniq_shapes = set(col.shape[1:] for col in cols) if len(uniq_shapes) != 1: raise TableMergeError('columns have different shapes') out['shape'] = uniq_shapes.pop() # "Merged" output name is the supplied name if name is not None: out['name'] = name return out def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. The base method raises NotImplementedError and must be overridden. Returns ------- arrays : list of ndarray """ raise NotImplementedError(f'column {self.name} is not sortable') class MixinInfo(BaseColumnInfo): @property def name(self): return self._attrs.get('name') @name.setter def name(self, name): # For mixin columns that live within a table, rename the column in the # table when setting the name attribute. This mirrors the same # functionality in the BaseColumn class. if self.parent_table is not None: new_name = None if name is None else str(name) self.parent_table.columns._rename_column(self.name, new_name) self._attrs['name'] = name class ParentDtypeInfo(MixinInfo): """Mixin that gets info.dtype from parent""" attrs_from_parent = set(['dtype']) # dtype and unit taken from parent
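
# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of this module's API: ``ExampleColumn`` is a
# hypothetical mixin column invented for illustration.  It shows the intended
# pattern -- attach an info descriptor at class level, and a bound info
# instance is created lazily on first access, with ``dtype`` read from the
# parent object via ``attrs_from_parent``.
if __name__ == '__main__':

    class ExampleColumn(np.ndarray):
        info = ParentDtypeInfo()

    col = np.arange(3, dtype='float64').view(ExampleColumn)
    col.info.name = 'example'   # stored in the bound info's ``_attrs``
    print(col.info.dtype)       # float64, taken from the parent array
    print(col.info.name)        # 'example'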
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Wrappers for PLY to provide thread safety. """ import contextlib import functools import re import os import threading __all__ = ['lex', 'ThreadSafeParser', 'yacc'] _TAB_HEADER = """# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst # This file was automatically generated from ply. To re-generate this file, # remove it from this folder, then build astropy and run the tests in-place: # # python setup.py build_ext --inplace # pytest {package} # # You can then commit the changes to this file. """ _LOCK = threading.RLock() def _add_tab_header(filename, package): with open(filename, 'r') as f: contents = f.read() with open(filename, 'w') as f: f.write(_TAB_HEADER.format(package=package)) f.write(contents) @contextlib.contextmanager def _patch_get_caller_module_dict(module): """Temporarily replace the module's get_caller_module_dict. This is a function inside ``ply.lex`` and ``ply.yacc`` (each has a copy) that is used to retrieve the caller's local symbols. Here, we patch the function to instead retrieve the grandparent's local symbols to account for a wrapper layer. """ original = module.get_caller_module_dict @functools.wraps(original) def wrapper(levels): # Add 2, not 1, because the wrapper itself adds another level return original(levels + 2) module.get_caller_module_dict = wrapper yield module.get_caller_module_dict = original def lex(lextab, package, reflags=int(re.VERBOSE)): """Create a lexer from local variables. It automatically compiles the lexer in optimized mode, writing to ``lextab`` in the same directory as the calling file. This function is thread-safe. The returned lexer is *not* thread-safe, but if it is used exclusively with a single parser returned by :func:`yacc` then it will be safe. It is only intended to work with lexers defined within the calling function, rather than at class or module scope. Parameters ---------- lextab : str Name for the file to write with the generated tables, if it does not already exist (without ``.py`` suffix). package : str Name of a test package which should be run with pytest to regenerate the output file. This is inserted into a comment in the generated file. reflags : int Passed to ``ply.lex``. """ from astropy.extern.ply import lex caller_file = lex.get_caller_module_dict(2)['__file__'] lextab_filename = os.path.join(os.path.dirname(caller_file), lextab + '.py') with _LOCK: lextab_exists = os.path.exists(lextab_filename) with _patch_get_caller_module_dict(lex): lexer = lex.lex(optimize=True, lextab=lextab, outputdir=os.path.dirname(caller_file), reflags=reflags) if not lextab_exists: _add_tab_header(lextab_filename, package) return lexer class ThreadSafeParser: """Wrap a parser produced by ``ply.yacc.yacc``. It provides a :meth:`parse` method that is thread-safe. """ def __init__(self, parser): self.parser = parser self._lock = threading.RLock() def parse(self, *args, **kwargs): """Run the wrapped parser, with a lock to ensure serialization.""" with self._lock: return self.parser.parse(*args, **kwargs) def yacc(tabmodule, package): """Create a parser from local variables. It automatically compiles the parser in optimized mode, writing to ``tabmodule`` in the same directory as the calling file. This function is thread-safe, and the returned parser is also thread-safe, provided that it does not share a lexer with any other parser. 
It is only intended to work with parsers defined within the calling function, rather than at class or module scope. Parameters ---------- tabmodule : str Name for the file to write with the generated tables, if it does not already exist (without ``.py`` suffix). package : str Name of a test package which should be run with pytest to regenerate the output file. This is inserted into a comment in the generated file. """ from astropy.extern.ply import yacc caller_file = yacc.get_caller_module_dict(2)['__file__'] tab_filename = os.path.join(os.path.dirname(caller_file), tabmodule + '.py') with _LOCK: tab_exists = os.path.exists(tab_filename) with _patch_get_caller_module_dict(yacc): parser = yacc.yacc(tabmodule=tabmodule, outputdir=os.path.dirname(caller_file), debug=False, optimize=True, write_tables=True) if not tab_exists: _add_tab_header(tab_filename, package) return ThreadSafeParser(parser)
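
# ---------------------------------------------------------------------------
# Hedged usage sketch: a throwaway integer grammar run through the wrappers
# above.  The table-module names (``example_lextab``/``example_parsetab``),
# the ``package`` string, and the grammar itself are invented for
# illustration; running this writes the two table files next to this module,
# as described in ``_TAB_HEADER``.
if __name__ == '__main__':

    def _make_example_parser():
        # Token and rule definitions must be *local* to the function calling
        # ``lex``/``yacc``, since both read the caller's local symbol table.
        tokens = ('NUMBER',)
        t_ignore = ' '

        def t_NUMBER(t):
            r'\d+'
            t.value = int(t.value)
            return t

        def t_error(t):
            raise ValueError(f'invalid character {t.value[0]!r}')

        def p_expr(p):
            'expr : NUMBER'
            p[0] = p[1]

        def p_error(p):
            raise ValueError('parse error')

        return (lex(lextab='example_lextab', package='astropy.utils'),
                yacc(tabmodule='example_parsetab', package='astropy.utils'))

    example_lexer, example_parser = _make_example_parser()
    print(example_parser.parse('42', lexer=example_lexer))  # -> 42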
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""The ShapedLikeNDArray mixin class and shape-related functions."""

import abc
from itertools import zip_longest

import numpy as np

__all__ = ['NDArrayShapeMethods', 'ShapedLikeNDArray', 'check_broadcast',
           'IncompatibleShapeError', 'unbroadcast']


class NDArrayShapeMethods:
    """Mixin class to provide shape-changing methods.

    The class proper is assumed to have some underlying data, which are arrays
    or array-like structures. It must define a ``shape`` property, which gives
    the shape of those data, as well as an ``_apply`` method that creates a
    new instance in which a `~numpy.ndarray` method has been applied to those.

    Furthermore, for consistency with `~numpy.ndarray`, it is recommended to
    define a setter for the ``shape`` property, which, like the
    `~numpy.ndarray.shape` property, allows in-place reshaping of the internal
    data (and, unlike the ``reshape`` method, raises an exception if this is
    not possible).

    This class only provides the shape-changing methods and is meant in
    particular for `~numpy.ndarray` subclasses that need to keep track of
    other arrays.  For other classes,
    `~astropy.utils.shapes.ShapedLikeNDArray` is recommended.

    """

    # Note to developers: if new methods are added here, be sure to check that
    # they work properly with the classes that use this, such as Time and
    # BaseRepresentation, i.e., look at their ``_apply`` methods and add
    # relevant tests.  This is particularly important for methods that imply
    # copies rather than views of data (see the special-case treatment of
    # 'flatten' in Time).

    def __getitem__(self, item):
        return self._apply('__getitem__', item)

    def copy(self, *args, **kwargs):
        """Return an instance containing copies of the internal data.

        Parameters are as for :meth:`~numpy.ndarray.copy`.
        """
        return self._apply('copy', *args, **kwargs)

    def reshape(self, *args, **kwargs):
        """Returns an instance containing the same data with a new shape.

        Parameters are as for :meth:`~numpy.ndarray.reshape`.  Note that it is
        not always possible to change the shape of an array without copying
        the data (see :func:`~numpy.reshape` documentation).  If you want an
        error to be raised if the data is copied, you should assign the new
        shape to the shape attribute (note: this may not be implemented for
        all classes using ``NDArrayShapeMethods``).
        """
        return self._apply('reshape', *args, **kwargs)

    def ravel(self, *args, **kwargs):
        """Return an instance with the array collapsed into one dimension.

        Parameters are as for :meth:`~numpy.ndarray.ravel`.  Note that it is
        not always possible to unravel an array without copying the data.
        If you want an error to be raised if the data is copied, you should
        assign shape ``(-1,)`` to the shape attribute.
        """
        return self._apply('ravel', *args, **kwargs)

    def flatten(self, *args, **kwargs):
        """Return a copy with the array collapsed into one dimension.

        Parameters are as for :meth:`~numpy.ndarray.flatten`.
        """
        return self._apply('flatten', *args, **kwargs)

    def transpose(self, *args, **kwargs):
        """Return an instance with the data transposed.

        Parameters are as for :meth:`~numpy.ndarray.transpose`.  All internal
        data are views of the data of the original.
        """
        return self._apply('transpose', *args, **kwargs)

    @property
    def T(self):
        """Return an instance with the data transposed.

        Parameters are as for :attr:`~numpy.ndarray.T`.  All internal
        data are views of the data of the original.
""" if self.ndim < 2: return self else: return self.transpose() def swapaxes(self, *args, **kwargs): """Return an instance with the given axes interchanged. Parameters are as for :meth:`~numpy.ndarray.swapaxes`: ``axis1, axis2``. All internal data are views of the data of the original. """ return self._apply('swapaxes', *args, **kwargs) def diagonal(self, *args, **kwargs): """Return an instance with the specified diagonals. Parameters are as for :meth:`~numpy.ndarray.diagonal`. All internal data are views of the data of the original. """ return self._apply('diagonal', *args, **kwargs) def squeeze(self, *args, **kwargs): """Return an instance with single-dimensional shape entries removed Parameters are as for :meth:`~numpy.ndarray.squeeze`. All internal data are views of the data of the original. """ return self._apply('squeeze', *args, **kwargs) def take(self, indices, axis=None, out=None, mode='raise'): """Return a new instance formed from the elements at the given indices. Parameters are as for :meth:`~numpy.ndarray.take`, except that, obviously, no output array can be given. """ if out is not None: return NotImplementedError("cannot pass 'out' argument to 'take.") return self._apply('take', indices, axis=axis, mode=mode) class ShapedLikeNDArray(NDArrayShapeMethods, metaclass=abc.ABCMeta): """Mixin class to provide shape-changing methods. The class proper is assumed to have some underlying data, which are arrays or array-like structures. It must define a ``shape`` property, which gives the shape of those data, as well as an ``_apply`` method that creates a new instance in which a `~numpy.ndarray` method has been applied to those. Furthermore, for consistency with `~numpy.ndarray`, it is recommended to define a setter for the ``shape`` property, which, like the `~numpy.ndarray.shape` property allows in-place reshaping the internal data (and, unlike the ``reshape`` method raises an exception if this is not possible). This class also defines default implementations for ``ndim`` and ``size`` properties, calculating those from the ``shape``. These can be overridden by subclasses if there are faster ways to obtain those numbers. """ # Note to developers: if new methods are added here, be sure to check that # they work properly with the classes that use this, such as Time and # BaseRepresentation, i.e., look at their ``_apply`` methods and add # relevant tests. This is particularly important for methods that imply # copies rather than views of data (see the special-case treatment of # 'flatten' in Time). @property @abc.abstractmethod def shape(self): """The shape of the underlying data.""" @abc.abstractmethod def _apply(method, *args, **kwargs): """Create a new instance, with ``method`` applied to underlying data. The method is any of the shape-changing methods for `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those picking particular elements (``__getitem__``, ``take``, etc.). It will be applied to the underlying arrays (e.g., ``jd1`` and ``jd2`` in `~astropy.time.Time`), with the results used to create a new instance. Parameters ---------- method : str Method to be applied to the instance's internal data arrays. args : tuple Any positional arguments for ``method``. kwargs : dict Any keyword arguments for ``method``. 
""" @property def ndim(self): """The number of dimensions of the instance and underlying arrays.""" return len(self.shape) @property def size(self): """The size of the object, as calculated from its shape.""" size = 1 for sh in self.shape: size *= sh return size @property def isscalar(self): return self.shape == () def __len__(self): if self.isscalar: raise TypeError("Scalar {!r} object has no len()" .format(self.__class__.__name__)) return self.shape[0] def __bool__(self): """Any instance should evaluate to True, except when it is empty.""" return self.size > 0 def __getitem__(self, item): try: return self._apply('__getitem__', item) except IndexError: if self.isscalar: raise TypeError('scalar {!r} object is not subscriptable.' .format(self.__class__.__name__)) else: raise def __iter__(self): if self.isscalar: raise TypeError('scalar {!r} object is not iterable.' .format(self.__class__.__name__)) # We cannot just write a generator here, since then the above error # would only be raised once we try to use the iterator, rather than # upon its definition using iter(self). def self_iter(): for idx in range(len(self)): yield self[idx] return self_iter() # Functions that change shape or essentially do indexing. _APPLICABLE_FUNCTIONS = { np.moveaxis, np.rollaxis, np.atleast_1d, np.atleast_2d, np.atleast_3d, np.expand_dims, np.broadcast_to, np.flip, np.fliplr, np.flipud, np.rot90, np.roll, np.delete, } # Functions that themselves defer to a method. Those are all # defined in np.core.fromnumeric, but exclude alen as well as # sort and partition, which make copies before calling the method. _METHOD_FUNCTIONS = {getattr(np, name): {'amax': 'max', 'amin': 'min', 'around': 'round', 'round_': 'round', 'alltrue': 'all', 'sometrue': 'any'}.get(name, name) for name in np.core.fromnumeric.__all__ if name not in ['alen', 'sort', 'partition']} # Add np.copy, which we may as well let defer to our method. _METHOD_FUNCTIONS[np.copy] = 'copy' # Could be made to work with a bit of effort: # np.where, np.compress, np.extract, # np.diag_indices_from, np.triu_indices_from, np.tril_indices_from # np.tile, np.repeat (need .repeat method) # TODO: create a proper implementation. # Furthermore, some arithmetic functions such as np.mean, np.median, # could work for Time, and many more for TimeDelta, so those should # override __array_function__. def __array_function__(self, function, types, args, kwargs): """Wrap numpy functions that make sense.""" if function in self._APPLICABLE_FUNCTIONS: if function is np.broadcast_to: # Ensure that any ndarray subclasses used are # properly propagated. kwargs.setdefault('subok', True) elif (function in {np.atleast_1d, np.atleast_2d, np.atleast_3d} and len(args) > 1): return tuple(function(arg, **kwargs) for arg in args) if self is not args[0]: return NotImplemented return self._apply(function, *args[1:], **kwargs) # For functions that defer to methods, use the corresponding # method/attribute if we have it. Otherwise, fall through. if self is args[0] and function in self._METHOD_FUNCTIONS: method = getattr(self, self._METHOD_FUNCTIONS[function], None) if method is not None: if callable(method): return method(*args[1:], **kwargs) else: # For np.shape, etc., just return the attribute. return method # Fall-back, just pass the arguments on since perhaps the function # works already (see above). 
return function.__wrapped__(*args, **kwargs) class IncompatibleShapeError(ValueError): def __init__(self, shape_a, shape_a_idx, shape_b, shape_b_idx): super().__init__(shape_a, shape_a_idx, shape_b, shape_b_idx) def check_broadcast(*shapes): """ Determines whether two or more Numpy arrays can be broadcast with each other based on their shape tuple alone. Parameters ---------- *shapes : tuple All shapes to include in the comparison. If only one shape is given it is passed through unmodified. If no shapes are given returns an empty `tuple`. Returns ------- broadcast : `tuple` If all shapes are mutually broadcastable, returns a tuple of the full broadcast shape. """ if len(shapes) == 0: return () elif len(shapes) == 1: return shapes[0] reversed_shapes = (reversed(shape) for shape in shapes) full_shape = [] for dims in zip_longest(*reversed_shapes, fillvalue=1): max_dim = 1 max_dim_idx = None for idx, dim in enumerate(dims): if dim == 1: continue if max_dim == 1: # The first dimension of size greater than 1 max_dim = dim max_dim_idx = idx elif dim != max_dim: raise IncompatibleShapeError( shapes[max_dim_idx], max_dim_idx, shapes[idx], idx) full_shape.append(max_dim) return tuple(full_shape[::-1]) def unbroadcast(array): """ Given an array, return a new array that is the smallest subset of the original array that can be re-broadcasted back to the original array. See https://stackoverflow.com/questions/40845769/un-broadcasting-numpy-arrays for more details. """ if array.ndim == 0: return array array = array[tuple((slice(0, 1) if stride == 0 else slice(None)) for stride in array.strides)] # Remove leading ones, which are not needed in numpy broadcasting. first_not_unity = next((i for (i, s) in enumerate(array.shape) if s > 1), array.ndim) return array.reshape(array.shape[first_not_unity:])
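
# ---------------------------------------------------------------------------
# Hedged usage sketch for the helpers above; the shapes and values are
# arbitrary.  It demonstrates the expected round trip between
# ``np.broadcast_to`` and ``unbroadcast``, and the error raised by
# ``check_broadcast`` for incompatible shapes.
if __name__ == '__main__':
    # Mutually broadcastable shapes yield the full broadcast shape ...
    print(check_broadcast((3, 1), (1, 4)))   # -> (3, 4)

    # ... while incompatible ones raise IncompatibleShapeError.
    try:
        check_broadcast((2,), (3,))
    except IncompatibleShapeError as exc:
        print('incompatible:', exc.args)

    # unbroadcast strips dimensions that broadcasting added (stride 0),
    # returning the smallest array that broadcasts back to the original.
    base = np.arange(4)
    wide = np.broadcast_to(base, (5, 3, 4))
    small = unbroadcast(wide)
    print(small.shape)                       # -> (4,)
    assert (np.broadcast_to(small, wide.shape) == wide).all()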
"""Utilities and extensions for use with `argparse`.""" import os import argparse def directory(arg): """ An argument type (for use with the ``type=`` argument to `argparse.ArgumentParser.add_argument` which determines if the argument is an existing directory (and returns the absolute path). """ if not isinstance(arg, str) and os.path.isdir(arg): raise argparse.ArgumentTypeError( "{} is not a directory or does not exist (the directory must " "be created first)".format(arg)) return os.path.abspath(arg) def readable_directory(arg): """ An argument type (for use with the ``type=`` argument to `argparse.ArgumentParser.add_argument` which determines if the argument is a directory that exists and is readable (and returns the absolute path). """ arg = directory(arg) if not os.access(arg, os.R_OK): raise argparse.ArgumentTypeError( f"{arg} exists but is not readable with its current permissions") return arg def writeable_directory(arg): """ An argument type (for use with the ``type=`` argument to `argparse.ArgumentParser.add_argument` which determines if the argument is a directory that exists and is writeable (and returns the absolute path). """ arg = directory(arg) if not os.access(arg, os.W_OK): raise argparse.ArgumentTypeError( f"{arg} exists but is not writeable with its current permissions") return arg
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Functions for accessing, downloading, and caching data files.""" import atexit import contextlib import errno import fnmatch import functools import hashlib import os import io import re import shutil import ssl import sys import urllib.request import urllib.error import urllib.parse import zipfile import ftplib from tempfile import NamedTemporaryFile, gettempdir, TemporaryDirectory, mkdtemp from warnings import warn try: import certifi except ImportError: # certifi support is optional; when available it will be used for TLS/SSL # downloads certifi = None import astropy.config.paths from astropy import config as _config from astropy.utils.exceptions import AstropyWarning from astropy.utils.introspection import find_current_module, resolve_name # Order here determines order in the autosummary __all__ = [ 'Conf', 'conf', 'download_file', 'download_files_in_parallel', 'get_readable_fileobj', 'get_pkg_data_fileobj', 'get_pkg_data_filename', 'get_pkg_data_contents', 'get_pkg_data_fileobjs', 'get_pkg_data_filenames', 'get_pkg_data_path', 'is_url', 'is_url_in_cache', 'get_cached_urls', 'cache_total_size', 'cache_contents', 'export_download_cache', 'import_download_cache', 'import_file_to_cache', 'check_download_cache', 'clear_download_cache', 'compute_hash', 'get_free_space_in_dir', 'check_free_space_in_dir', 'get_file_contents', 'CacheMissingWarning', "CacheDamaged" ] _dataurls_to_alias = {} class _NonClosingBufferedReader(io.BufferedReader): def __del__(self): try: # NOTE: self.raw will not be closed, but left in the state # it was in at detactment self.detach() except Exception: pass class _NonClosingTextIOWrapper(io.TextIOWrapper): def __del__(self): try: # NOTE: self.stream will not be closed, but left in the state # it was in at detactment self.detach() except Exception: pass class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy.utils.data`. """ dataurl = _config.ConfigItem( 'http://data.astropy.org/', 'Primary URL for astropy remote data site.') dataurl_mirror = _config.ConfigItem( 'http://www.astropy.org/astropy-data/', 'Mirror URL for astropy remote data site.') default_http_user_agent = _config.ConfigItem( 'astropy', 'Default User-Agent for HTTP request headers. This can be overwritten ' 'for a particular call via http_headers option, where available. ' 'This only provides the default value when not set by https_headers.') remote_timeout = _config.ConfigItem( 10., 'Time to wait for remote data queries (in seconds).', aliases=['astropy.coordinates.name_resolve.name_resolve_timeout']) allow_internet = _config.ConfigItem( True, 'If False, prevents any attempt to download from Internet.') compute_hash_block_size = _config.ConfigItem( 2 ** 16, # 64K 'Block size for computing file hashes.') download_block_size = _config.ConfigItem( 2 ** 16, # 64K 'Number of bytes of remote data to download per step.') delete_temporary_downloads_at_exit = _config.ConfigItem( True, 'If True, temporary download files created when the cache is ' 'inaccessible will be deleted at the end of the python session.') conf = Conf() class CacheMissingWarning(AstropyWarning): """ This warning indicates the standard cache directory is not accessible, with the first argument providing the warning message. If args[1] is present, it is a filename indicating the path to a temporary file that was created to store a remote data download in the absence of the cache. 
""" def is_url(string): """ Test whether a string is a valid URL for :func:`download_file`. Parameters ---------- string : str The string to test. Returns ------- status : bool String is URL or not. """ url = urllib.parse.urlparse(string) # we can't just check that url.scheme is not an empty string, because # file paths in windows would return a non-empty scheme (e.g. e:\\ # returns 'e'). return url.scheme.lower() in ['http', 'https', 'ftp', 'sftp', 'ssh', 'file'] # Backward compatibility because some downstream packages allegedly uses it. _is_url = is_url def _is_inside(path, parent_path): # We have to try realpath too to avoid issues with symlinks, but we leave # abspath because some systems like debian have the absolute path (with no # symlinks followed) match, but the real directories in different # locations, so need to try both cases. return os.path.abspath(path).startswith(os.path.abspath(parent_path)) \ or os.path.realpath(path).startswith(os.path.realpath(parent_path)) @contextlib.contextmanager def get_readable_fileobj(name_or_obj, encoding=None, cache=False, show_progress=True, remote_timeout=None, sources=None, http_headers=None): """Yield a readable, seekable file-like object from a file or URL. This supports passing filenames, URLs, and readable file-like objects, any of which can be compressed in gzip, bzip2 or lzma (xz) if the appropriate compression libraries are provided by the Python installation. Notes ----- This function is a context manager, and should be used for example as:: with get_readable_fileobj('file.dat') as f: contents = f.read() If a URL is provided and the cache is in use, the provided URL will be the name used in the cache. The contents may already be stored in the cache under this URL provided, they may be downloaded from this URL, or they may be downloaded from one of the locations listed in ``sources``. See `~download_file` for details. Parameters ---------- name_or_obj : str or file-like The filename of the file to access (if given as a string), or the file-like object to access. If a file-like object, it must be opened in binary mode. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method that returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. cache : bool or "update", optional Whether to cache the contents of remote URLs. If "update", check the remote URL for a new version but store the result in the cache. show_progress : bool, optional Whether to display a progress bar if the file is downloaded from a remote server. Default is `True`. remote_timeout : float Timeout for remote requests in seconds (default is the configurable `astropy.utils.data.Conf.remote_timeout`). sources : list of str, optional If provided, a list of URLs to try to obtain the file from. The result will be stored under the original URL. The original URL will *not* be tried unless it is in this list; this is to prevent long waits for a primary server that is known to be inaccessible at the moment. http_headers : dict or None HTTP request headers to pass into ``urlopen`` if needed. 
        (These headers are ignored if the protocol for the
        ``name_or_obj``/``sources`` entry is not a remote HTTP URL.) In the
        default case (None), the headers are ``User-Agent: some_value`` and
        ``Accept: */*``, where ``some_value`` is set by
        ``astropy.utils.data.conf.default_http_user_agent``.

    Returns
    -------
    file : readable file-like
    """
    # close_fds is a list of file handles created by this function
    # that need to be closed.  We don't want to always just close the
    # returned file handle, because it may simply be the file handle
    # passed in.  In that case it is not the responsibility of this
    # function to close it: doing so could result in a "double close"
    # and an "invalid file descriptor" exception.
    close_fds = []
    delete_fds = []

    if remote_timeout is None:
        # use configfile default
        remote_timeout = conf.remote_timeout

    # name_or_obj could be an os.PathLike object
    if isinstance(name_or_obj, os.PathLike):
        name_or_obj = os.fspath(name_or_obj)

    # Get a file object to the content
    if isinstance(name_or_obj, str):
        is_url = _is_url(name_or_obj)
        if is_url:
            name_or_obj = download_file(
                name_or_obj, cache=cache, show_progress=show_progress,
                timeout=remote_timeout, sources=sources,
                http_headers=http_headers)
        fileobj = io.FileIO(name_or_obj, 'r')
        if is_url and not cache:
            delete_fds.append(fileobj)
        close_fds.append(fileobj)
    else:
        fileobj = name_or_obj

    # Check if the file object supports random access, and if not,
    # then wrap it in a BytesIO buffer.  It would be nicer to use a
    # BufferedReader to avoid loading the whole file into memory first,
    # but that is not compatible with streams or
    # ``urllib.request.urlopen`` objects.
    if not hasattr(fileobj, 'seek'):
        try:
            # py.path.LocalPath objects have a .read() method but it uses
            # text mode, which won't work.  .read_binary() does, and
            # surely other ducks would return binary contents when
            # called like this.
            # py.path.LocalPath is what comes from the tmpdir fixture
            # in pytest.
fileobj = io.BytesIO(fileobj.read_binary()) except AttributeError: fileobj = io.BytesIO(fileobj.read()) # Now read enough bytes to look at signature signature = fileobj.read(4) fileobj.seek(0) if signature[:3] == b'\x1f\x8b\x08': # gzip import struct try: import gzip fileobj_new = gzip.GzipFile(fileobj=fileobj, mode='rb') fileobj_new.read(1) # need to check that the file is really gzip except (OSError, EOFError, struct.error): # invalid gzip file fileobj.seek(0) fileobj_new.close() else: fileobj_new.seek(0) fileobj = fileobj_new elif signature[:3] == b'BZh': # bzip2 try: import bz2 except ImportError: for fd in close_fds: fd.close() raise ModuleNotFoundError( "This Python installation does not provide the bz2 module.") try: # bz2.BZ2File does not support file objects, only filenames, so we # need to write the data to a temporary file with NamedTemporaryFile("wb", delete=False) as tmp: tmp.write(fileobj.read()) tmp.close() fileobj_new = bz2.BZ2File(tmp.name, mode='rb') fileobj_new.read(1) # need to check that the file is really bzip2 except OSError: # invalid bzip2 file fileobj.seek(0) fileobj_new.close() # raise else: fileobj_new.seek(0) close_fds.append(fileobj_new) fileobj = fileobj_new elif signature[:3] == b'\xfd7z': # xz try: import lzma fileobj_new = lzma.LZMAFile(fileobj, mode='rb') fileobj_new.read(1) # need to check that the file is really xz except ImportError: for fd in close_fds: fd.close() raise ModuleNotFoundError( "This Python installation does not provide the lzma module.") except (OSError, EOFError): # invalid xz file fileobj.seek(0) fileobj_new.close() # should we propagate this to the caller to signal bad content? # raise ValueError(e) else: fileobj_new.seek(0) fileobj = fileobj_new # By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File # or lzma.LZMAFile instance opened in binary mode (that is, read # returns bytes). Now we need to, if requested, wrap it in a # io.TextIOWrapper so read will return unicode based on the # encoding parameter. needs_textio_wrapper = encoding != 'binary' if needs_textio_wrapper: # A bz2.BZ2File can not be wrapped by a TextIOWrapper, # so we decompress it to a temporary file and then # return a handle to that. try: import bz2 except ImportError: pass else: if isinstance(fileobj, bz2.BZ2File): tmp = NamedTemporaryFile("wb", delete=False) data = fileobj.read() tmp.write(data) tmp.close() delete_fds.append(tmp) fileobj = io.FileIO(tmp.name, 'r') close_fds.append(fileobj) fileobj = _NonClosingBufferedReader(fileobj) fileobj = _NonClosingTextIOWrapper(fileobj, encoding=encoding) # Ensure that file is at the start - io.FileIO will for # example not always be at the start: # >>> import io # >>> f = open('test.fits', 'rb') # >>> f.read(4) # 'SIMP' # >>> f.seek(0) # >>> fileobj = io.FileIO(f.fileno()) # >>> fileobj.tell() # 4096L fileobj.seek(0) try: yield fileobj finally: for fd in close_fds: fd.close() for fd in delete_fds: os.remove(fd.name) def get_file_contents(*args, **kwargs): """ Retrieves the contents of a filename or file-like object. See the `get_readable_fileobj` docstring for details on parameters. Returns ------- object The content of the file (as requested by ``encoding``). """ with get_readable_fileobj(*args, **kwargs) as f: return f.read() @contextlib.contextmanager def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True): """ Retrieves a data file from the standard locations for the package and provides the file as a file-like object that reads bytes. 
Parameters ---------- data_name : str Name/location of the desired data file. One of the following: * The name of a data file included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data/file.dat'`` to get the file in ``astropy/pkgname/data/file.dat``. Double-dots can be used to go up a level. In the same example, use ``'../data/file.dat'`` to get ``astropy/data/file.dat``. * If a matching local file does not exist, the Astropy data server will be queried for the file. * A hash like that produced by `compute_hash` can be requested, prefixed by 'hash/' e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash will first be searched for locally, and if not found, the Astropy data server will be queried. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. cache : bool If True, the file will be downloaded and saved locally or the already-cached local copy will be accessed. If False, the file-like object will directly access the resource (e.g. if a remote URL is accessed, an object like that from `urllib.request.urlopen` is returned). Returns ------- fileobj : file-like An object with the contents of the data file available via ``read`` function. Can be used as part of a ``with`` statement, automatically closing itself after the ``with`` block. Raises ------ urllib.error.URLError If a remote file cannot be found. OSError If problems occur writing or reading a local file. Examples -------- This will retrieve a data file and its contents for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_fileobj >>> with get_pkg_data_fileobj('data/3d_cd.hdr', ... package='astropy.wcs.tests') as fobj: ... fcontents = fobj.read() ... This next example would download a data file from the astropy data server because the ``allsky/allsky_rosat.fits`` file is not present in the source distribution. It will also save the file locally so the next time it is accessed it won't need to be downloaded.:: >>> from astropy.utils.data import get_pkg_data_fileobj >>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits', ... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT ... fcontents = fobj.read() ... Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done] This does the same thing but does *not* cache it locally:: >>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits', ... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT ... fcontents = fobj.read() ... 
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done] See Also -------- get_pkg_data_contents : returns the contents of a file or url as a bytes object get_pkg_data_filename : returns a local name for a file containing the data """ # noqa datafn = get_pkg_data_path(data_name, package=package) if os.path.isdir(datafn): raise OSError("Tried to access a data file that's actually " "a package data directory") elif os.path.isfile(datafn): # local file with get_readable_fileobj(datafn, encoding=encoding) as fileobj: yield fileobj else: # remote file with get_readable_fileobj( conf.dataurl + data_name, encoding=encoding, cache=cache, sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name], ) as fileobj: # We read a byte to trigger any URLErrors fileobj.read(1) fileobj.seek(0) yield fileobj def get_pkg_data_filename(data_name, package=None, show_progress=True, remote_timeout=None): """ Retrieves a data file from the standard locations for the package and provides a local filename for the data. This function is similar to `get_pkg_data_fileobj` but returns the file *name* instead of a readable file-like object. This means that this function must always cache remote files locally, unlike `get_pkg_data_fileobj`. Parameters ---------- data_name : str Name/location of the desired data file. One of the following: * The name of a data file included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data/file.dat'`` to get the file in ``astropy/pkgname/data/file.dat``. Double-dots can be used to go up a level. In the same example, use ``'../data/file.dat'`` to get ``astropy/data/file.dat``. * If a matching local file does not exist, the Astropy data server will be queried for the file. * A hash like that produced by `compute_hash` can be requested, prefixed by 'hash/' e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash will first be searched for locally, and if not found, the Astropy data server will be queried. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. show_progress : bool, optional Whether to display a progress bar if the file is downloaded from a remote server. Default is `True`. remote_timeout : float Timeout for the requests in seconds (default is the configurable `astropy.utils.data.Conf.remote_timeout`). Raises ------ urllib.error.URLError If a remote file cannot be found. OSError If problems occur writing or reading a local file. Returns ------- filename : str A file path on the local file system corresponding to the data requested in ``data_name``. Examples -------- This will retrieve the contents of the data file for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_filename >>> fn = get_pkg_data_filename('data/3d_cd.hdr', ... package='astropy.wcs.tests') >>> with open(fn) as f: ... fcontents = f.read() ... This retrieves a data file by hash either locally or from the astropy data server:: >>> from astropy.utils.data import get_pkg_data_filename >>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP >>> with open(fn) as f: ... fcontents = f.read() ... 
See Also -------- get_pkg_data_contents : returns the contents of a file or url as a bytes object get_pkg_data_fileobj : returns a file-like object with the data """ if remote_timeout is None: # use configfile default remote_timeout = conf.remote_timeout if data_name.startswith('hash/'): # first try looking for a local version if a hash is specified hashfn = _find_hash_fn(data_name[5:]) if hashfn is None: return download_file(conf.dataurl + data_name, cache=True, show_progress=show_progress, timeout=remote_timeout, sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name]) else: return hashfn else: fs_path = os.path.normpath(data_name) datafn = get_pkg_data_path(fs_path, package=package) if os.path.isdir(datafn): raise OSError("Tried to access a data file that's actually " "a package data directory") elif os.path.isfile(datafn): # local file return datafn else: # remote file return download_file(conf.dataurl + data_name, cache=True, show_progress=show_progress, timeout=remote_timeout, sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name]) def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True): """ Retrieves a data file from the standard locations and returns its contents as a bytes object. Parameters ---------- data_name : str Name/location of the desired data file. One of the following: * The name of a data file included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data/file.dat'`` to get the file in ``astropy/pkgname/data/file.dat``. Double-dots can be used to go up a level. In the same example, use ``'../data/file.dat'`` to get ``astropy/data/file.dat``. * If a matching local file does not exist, the Astropy data server will be queried for the file. * A hash like that produced by `compute_hash` can be requested, prefixed by 'hash/' e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash will first be searched for locally, and if not found, the Astropy data server will be queried. * A URL to some other file. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method that returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. cache : bool If True, the file will be downloaded and saved locally or the already-cached local copy will be accessed. If False, the file-like object will directly access the resource (e.g. if a remote URL is accessed, an object like that from `urllib.request.urlopen` is returned). Returns ------- contents : bytes The complete contents of the file as a bytes object. Raises ------ urllib.error.URLError If a remote file cannot be found. OSError If problems occur writing or reading a local file. 
See Also -------- get_pkg_data_fileobj : returns a file-like object with the data get_pkg_data_filename : returns a local name for a file containing the data """ with get_pkg_data_fileobj(data_name, package=package, encoding=encoding, cache=cache) as fd: contents = fd.read() return contents def get_pkg_data_filenames(datadir, package=None, pattern='*'): """ Returns the path of all of the data files in a given directory that match a given glob pattern. Parameters ---------- datadir : str Name/location of the desired data files. One of the following: * The name of a directory included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data'`` to get the files in ``astropy/pkgname/data``. * Remote URLs are not currently supported. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. pattern : str, optional A UNIX-style filename glob pattern to match files. See the `glob` module in the standard library for more information. By default, matches all files. Returns ------- filenames : iterator of str Paths on the local filesystem in *datadir* matching *pattern*. Examples -------- This will retrieve the contents of the data file for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_filenames >>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests', ... '*.hdr'): ... with open(fn) as f: ... fcontents = f.read() ... """ path = get_pkg_data_path(datadir, package=package) if os.path.isfile(path): raise OSError( "Tried to access a data directory that's actually " "a package data file") elif os.path.isdir(path): for filename in os.listdir(path): if fnmatch.fnmatch(filename, pattern): yield os.path.join(path, filename) else: raise OSError("Path not found") def get_pkg_data_fileobjs(datadir, package=None, pattern='*', encoding=None): """ Returns readable file objects for all of the data files in a given directory that match a given glob pattern. Parameters ---------- datadir : str Name/location of the desired data files. One of the following: * The name of a directory included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data'`` to get the files in ``astropy/pkgname/data`` * Remote URLs are not currently supported package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. pattern : str, optional A UNIX-style filename glob pattern to match files. See the `glob` module in the standard library for more information. By default, matches all files. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method that returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. Returns ------- fileobjs : iterator of file object File objects for each of the files on the local filesystem in *datadir* matching *pattern*. 
    Examples
    --------
    This will retrieve the contents of the data files for the `astropy.wcs`
    tests::

        >>> from astropy.utils.data import get_pkg_data_fileobjs
        >>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests',
        ...                                 '*.hdr'):
        ...     fcontents = fd.read()
        ...
    """
    for fn in get_pkg_data_filenames(datadir, package=package,
                                     pattern=pattern):
        with get_readable_fileobj(fn, encoding=encoding) as fd:
            yield fd


def compute_hash(localfn):
    """
    Computes the MD5 hash for a file.

    The hash for a data file is used for looking up data files in a unique
    fashion. This is of particular use for tests; a test may require a
    particular version of a particular file, in which case it can be accessed
    via hash to get the appropriate version.

    Typically, if you wish to write a test that requires a particular data
    file, you will want to submit that file to the astropy data servers, and
    use e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
    but with the hash for your file in place of the hash in the example.

    Parameters
    ----------
    localfn : str
        The path to the file for which the hash should be generated.

    Returns
    -------
    hash : str
        The hex digest of the cryptographic hash for the contents of the
        ``localfn`` file.
    """
    with open(localfn, 'rb') as f:
        h = hashlib.md5()
        block = f.read(conf.compute_hash_block_size)
        while block:
            h.update(block)
            block = f.read(conf.compute_hash_block_size)

    return h.hexdigest()


def get_pkg_data_path(*path, package=None):
    """Get path from source-included data directories.

    Parameters
    ----------
    *path : str
        Name/location of the desired data file/directory.
        May be a tuple of strings for ``os.path`` joining.

    package : str or None, optional, keyword-only
        If specified, look for a file relative to the given package, rather
        than the calling module's package.

    Returns
    -------
    path : str
        Name/location of the desired data file/directory.

    Raises
    ------
    ImportError
        Given package or module is not importable.
    RuntimeError
        If the local data file is outside of the package's tree.

    """
    if package is None:
        module = find_current_module(1, finddiff=['astropy.utils.data',
                                                  'contextlib'])
        if module is None:
            # not called from inside an astropy package.  So just pass name
            # through
            return os.path.join(*path)

        if not hasattr(module, '__package__') or not module.__package__:
            # The __package__ attribute may be missing or set to None; see
            # PEP-366, also astropy issue #1256
            if '.' in module.__name__:
                package = module.__name__.rpartition('.')[0]
            else:
                package = module.__name__
        else:
            package = module.__package__
    else:
        # resolve_name will raise if ``package`` isn't a str, so no further
        # type checks are needed here.
        module = resolve_name(package)

    # module path within package
    module_path = os.path.dirname(module.__file__)
    full_path = os.path.join(module_path, *path)

    # Check that file is inside tree.
    rootpkgname = package.partition('.')[0]
    rootpkg = resolve_name(rootpkgname)
    root_dir = os.path.dirname(rootpkg.__file__)
    if not _is_inside(full_path, root_dir):
        raise RuntimeError(f"attempted to get a local data file outside "
                           f"of the {rootpkgname} tree.")

    return full_path


def _find_hash_fn(hexdigest, pkgname='astropy'):
    """
    Looks for a local file by hash; returns the file name if a valid file is
    found, otherwise returns None.
    """
    for v in cache_contents(pkgname=pkgname).values():
        if compute_hash(v) == hexdigest:
            return v
    return None


def get_free_space_in_dir(path, unit=False):
    """
    Given a path to a directory, returns the amount of free space
    on that filesystem.
Parameters ---------- path : str The path to a directory. unit : bool or `~astropy.units.Unit` Return the amount of free space as Quantity in the given unit, if provided. Default is `False` for backward-compatibility. Returns ------- free_space : int or `~astropy.units.Quantity` The amount of free space on the partition that the directory is on. If ``unit=False``, it is returned as plain integer (in bytes). """ if not os.path.isdir(path): raise OSError( "Can only determine free space associated with directories, " "not files.") # Actually you can on Linux but I want to avoid code that fails # on Windows only. free_space = shutil.disk_usage(path).free if unit: from astropy import units as u # TODO: Automatically determine best prefix to use. if unit is True: unit = u.byte free_space = u.Quantity(free_space, u.byte).to(unit) return free_space def check_free_space_in_dir(path, size): """ Determines if a given directory has enough space to hold a file of a given size. Parameters ---------- path : str The path to a directory. size : int or `~astropy.units.Quantity` A proposed filesize. If not a Quantity, assume it is in bytes. Raises ------ OSError There is not enough room on the filesystem. """ space = get_free_space_in_dir(path, unit=getattr(size, 'unit', False)) if space < size: from astropy.utils.console import human_file_size raise OSError(f"Not enough free space in {path} " f"to download a {human_file_size(size)} file, " f"only {human_file_size(space)} left") class _ftptlswrapper(urllib.request.ftpwrapper): def init(self): self.busy = 0 self.ftp = ftplib.FTP_TLS() self.ftp.connect(self.host, self.port, self.timeout) self.ftp.login(self.user, self.passwd) self.ftp.prot_p() _target = '/'.join(self.dirs) self.ftp.cwd(_target) class _FTPTLSHandler(urllib.request.FTPHandler): def connect_ftp(self, user, passwd, host, port, dirs, timeout): return _ftptlswrapper(user, passwd, host, port, dirs, timeout, persistent=False) @functools.lru_cache() def _build_urlopener(ftp_tls=False, ssl_context=None, allow_insecure=False): """ Helper for building a `urllib.request.build_opener` which handles TLS/SSL. 
""" ssl_context = dict(it for it in ssl_context) if ssl_context else {} cert_chain = {} if 'certfile' in ssl_context: cert_chain.update({ 'certfile': ssl_context.pop('certfile'), 'keyfile': ssl_context.pop('keyfile', None), 'password': ssl_context.pop('password', None) }) elif 'password' in ssl_context or 'keyfile' in ssl_context: raise ValueError( "passing 'keyfile' or 'password' in the ssl_context argument " "requires passing 'certfile' as well") if 'cafile' not in ssl_context and certifi is not None: ssl_context['cafile'] = certifi.where() ssl_context = ssl.create_default_context(**ssl_context) if allow_insecure: ssl_context.check_hostname = False ssl_context.verify_mode = ssl.CERT_NONE if cert_chain: ssl_context.load_cert_chain(**cert_chain) https_handler = urllib.request.HTTPSHandler(context=ssl_context) if ftp_tls: urlopener = urllib.request.build_opener(_FTPTLSHandler(), https_handler) else: urlopener = urllib.request.build_opener(https_handler) return urlopener def _try_url_open(source_url, timeout=None, http_headers=None, ftp_tls=False, ssl_context=None, allow_insecure=False): """Helper for opening a URL while handling TLS/SSL verification issues.""" # Always try first with a secure connection # _build_urlopener uses lru_cache, so the ssl_context argument must be # converted to a hashshable type (a set of 2-tuples) ssl_context = frozenset(ssl_context.items() if ssl_context else []) urlopener = _build_urlopener(ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=False) req = urllib.request.Request(source_url, headers=http_headers) try: return urlopener.open(req, timeout=timeout) except urllib.error.URLError as exc: reason = exc.reason if (isinstance(reason, ssl.SSLError) and reason.reason == 'CERTIFICATE_VERIFY_FAILED'): msg = (f'Verification of TLS/SSL certificate at {source_url} ' f'failed: this can mean either the server is ' f'misconfigured or your local root CA certificates are ' f'out-of-date; in the latter case this can usually be ' f'addressed by installing the Python package "certifi" ' f'(see the documentation for astropy.utils.data.download_url)') if not allow_insecure: msg += (f' or in both cases you can work around this by ' f'passing allow_insecure=True, but only if you ' f'understand the implications; the original error ' f'was: {reason}') raise urllib.error.URLError(msg) else: msg += '. Re-trying with allow_insecure=True.' warn(msg, AstropyWarning) # Try again with a new urlopener allowing insecure connections urlopener = _build_urlopener(ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=True) return urlopener.open(req, timeout=timeout) raise def _download_file_from_source(source_url, show_progress=True, timeout=None, remote_url=None, cache=False, pkgname='astropy', http_headers=None, ftp_tls=None, ssl_context=None, allow_insecure=False): from astropy.utils.console import ProgressBarOrSpinner if not conf.allow_internet: raise urllib.error.URLError( f"URL {remote_url} was supposed to be downloaded but " f"allow_internet is {conf.allow_internet}; " f"if this is unexpected check the astropy.cfg file for the option " f"allow_internet") if remote_url is None: remote_url = source_url if http_headers is None: http_headers = {} if ftp_tls is None and urllib.parse.urlparse(remote_url).scheme == "ftp": try: return _download_file_from_source(source_url, show_progress=show_progress, timeout=timeout, remote_url=remote_url, cache=cache, pkgname=pkgname, http_headers=http_headers, ftp_tls=False) except urllib.error.URLError as e: # e.reason might not be a string, e.g. 
socket.gaierror if str(e.reason).startswith("ftp error: error_perm"): ftp_tls = True else: raise with _try_url_open(source_url, timeout=timeout, http_headers=http_headers, ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=allow_insecure) as remote: info = remote.info() try: size = int(info['Content-Length']) except (KeyError, ValueError, TypeError): size = None if size is not None: check_free_space_in_dir(gettempdir(), size) if cache: dldir = _get_download_cache_loc(pkgname) check_free_space_in_dir(dldir, size) if show_progress and sys.stdout.isatty(): progress_stream = sys.stdout else: progress_stream = io.StringIO() if source_url == remote_url: dlmsg = f"Downloading {remote_url}" else: dlmsg = f"Downloading {remote_url} from {source_url}" with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p: with NamedTemporaryFile(prefix=f"astropy-download-{os.getpid()}-", delete=False) as f: try: bytes_read = 0 block = remote.read(conf.download_block_size) while block: f.write(block) bytes_read += len(block) p.update(bytes_read) block = remote.read(conf.download_block_size) if size is not None and bytes_read > size: raise urllib.error.URLError( f"File was supposed to be {size} bytes but " f"server provides more, at least {bytes_read} " f"bytes. Download failed.") if size is not None and bytes_read < size: raise urllib.error.ContentTooShortError( f"File was supposed to be {size} bytes but we " f"only got {bytes_read} bytes. Download failed.", content=None) except BaseException: if os.path.exists(f.name): try: os.remove(f.name) except OSError: pass raise return f.name def download_file(remote_url, cache=False, show_progress=True, timeout=None, sources=None, pkgname='astropy', http_headers=None, ssl_context=None, allow_insecure=False): """Downloads a URL and optionally caches the result. It returns the filename of a file containing the URL's contents. If ``cache=True`` and the file is present in the cache, just returns the filename; if the file had to be downloaded, add it to the cache. If ``cache="update"`` always download and add it to the cache. The cache is effectively a dictionary mapping URLs to files; by default the file contains the contents of the URL that is its key, but in practice these can be obtained from a mirror (using ``sources``) or imported from the local filesystem (using `~import_file_to_cache` or `~import_download_cache`). Regardless, each file is regarded as representing the contents of a particular URL, and this URL should be used to look them up or otherwise manipulate them. The files in the cache directory are named according to a cryptographic hash of their URLs (currently MD5, so hackers can cause collisions). The modification times on these files normally indicate when they were last downloaded from the Internet. Parameters ---------- remote_url : str The URL of the file to download cache : bool or "update", optional Whether to cache the contents of remote URLs. If "update", always download the remote URL in case there is a new version and store the result in the cache. show_progress : bool, optional Whether to display a progress bar during the download (default is `True`). Regardless of this setting, the progress bar is only displayed when outputting to a terminal. timeout : float, optional Timeout for remote requests in seconds (default is the configurable `astropy.utils.data.Conf.remote_timeout`). sources : list of str, optional If provided, a list of URLs to try to obtain the file from. The result will be stored under the original URL. 
The original URL will *not* be tried unless it is in this list; this
        is to prevent long waits for a primary server that is known to be
        inaccessible at the moment. If an empty list is passed, then
        ``download_file`` will not attempt to connect to the Internet, that
        is, if the file is not in the cache a KeyError will be raised.
    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.
    http_headers : dict or None
        HTTP request headers to pass into ``urlopen`` if needed. (These
        headers are ignored if the protocol for the
        ``name_or_obj``/``sources`` entry is not a remote HTTP URL.) In the
        default case (None), the headers are ``User-Agent: some_value`` and
        ``Accept: */*``, where ``some_value`` is set by
        ``astropy.utils.data.conf.default_http_user_agent``.
    ssl_context : dict, optional
        Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources. This can be used to
        provide alternative paths to root CA certificates. Additionally, if
        the key ``'certfile'`` and optionally ``'keyfile'`` and ``'password'``
        are included, they are passed to `ssl.SSLContext.load_cert_chain`.
        This can be used for performing SSL/TLS client certificate
        authentication for servers that require it.
    allow_insecure : bool, optional
        Allow downloading files over a TLS/SSL connection even when the
        server certificate verification failed. When set to `True` the
        potentially insecure download is allowed to proceed, but an
        `~astropy.utils.exceptions.AstropyWarning` is issued. If you are
        frequently getting certificate verification warnings, consider
        installing or upgrading the `certifi`_ package, which provides
        frequently updated certificates for common root CAs (i.e., a set
        similar to those used by web browsers). If installed, Astropy will
        use it automatically.

        .. _certifi: https://pypi.org/project/certifi/

    Returns
    -------
    local_path : str
        Returns the local path that the file was downloaded to.

    Raises
    ------
    urllib.error.URLError
        Whenever there's a problem getting the remote file.
    KeyError
        When a file was requested from the cache but is missing and no
        sources were provided to obtain it from the Internet.

    Notes
    -----
    Because this function returns a filename, another process could run
    `clear_download_cache` before you actually open the file, leaving you
    with a filename that no longer points to a usable file.
    """
    if timeout is None:
        timeout = conf.remote_timeout
    if sources is None:
        sources = [remote_url]
    if http_headers is None:
        http_headers = {'User-Agent': conf.default_http_user_agent,
                        'Accept': '*/*'}

    missing_cache = ""

    url_key = remote_url

    if cache:
        try:
            dldir = _get_download_cache_loc(pkgname)
        except OSError as e:
            cache = False
            missing_cache = (
                f"Cache directory cannot be read or created ({e}), "
                f"providing data in temporary file instead."
            )
        else:
            if cache == "update":
                pass
            elif isinstance(cache, str):
                raise ValueError(f"Cache value '{cache}' was requested but "
                                 f"'update' is the only recognized string; "
                                 f"otherwise use a boolean")
            else:
                filename = os.path.join(dldir, _url_to_dirname(url_key),
                                        "contents")
                if os.path.exists(filename):
                    return os.path.abspath(filename)

    errors = {}
    for source_url in sources:
        try:
            f_name = _download_file_from_source(
                source_url, timeout=timeout, show_progress=show_progress,
                cache=cache, remote_url=remote_url, pkgname=pkgname,
                http_headers=http_headers, ssl_context=ssl_context,
                allow_insecure=allow_insecure)
            # Success!
            break
        except urllib.error.URLError as e:
            # errno 8 is from SSL "EOF occurred in violation of protocol"
            if (hasattr(e, 'reason')
                    and hasattr(e.reason, 'errno')
                    and e.reason.errno == 8):
                e.reason.strerror = (e.reason.strerror +
                                     '. requested URL: ' + remote_url)
                e.reason.args = (e.reason.errno, e.reason.strerror)
            errors[source_url] = e
    else:  # No success
        if not sources:
            raise KeyError(
                f"No sources listed and file {remote_url} not in cache! "
                f"Please include primary URL in sources if you want it to be "
                f"included as a valid source.")
        elif len(sources) == 1:
            raise errors[sources[0]]
        else:
            raise urllib.error.URLError(
                f"Unable to open any source! Exceptions were {errors}") \
                from errors[sources[0]]

    if cache:
        try:
            return import_file_to_cache(url_key, f_name,
                                        remove_original=True,
                                        replace=(cache == 'update'),
                                        pkgname=pkgname)
        except PermissionError as e:
            # Cache is readonly, we can't update it
            missing_cache = (
                f"Cache directory appears to be read-only ({e}), unable to "
                f"import downloaded file, providing data in temporary file "
                f"{f_name} instead.")
            # FIXME: other kinds of cache problem can occur?

    if missing_cache:
        warn(CacheMissingWarning(missing_cache, f_name))
    if conf.delete_temporary_downloads_at_exit:
        global _tempfilestodel
        _tempfilestodel.append(f_name)
    return os.path.abspath(f_name)


def is_url_in_cache(url_key, pkgname='astropy'):
    """Check if a download for ``url_key`` is in the cache.

    The provided ``url_key`` will be the name used in the cache. The contents
    may have been downloaded from this URL or from a mirror or they may have
    been provided by the user. See `~download_file` for details.

    Parameters
    ----------
    url_key : str
        The URL retrieved
    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    Returns
    -------
    in_cache : bool
        `True` if a download for ``url_key`` is in the cache, `False` if not
        or if the cache does not exist at all.

    See Also
    --------
    cache_contents : obtain a dictionary listing everything in the cache
    """
    try:
        dldir = _get_download_cache_loc(pkgname)
    except OSError:
        return False
    filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
    return os.path.exists(filename)


def cache_total_size(pkgname='astropy'):
    """Return the total size in bytes of all files in the cache."""
    size = 0
    dldir = _get_download_cache_loc(pkgname=pkgname)
    for root, dirs, files in os.walk(dldir):
        size += sum(os.path.getsize(os.path.join(root, name))
                    for name in files)
    return size


def _do_download_files_in_parallel(kwargs):
    with astropy.config.paths.set_temp_config(kwargs.pop("temp_config")):
        with astropy.config.paths.set_temp_cache(kwargs.pop("temp_cache")):
            return download_file(**kwargs)


def download_files_in_parallel(urls, cache="update", show_progress=True,
                               timeout=None, sources=None,
                               multiprocessing_start_method=None,
                               pkgname='astropy'):
    """Download multiple files in parallel from the given URLs.

    Blocks until all files have downloaded. The result is a list of
    local file paths corresponding to the given urls.

    The results will be stored in the cache under the values in ``urls`` even
    if they are obtained from some other location via ``sources``. See
    `~download_file` for details.

    Parameters
    ----------
    urls : list of str
        The URLs to retrieve.
    cache : bool or "update", optional
        Whether to use the cache (default is ``"update"``). If "update",
        always download the remote URLs to see if new data is available and
        store the result in cache.

        .. versionchanged:: 4.0
            The default was changed to ``"update"`` and setting it to
            ``False`` will print a Warning and set it to ``"update"`` again,
            because the function will not work properly without cache. Using
            ``True`` will work as expected.

        .. versionchanged:: 3.0
            The default was changed to ``True`` and setting it to ``False``
            will print a Warning and set it to ``True`` again, because the
            function will not work properly without cache.

    show_progress : bool, optional
        Whether to display a progress bar during the download (default
        is `True`)
    timeout : float, optional
        Timeout for each individual request in seconds (default is the
        configurable `astropy.utils.data.Conf.remote_timeout`).
    sources : dict, optional
        If provided, for each URL a list of URLs to try to obtain the
        file from. The result will be stored under the original URL.
        For any URL in this dictionary, the original URL will *not* be
        tried unless it is in this list; this is to prevent long waits
        for a primary server that is known to be inaccessible at the
        moment.
    multiprocessing_start_method : str, optional
        Useful primarily for testing; if in doubt leave it as the default.
        When using multiprocessing, certain anomalies occur when starting
        processes with the "spawn" method (the only option on Windows);
        other anomalies occur with the "fork" method (the default on Linux).
    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    Returns
    -------
    paths : list of str
        The local file paths corresponding to the downloaded URLs.

    Notes
    -----
    If a URL is unreachable, the downloading will grind to a halt and the
    exception will propagate upward, but an unpredictable number of files
    will have been successfully downloaded and will remain in the cache.
    """
    from .console import ProgressBar

    if timeout is None:
        timeout = conf.remote_timeout
    if sources is None:
        sources = {}
    if not cache:
        # See issue #6662, on windows won't work because the files are removed
        # again before they can be used. On *NIX systems it will behave as if
        # cache was set to True because multiprocessing cannot insert the items
        # in the list of to-be-removed files. This could be fixed, but really,
        # just use the cache, with update_cache if appropriate.
        warn('Disabling the cache does not work because of multiprocessing, '
             'it will be set to ``"update"``.
You may need to manually remove ' 'the cached files with clear_download_cache() afterwards.', AstropyWarning) cache = "update" if show_progress: progress = sys.stdout else: progress = io.BytesIO() # Combine duplicate URLs combined_urls = list(set(urls)) combined_paths = ProgressBar.map( _do_download_files_in_parallel, [dict(remote_url=u, cache=cache, show_progress=False, timeout=timeout, sources=sources.get(u, None), pkgname=pkgname, temp_cache=astropy.config.paths.set_temp_cache._temp_path, temp_config=astropy.config.paths.set_temp_config._temp_path) for u in combined_urls], file=progress, multiprocess=True, multiprocessing_start_method=multiprocessing_start_method, ) paths = [] for url in urls: paths.append(combined_paths[combined_urls.index(url)]) return paths # This is used by download_file and _deltemps to determine the files to delete # when the interpreter exits _tempfilestodel = [] @atexit.register def _deltemps(): global _tempfilestodel if _tempfilestodel is not None: while len(_tempfilestodel) > 0: fn = _tempfilestodel.pop() if os.path.isfile(fn): try: os.remove(fn) except OSError: # oh well we tried # could be held open by some process, on Windows pass elif os.path.isdir(fn): try: shutil.rmtree(fn) except OSError: # couldn't get rid of it, sorry # could be held open by some process, on Windows pass def clear_download_cache(hashorurl=None, pkgname='astropy'): """Clears the data file cache by deleting the local file(s). If a URL is provided, it will be the name used in the cache. The contents may have been downloaded from this URL or from a mirror or they may have been provided by the user. See `~download_file` for details. For the purposes of this function, a file can also be identified by a hash of its contents or by the filename under which the data is stored (as returned by `~download_file`, for example). Parameters ---------- hashorurl : str or None If None, the whole cache is cleared. Otherwise, specify a hash for the cached file that is supposed to be deleted, the full path to a file in the cache that should be deleted, or a URL that should be removed from the cache if present. pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. 
""" try: dldir = _get_download_cache_loc(pkgname) except OSError as e: # Problem arose when trying to open the cache # Just a warning, though msg = 'Not clearing data cache - cache inaccessible due to ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) return try: if hashorurl is None: # Optional: delete old incompatible caches too _rmtree(dldir) elif _is_url(hashorurl): filepath = os.path.join(dldir, _url_to_dirname(hashorurl)) _rmtree(filepath) else: # Not a URL, it should be either a filename or a hash filepath = os.path.join(dldir, hashorurl) rp = os.path.relpath(filepath, dldir) if rp.startswith(".."): raise RuntimeError( f"attempted to use clear_download_cache on the path " f"{filepath} outside the data cache directory {dldir}") d, f = os.path.split(rp) if d and f in ["contents", "url"]: # It's a filename not the hash of a URL # so we want to zap the directory containing the # files "url" and "contents" filepath = os.path.join(dldir, d) if os.path.exists(filepath): _rmtree(filepath) elif (len(hashorurl) == 2*hashlib.md5().digest_size and re.match(r"[0-9a-f]+", hashorurl)): # It's the hash of some file contents, we have to find the right file filename = _find_hash_fn(hashorurl) if filename is not None: clear_download_cache(filename) except OSError as e: msg = 'Not clearing data from cache - problem arose ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) def _get_download_cache_loc(pkgname='astropy'): """Finds the path to the cache directory and makes them if they don't exist. Parameters ---------- pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. Returns ------- datadir : str The path to the data cache directory. """ try: datadir = os.path.join(astropy.config.paths.get_cache_dir(pkgname), 'download', 'url') if not os.path.exists(datadir): try: os.makedirs(datadir) except OSError: if not os.path.exists(datadir): raise elif not os.path.isdir(datadir): raise OSError(f'Data cache directory {datadir} is not a directory') return datadir except OSError as e: msg = 'Remote data cache could not be accessed due to ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) raise def _url_to_dirname(url): if not _is_url(url): raise ValueError(f"Malformed URL: '{url}'") # Make domain names case-insensitive # Also makes the http:// case-insensitive urlobj = list(urllib.parse.urlsplit(url)) urlobj[1] = urlobj[1].lower() if urlobj[0].lower() in ['http', 'https'] and urlobj[1] and urlobj[2] == '': urlobj[2] = '/' url_c = urllib.parse.urlunsplit(urlobj) return hashlib.md5(url_c.encode("utf-8")).hexdigest() class ReadOnlyDict(dict): def __setitem__(self, key, value): raise TypeError("This object is read-only.") _NOTHING = ReadOnlyDict({}) class CacheDamaged(ValueError): """Record the URL or file that was a problem. Using clear_download_cache on the .bad_file or .bad_url attribute, whichever is not None, should resolve this particular problem. """ def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs): super().__init__(*args, **kwargs) self.bad_urls = bad_urls if bad_urls is not None else [] self.bad_files = bad_files if bad_files is not None else [] def check_download_cache(pkgname='astropy'): """Do a consistency check on the cache. .. note:: Since v5.0, this function no longer returns anything. 

    Because the cache is shared by all versions of ``astropy`` in all
    virtualenvs run by your user, possibly concurrently, it could accumulate
    problems. This could lead to hard-to-debug problems or wasted space. This
    function detects a number of incorrect conditions, including nonexistent
    files that are indexed, files that are indexed but in the wrong place,
    and, if you request it, files whose content does not match the hash that
    is indexed.

    This function also checks for non-indexed files. A few will be associated
    with the shelve object; their exact names depend on the backend used but
    will probably be based on ``urlmap``. The presence of other files probably
    indicates that something has gone wrong and inaccessible files have
    accumulated in the cache. These can be removed with
    :func:`clear_download_cache`, either passing the offending filename, or
    with no arguments to empty the entire cache and return it to a reasonable,
    if empty, state.

    Parameters
    ----------
    pkgname : str, optional
        The package name to use to locate the download cache, i.e., for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    Raises
    ------
    `~astropy.utils.data.CacheDamaged`
        To indicate a problem with the cache contents; the exception contains
        a ``.bad_files`` attribute containing a set of filenames to allow the
        user to use :func:`clear_download_cache` to remove the offending
        items.
    OSError, RuntimeError
        To indicate some problem with the cache structure. This may need a
        full :func:`clear_download_cache` to resolve, or may indicate some
        kind of misconfiguration.
    """
    bad_files = set()
    messages = set()
    dldir = _get_download_cache_loc(pkgname=pkgname)
    with os.scandir(dldir) as it:
        for entry in it:
            f = os.path.abspath(os.path.join(dldir, entry.name))
            if entry.name.startswith("rmtree-"):
                if f not in _tempfilestodel:
                    bad_files.add(f)
                    messages.add(f"Cache entry {entry.name} not scheduled "
                                 f"for deletion")
            elif entry.is_dir():
                for sf in os.listdir(f):
                    if sf in ['url', 'contents']:
                        continue
                    sf = os.path.join(f, sf)
                    bad_files.add(sf)
                    messages.add(f"Unexpected file {sf}")
                urlf = os.path.join(f, "url")
                url = None
                if not os.path.isfile(urlf):
                    bad_files.add(urlf)
                    messages.add(f"Problem with URL file {urlf}")
                else:
                    url = get_file_contents(urlf, encoding="utf-8")
                    if not _is_url(url):
                        bad_files.add(f)
                        messages.add(f"Malformed URL: {url}")
                    else:
                        hashname = _url_to_dirname(url)
                        if entry.name != hashname:
                            bad_files.add(f)
                            messages.add(f"URL hashes to {hashname} but is "
                                         f"stored in {entry.name}")
                if not os.path.isfile(os.path.join(f, "contents")):
                    bad_files.add(f)
                    if url is None:
                        messages.add(f"Hash {entry.name} is missing contents")
                    else:
                        messages.add(f"URL {url} with hash {entry.name} is "
                                     f"missing contents")
            else:
                bad_files.add(f)
                messages.add(f"Left-over non-directory {f} in cache")
    if bad_files:
        raise CacheDamaged("\n".join(messages), bad_files=bad_files)


@contextlib.contextmanager
def _SafeTemporaryDirectory(suffix=None, prefix=None, dir=None):
    """Temporary directory context manager

    This will not raise an exception if the temporary directory goes away
    before it's supposed to be deleted. Specifically, what is deleted will
    be the directory *name* produced; if no such directory exists, no
    exception will be raised.

    It would be safer to delete it only if it's really the same directory
    - checked by file descriptor - and if it's still called the same thing.
    But that opens a platform-specific can of worms.
It would also be more robust to use ExitStack and TemporaryDirectory, which is more aggressive about removing readonly things. """ d = mkdtemp(suffix=suffix, prefix=prefix, dir=dir) try: yield d finally: try: shutil.rmtree(d) except OSError: pass def _rmtree(path, replace=None): """More-atomic rmtree. Ignores missing directory.""" with TemporaryDirectory(prefix="rmtree-", dir=os.path.dirname(os.path.abspath(path))) as d: try: os.rename(path, os.path.join(d, "to-zap")) except FileNotFoundError: pass except PermissionError: warn(CacheMissingWarning( f"Unable to remove directory {path} because a file in it " f"is in use and you are on Windows", path)) raise if replace is not None: try: os.rename(replace, path) except FileExistsError: # already there, fine pass except OSError as e: if e.errno == errno.ENOTEMPTY: # already there, fine pass else: raise def import_file_to_cache(url_key, filename, remove_original=False, pkgname='astropy', *, replace=True): """Import the on-disk file specified by filename to the cache. The provided ``url_key`` will be the name used in the cache. The file should contain the contents of this URL, at least notionally (the URL may be temporarily or permanently unavailable). It is using ``url_key`` that users will request these contents from the cache. See :func:`download_file` for details. If ``url_key`` already exists in the cache, it will be updated to point to these imported contents, and its old contents will be deleted from the cache. Parameters ---------- url_key : str The key to index the file under. This should probably be the URL where the file was located, though if you obtained it from a mirror you should use the URL of the primary location. filename : str The file whose contents you want to import. remove_original : bool Whether to remove the original file (``filename``) once import is complete. pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. replace : boolean, optional Whether or not to replace an existing object in the cache, if one exists. If replacement is not requested but the object exists, silently pass. """ cache_dir = _get_download_cache_loc(pkgname=pkgname) cache_dirname = _url_to_dirname(url_key) local_dirname = os.path.join(cache_dir, cache_dirname) local_filename = os.path.join(local_dirname, "contents") with _SafeTemporaryDirectory(prefix="temp_dir", dir=cache_dir) as temp_dir: temp_filename = os.path.join(temp_dir, "contents") # Make sure we're on the same filesystem # This will raise an exception if the url_key doesn't turn into a valid filename shutil.copy(filename, temp_filename) with open(os.path.join(temp_dir, "url"), "wt", encoding="utf-8") as f: f.write(url_key) if replace: _rmtree(local_dirname, replace=temp_dir) else: try: os.rename(temp_dir, local_dirname) except FileExistsError: # already there, fine pass except OSError as e: if e.errno == errno.ENOTEMPTY: # already there, fine pass else: raise if remove_original: os.remove(filename) return os.path.abspath(local_filename) def get_cached_urls(pkgname='astropy'): """ Get the list of URLs in the cache. Especially useful for looking up what files are stored in your cache when you don't have internet access. The listed URLs are the keys programs should use to access the file contents, but those contents may have actually been obtained from a mirror. See `~download_file` for details. 

    Parameters
    ----------
    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    Returns
    -------
    cached_urls : list
        List of cached URLs.

    See Also
    --------
    cache_contents : obtain a dictionary listing everything in the cache
    """
    return sorted(cache_contents(pkgname=pkgname).keys())


def cache_contents(pkgname='astropy'):
    """Obtain a dict mapping cached URLs to filenames.

    This dictionary is a read-only snapshot of the state of the cache when
    this function was called. If other processes are actively working with
    the cache, it is possible for them to delete files that are listed in
    this dictionary. Use with some caution if you are working on a system
    that is busy with many running astropy processes, although the same
    issues apply to most functions in this module.
    """
    r = {}
    try:
        dldir = _get_download_cache_loc(pkgname=pkgname)
    except OSError:
        return _NOTHING
    with os.scandir(dldir) as it:
        for entry in it:
            if entry.is_dir():
                url = get_file_contents(
                    os.path.join(dldir, entry.name, "url"), encoding="utf-8")
                r[url] = os.path.abspath(
                    os.path.join(dldir, entry.name, "contents"))
    return ReadOnlyDict(r)


def export_download_cache(filename_or_obj, urls=None, overwrite=False,
                          pkgname='astropy'):
    """Exports the cache contents as a ZIP file.

    Parameters
    ----------
    filename_or_obj : str or file-like
        Where to put the created ZIP file. Must be something the zipfile
        module can write to.
    urls : iterable of str or None
        The URLs to include in the exported cache. The default is all
        URLs currently in the cache. If a URL is included in this list
        but is not currently in the cache, a KeyError will be raised.
        To ensure that all are in the cache use `~download_file`
        or `~download_files_in_parallel`.
    overwrite : bool, optional
        If filename_or_obj is a filename that exists, it will only be
        overwritten if this is True.
    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    See Also
    --------
    import_download_cache : import the contents of such a ZIP file
    import_file_to_cache : import a single file directly
    """
    if urls is None:
        urls = get_cached_urls(pkgname)
    with zipfile.ZipFile(filename_or_obj, 'w' if overwrite else 'x') as z:
        for u in urls:
            fn = download_file(u, cache=True, sources=[], pkgname=pkgname)
            # Do not use os.path.join because ZIP files want
            # "/" on all platforms
            z_fn = urllib.parse.quote(u, safe="")
            z.write(fn, z_fn)


def import_download_cache(filename_or_obj, urls=None, update_cache=False,
                          pkgname='astropy'):
    """Imports the contents of a ZIP file into the cache.

    Each member of the ZIP file should be named by a quoted version of the
    URL whose contents it stores. These names are decoded with
    :func:`~urllib.parse.unquote`.

    Parameters
    ----------
    filename_or_obj : str or file-like
        Where the stored ZIP file is. Must be something the :mod:`~zipfile`
        module can read from.
    urls : set of str or list of str or None
        The URLs to import from the ZIP file. The default is all URLs in
        the file.
    update_cache : bool, optional
        If True, any entry in the ZIP file will overwrite the value in the
        cache; if False, leave untouched any entry already in the cache.
    pkgname : `str`, optional
        The package name to use to locate the download cache. i.e. for
        ``pkgname='astropy'`` the default cache location is
        ``~/.astropy/cache``.

    See Also
    --------
    export_download_cache : export the contents of the cache to such a ZIP file
    import_file_to_cache : import a single file directly
    """
    with zipfile.ZipFile(filename_or_obj, 'r') as z, TemporaryDirectory() as d:
        for i, zf in enumerate(z.infolist()):
            url = urllib.parse.unquote(zf.filename)
            # FIXME(aarchiba): do we want some kind of validation on this URL?
            # urllib.parse might do something sensible...but what URLs might
            # they have?
            # is_url in this file is probably a good check, not just here
            # but throughout this file.
            if urls is not None and url not in urls:
                continue
            if not update_cache and is_url_in_cache(url, pkgname=pkgname):
                continue
            f_temp_name = os.path.join(d, str(i))
            with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
                block = f_zip.read(conf.download_block_size)
                while block:
                    f_temp.write(block)
                    block = f_zip.read(conf.download_block_size)
            import_file_to_cache(url, f_temp_name, remove_original=True,
                                 pkgname=pkgname)
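

# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative only, not part of the astropy API):
# shows how download_file, export_download_cache and import_download_cache
# defined above fit together. The URLs are hypothetical placeholders; running
# this requires network access to a real, reachable server.
if __name__ == '__main__':
    # Download and cache a file; it is stored under the primary URL even if
    # it is actually fetched from the (hypothetical) mirror in ``sources``.
    path = download_file(
        'http://data.example.com/catalog.fits', cache=True,
        sources=['http://data.example.com/catalog.fits',
                 'http://mirror.example.org/catalog.fits'])
    print('cached at', path)

    # Snapshot the whole cache into a ZIP file, then re-import it (e.g. on a
    # machine without internet access); update_cache=True overwrites any
    # entries already present.
    export_download_cache('cache_snapshot.zip', overwrite=True)
    import_download_cache('cache_snapshot.zip', update_cache=True)
    print(get_cached_urls())
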
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A "grab bag" of relatively small general-purpose utilities that don't have
a clear module/package to live in.
"""

import abc
import contextlib
import difflib
import inspect
import json
import os
import signal
import sys
import traceback
import unicodedata
import locale
import threading
import re

from contextlib import contextmanager
from collections import defaultdict, OrderedDict

from astropy.utils.decorators import deprecated

__all__ = ['isiterable', 'silence', 'format_exception', 'NumpyRNGContext',
           'find_api_page', 'is_path_hidden', 'walk_skip_hidden',
           'JsonCustomEncoder', 'indent', 'dtype_bytes_or_chars',
           'OrderedDescriptor', 'OrderedDescriptorContainer']

# Because they are deprecated.
__doctest_skip__ = ['OrderedDescriptor', 'OrderedDescriptorContainer']


NOT_OVERWRITING_MSG = ('File {} already exists. If you mean to replace it '
                       'then use the argument "overwrite=True".')
# A useful regex for tests.
_NOT_OVERWRITING_MSG_MATCH = (r'File .* already exists\. If you mean to '
                              r'replace it then use the argument '
                              r'"overwrite=True"\.')


def isiterable(obj):
    """Returns `True` if the given object is iterable."""
    try:
        iter(obj)
        return True
    except TypeError:
        return False


def indent(s, shift=1, width=4):
    """Indent a block of text. The indentation is applied to each line."""
    indented = '\n'.join(' ' * (width * shift) + l if l else ''
                         for l in s.splitlines())
    if s and s[-1] == '\n':
        indented += '\n'
    return indented


class _DummyFile:
    """A noop writeable object."""

    def write(self, s):
        pass


@contextlib.contextmanager
def silence():
    """A context manager that silences sys.stdout and sys.stderr."""
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    try:
        yield
    finally:
        sys.stdout = old_stdout
        sys.stderr = old_stderr


def format_exception(msg, *args, **kwargs):
    """
    Given an exception message string, uses new-style formatting arguments
    ``{filename}``, ``{lineno}``, ``{func}`` and/or ``{text}`` to fill in
    information about the exception that occurred. For example::

        try:
            1/0
        except:
            raise ZeroDivisionError(
                format_exception('A divide by zero occurred in {filename} at '
                                 'line {lineno} of function {func}.'))

    Any additional positional or keyword arguments passed to this function are
    also used to format the message.

    .. note::
        This uses `sys.exc_info` to gather up the information needed to fill
        in the formatting arguments. Since `sys.exc_info` is not carried
        outside a handled exception, it's not wise to use this outside of an
        ``except`` clause - if it is, this will substitute '<unknown>' for the
        4 formatting arguments.
    """
    tb = traceback.extract_tb(sys.exc_info()[2], limit=1)
    if len(tb) > 0:
        filename, lineno, func, text = tb[0]
    else:
        filename = lineno = func = text = '<unknown>'

    return msg.format(*args, filename=filename, lineno=lineno, func=func,
                      text=text, **kwargs)


class NumpyRNGContext:
    """
    A context manager (for use with the ``with`` statement) that will seed the
    numpy random number generator (RNG) to a specific value, and then restore
    the RNG state back to whatever it was before.

    This is primarily intended for use in the astropy testing suite, but it
    may be useful in ensuring reproducibility of Monte Carlo simulations in a
    science context.

    Parameters
    ----------
    seed : int
        The value to use to seed the numpy RNG

    Examples
    --------
    A typical use case might be::

        with NumpyRNGContext(<some seed value you pick>):
            from numpy import random

            randarr = random.randn(100)
            ... run your test using `randarr` ...

        #Any code using numpy.random at this indent level will act just as it
        #would have if it had been before the with statement - e.g. whatever
        #the default seed is.
    """

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        from numpy import random

        self.startstate = random.get_state()
        random.seed(self.seed)

    def __exit__(self, exc_type, exc_value, traceback):
        from numpy import random

        random.set_state(self.startstate)


def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
    """
    Determines the URL of the API page for the specified object, and
    optionally open that page in a web browser.

    .. note::
        You must be connected to the internet for this to function even if
        ``openinbrowser`` is `False`, unless you provide a local version of
        the documentation to ``version`` (e.g., ``file:///path/to/docs``).

    Parameters
    ----------
    obj
        The object to open the docs for or its fully-qualified name
        (as a str).
    version : str
        The doc version - either a version number like '0.1', 'dev' for
        the development/latest docs, or a URL to point to a specific
        location that should be the *base* of the documentation. Defaults
        to latest if you aren't on a release, otherwise, the version you
        are on.
    openinbrowser : bool
        If `True`, the `webbrowser` package will be used to open the doc
        page in a new web browser window.
    timeout : number, optional
        The number of seconds to wait before timing-out the query to
        the astropy documentation. If not given, the default python
        stdlib timeout will be used.

    Returns
    -------
    url : str
        The loaded URL

    Raises
    ------
    ValueError
        If the documentation can't be found
    """
    import webbrowser
    from zlib import decompress
    from astropy.utils.data import get_readable_fileobj

    if (not isinstance(obj, str) and hasattr(obj, '__module__')
            and hasattr(obj, '__name__')):
        obj = obj.__module__ + '.' + obj.__name__
    elif inspect.ismodule(obj):
        obj = obj.__name__

    if version is None:
        from astropy import version

        if version.release:
            version = 'v' + version.version
        else:
            version = 'dev'

    if '://' in version:
        if version.endswith('index.html'):
            baseurl = version[:-10]
        elif version.endswith('/'):
            baseurl = version
        else:
            baseurl = version + '/'
    elif version == 'dev' or version == 'latest':
        baseurl = 'http://devdocs.astropy.org/'
    else:
        baseurl = f'https://docs.astropy.org/en/{version}/'

    # Custom request headers; see
    # https://github.com/astropy/astropy/issues/8990
    url = baseurl + 'objects.inv'
    headers = {'User-Agent': f'Astropy/{version}'}
    with get_readable_fileobj(url, encoding='binary',
                              remote_timeout=timeout,
                              http_headers=headers) as uf:
        oiread = uf.read()

        # need to first read/remove the first four lines, which have info
        # before the compressed section with the actual object inventory
        idx = -1
        headerlines = []
        for _ in range(4):
            oldidx = idx
            idx = oiread.index(b'\n', oldidx + 1)
            headerlines.append(oiread[(oldidx+1):idx].decode('utf-8'))

        # intersphinx version line, project name, and project version
        ivers, proj, vers, compr = headerlines
        if 'The remainder of this file is compressed using zlib' not in compr:
            raise ValueError('The file downloaded from {} does not seem to be '
                             'the usual Sphinx objects.inv format.
Maybe it ' 'has changed?'.format(baseurl + 'objects.inv')) compressed = oiread[(idx+1):] decompressed = decompress(compressed).decode('utf-8') resurl = None for l in decompressed.strip().splitlines(): ls = l.split() name = ls[0] loc = ls[3] if loc.endswith('$'): loc = loc[:-1] + name if name == obj: resurl = baseurl + loc break if resurl is None: raise ValueError(f'Could not find the docs for the object {obj}') elif openinbrowser: webbrowser.open(resurl) return resurl def signal_number_to_name(signum): """ Given an OS signal number, returns a signal name. If the signal number is unknown, returns ``'UNKNOWN'``. """ # Since these numbers and names are platform specific, we use the # builtin signal module and build a reverse mapping. signal_to_name_map = dict((k, v) for v, k in signal.__dict__.items() if v.startswith('SIG')) return signal_to_name_map.get(signum, 'UNKNOWN') if sys.platform == 'win32': import ctypes def _has_hidden_attribute(filepath): """ Returns True if the given filepath has the hidden attribute on MS-Windows. Based on a post here: https://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection """ if isinstance(filepath, bytes): filepath = filepath.decode(sys.getfilesystemencoding()) try: attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath) result = bool(attrs & 2) and attrs != -1 except AttributeError: result = False return result else: def _has_hidden_attribute(filepath): return False def is_path_hidden(filepath): """ Determines if a given file or directory is hidden. Parameters ---------- filepath : str The path to a file or directory Returns ------- hidden : bool Returns `True` if the file is hidden """ name = os.path.basename(os.path.abspath(filepath)) if isinstance(name, bytes): is_dotted = name.startswith(b'.') else: is_dotted = name.startswith('.') return is_dotted or _has_hidden_attribute(filepath) def walk_skip_hidden(top, onerror=None, followlinks=False): """ A wrapper for `os.walk` that skips hidden files and directories. This function does not have the parameter ``topdown`` from `os.walk`: the directories must always be recursed top-down when using this function. See also -------- os.walk : For a description of the parameters """ for root, dirs, files in os.walk( top, topdown=True, onerror=onerror, followlinks=followlinks): # These lists must be updated in-place so os.walk will skip # hidden directories dirs[:] = [d for d in dirs if not is_path_hidden(d)] files[:] = [f for f in files if not is_path_hidden(f)] yield root, dirs, files class JsonCustomEncoder(json.JSONEncoder): """Support for data types that JSON default encoder does not do. 
This includes: * Numpy array or number * Complex number * Set * Bytes * astropy.UnitBase * astropy.Quantity Examples -------- >>> import json >>> import numpy as np >>> from astropy.utils.misc import JsonCustomEncoder >>> json.dumps(np.arange(3), cls=JsonCustomEncoder) '[0, 1, 2]' """ def default(self, obj): from astropy import units as u import numpy as np if isinstance(obj, u.Quantity): return dict(value=obj.value, unit=obj.unit.to_string()) if isinstance(obj, (np.number, np.ndarray)): return obj.tolist() elif isinstance(obj, complex): return [obj.real, obj.imag] elif isinstance(obj, set): return list(obj) elif isinstance(obj, bytes): # pragma: py3 return obj.decode() elif isinstance(obj, (u.UnitBase, u.FunctionUnitBase)): if obj == u.dimensionless_unscaled: obj = 'dimensionless_unit' else: return obj.to_string() return json.JSONEncoder.default(self, obj) def strip_accents(s): """ Remove accents from a Unicode string. This helps with matching "ångström" to "angstrom", for example. """ return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') def did_you_mean(s, candidates, n=3, cutoff=0.8, fix=None): """ When a string isn't found in a set of candidates, we can be nice to provide a list of alternatives in the exception. This convenience function helps to format that part of the exception. Parameters ---------- s : str candidates : sequence of str or dict of str keys n : int The maximum number of results to include. See `difflib.get_close_matches`. cutoff : float In the range [0, 1]. Possibilities that don't score at least that similar to word are ignored. See `difflib.get_close_matches`. fix : callable A callable to modify the results after matching. It should take a single string and return a sequence of strings containing the fixed matches. Returns ------- message : str Returns the string "Did you mean X, Y, or Z?", or the empty string if no alternatives were found. """ if isinstance(s, str): s = strip_accents(s) s_lower = s.lower() # Create a mapping from the lower case name to all capitalization # variants of that name. candidates_lower = {} for candidate in candidates: candidate_lower = candidate.lower() candidates_lower.setdefault(candidate_lower, []) candidates_lower[candidate_lower].append(candidate) # The heuristic here is to first try "singularizing" the word. If # that doesn't match anything use difflib to find close matches in # original, lower and upper case. if s_lower.endswith('s') and s_lower[:-1] in candidates_lower: matches = [s_lower[:-1]] else: matches = difflib.get_close_matches( s_lower, candidates_lower, n=n, cutoff=cutoff) if len(matches): capitalized_matches = set() for match in matches: capitalized_matches.update(candidates_lower[match]) matches = capitalized_matches if fix is not None: mapped_matches = [] for match in matches: mapped_matches.extend(fix(match)) matches = mapped_matches matches = list(set(matches)) matches = sorted(matches) if len(matches) == 1: matches = matches[0] else: matches = (', '.join(matches[:-1]) + ' or ' + matches[-1]) return f'Did you mean {matches}?' return '' _ordered_descriptor_deprecation_message = """\ The {func} {obj_type} is deprecated and may be removed in a future version. You can replace its functionality with a combination of the __init_subclass__ and __set_name__ magic methods introduced in Python 3.6. See https://github.com/astropy/astropy/issues/11094 for recipes on how to replicate their functionality. 
""" @deprecated('4.3', _ordered_descriptor_deprecation_message) class OrderedDescriptor(metaclass=abc.ABCMeta): """ Base class for descriptors whose order in the class body should be preserved. Intended for use in concert with the `OrderedDescriptorContainer` metaclass. Subclasses of `OrderedDescriptor` must define a value for a class attribute called ``_class_attribute_``. This is the name of a class attribute on the *container* class for these descriptors, which will be set to an `~collections.OrderedDict` at class creation time. This `~collections.OrderedDict` will contain a mapping of all class attributes that were assigned instances of the `OrderedDescriptor` subclass, to the instances themselves. See the documentation for `OrderedDescriptorContainer` for a concrete example. Optionally, subclasses of `OrderedDescriptor` may define a value for a class attribute called ``_name_attribute_``. This should be the name of an attribute on instances of the subclass. When specified, during creation of a class containing these descriptors, the name attribute on each instance will be set to the name of the class attribute it was assigned to on the class. .. note:: Although this class is intended for use with *descriptors* (i.e. classes that define any of the ``__get__``, ``__set__``, or ``__delete__`` magic methods), this base class is not itself a descriptor, and technically this could be used for classes that are not descriptors too. However, use with descriptors is the original intended purpose. """ # This id increments for each OrderedDescriptor instance created, so they # are always ordered in the order they were created. Class bodies are # guaranteed to be executed from top to bottom. Not sure if this is # thread-safe though. _nextid = 1 @property @abc.abstractmethod def _class_attribute_(self): """ Subclasses should define this attribute to the name of an attribute on classes containing this subclass. That attribute will contain the mapping of all instances of that `OrderedDescriptor` subclass defined in the class body. If the same descriptor needs to be used with different classes, each with different names of this attribute, multiple subclasses will be needed. """ _name_attribute_ = None """ Subclasses may optionally define this attribute to specify the name of an attribute on instances of the class that should be filled with the instance's attribute name at class creation time. """ def __init__(self, *args, **kwargs): # The _nextid attribute is shared across all subclasses so that # different subclasses of OrderedDescriptors can be sorted correctly # between themselves self.__order = OrderedDescriptor._nextid OrderedDescriptor._nextid += 1 super().__init__() def __lt__(self, other): """ Defined for convenient sorting of `OrderedDescriptor` instances, which are defined to sort in their creation order. """ if (isinstance(self, OrderedDescriptor) and isinstance(other, OrderedDescriptor)): try: return self.__order < other.__order except AttributeError: raise RuntimeError( 'Could not determine ordering for {} and {}; at least ' 'one of them is not calling super().__init__ in its ' '__init__.'.format(self, other)) else: return NotImplemented @deprecated('4.3', _ordered_descriptor_deprecation_message) class OrderedDescriptorContainer(type): """ Classes should use this metaclass if they wish to use `OrderedDescriptor` attributes, which are class attributes that "remember" the order in which they were defined in the class body. 
Every subclass of `OrderedDescriptor` has an attribute called ``_class_attribute_``. For example, if we have .. code:: python class ExampleDecorator(OrderedDescriptor): _class_attribute_ = '_examples_' Then when a class with the `OrderedDescriptorContainer` metaclass is created, it will automatically be assigned a class attribute ``_examples_`` referencing an `~collections.OrderedDict` containing all instances of ``ExampleDecorator`` defined in the class body, mapped to by the names of the attributes they were assigned to. When subclassing a class with this metaclass, the descriptor dict (i.e. ``_examples_`` in the above example) will *not* contain descriptors inherited from the base class. That is, this only works by default with decorators explicitly defined in the class body. However, the subclass *may* define an attribute ``_inherit_decorators_`` which lists `OrderedDescriptor` classes that *should* be added from base classes. See the examples section below for an example of this. Examples -------- >>> from astropy.utils import OrderedDescriptor, OrderedDescriptorContainer >>> class TypedAttribute(OrderedDescriptor): ... \"\"\" ... Attributes that may only be assigned objects of a specific type, ... or subclasses thereof. For some reason we care about their order. ... \"\"\" ... ... _class_attribute_ = 'typed_attributes' ... _name_attribute_ = 'name' ... # A default name so that instances not attached to a class can ... # still be repr'd; useful for debugging ... name = '<unbound>' ... ... def __init__(self, type): ... # Make sure not to forget to call the super __init__ ... super().__init__() ... self.type = type ... ... def __get__(self, obj, objtype=None): ... if obj is None: ... return self ... if self.name in obj.__dict__: ... return obj.__dict__[self.name] ... else: ... raise AttributeError(self.name) ... ... def __set__(self, obj, value): ... if not isinstance(value, self.type): ... raise ValueError('{0}.{1} must be of type {2!r}'.format( ... obj.__class__.__name__, self.name, self.type)) ... obj.__dict__[self.name] = value ... ... def __delete__(self, obj): ... if self.name in obj.__dict__: ... del obj.__dict__[self.name] ... else: ... raise AttributeError(self.name) ... ... def __repr__(self): ... if isinstance(self.type, tuple) and len(self.type) > 1: ... typestr = '({0})'.format( ... ', '.join(t.__name__ for t in self.type)) ... else: ... typestr = self.type.__name__ ... return '<{0}(name={1}, type={2})>'.format( ... self.__class__.__name__, self.name, typestr) ... Now let's create an example class that uses this ``TypedAttribute``:: >>> class Point2D(metaclass=OrderedDescriptorContainer): ... x = TypedAttribute((float, int)) ... y = TypedAttribute((float, int)) ... ... def __init__(self, x, y): ... self.x, self.y = x, y ... >>> p1 = Point2D(1.0, 2.0) >>> p1.x 1.0 >>> p1.y 2.0 >>> p2 = Point2D('a', 'b') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: Point2D.x must be of type (float, int>) We see that ``TypedAttribute`` works more or less as advertised, but there's nothing special about that. Let's see what `OrderedDescriptorContainer` did for us:: >>> Point2D.typed_attributes OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>), ('y', <TypedAttribute(name=y, type=(float, int))>)]) If we create a subclass, it does *not* by default add inherited descriptors to ``typed_attributes``:: >>> class Point3D(Point2D): ... z = TypedAttribute((float, int)) ... 
>>> Point3D.typed_attributes OrderedDict([('z', <TypedAttribute(name=z, type=(float, int))>)]) However, if we specify ``_inherit_descriptors_`` from ``Point2D`` then it will do so:: >>> class Point3D(Point2D): ... _inherit_descriptors_ = (TypedAttribute,) ... z = TypedAttribute((float, int)) ... >>> Point3D.typed_attributes OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>), ('y', <TypedAttribute(name=y, type=(float, int))>), ('z', <TypedAttribute(name=z, type=(float, int))>)]) .. note:: Hopefully it is clear from these examples that this construction also allows a class of type `OrderedDescriptorContainer` to use multiple different `OrderedDescriptor` classes simultaneously. """ _inherit_descriptors_ = () def __init__(cls, cls_name, bases, members): descriptors = defaultdict(list) seen = set() inherit_descriptors = () descr_bases = {} for mro_cls in cls.__mro__: for name, obj in mro_cls.__dict__.items(): if name in seen: # Checks if we've already seen an attribute of the given # name (if so it will override anything of the same name in # any base class) continue seen.add(name) if (not isinstance(obj, OrderedDescriptor) or (inherit_descriptors and not isinstance(obj, inherit_descriptors))): # The second condition applies when checking any # subclasses, to see if we can inherit any descriptors of # the given type from subclasses (by default inheritance is # disabled unless the class has _inherit_descriptors_ # defined) continue if obj._name_attribute_ is not None: setattr(obj, obj._name_attribute_, name) # Don't just use the descriptor's class directly; instead go # through its MRO and find the class on which _class_attribute_ # is defined directly. This way subclasses of some # OrderedDescriptor *may* override _class_attribute_ and have # its own _class_attribute_, but by default all subclasses of # some OrderedDescriptor are still grouped together # TODO: It might be worth clarifying this in the docs if obj.__class__ not in descr_bases: for obj_cls_base in obj.__class__.__mro__: if '_class_attribute_' in obj_cls_base.__dict__: descr_bases[obj.__class__] = obj_cls_base descriptors[obj_cls_base].append((obj, name)) break else: # Make sure to put obj first for sorting purposes obj_cls_base = descr_bases[obj.__class__] descriptors[obj_cls_base].append((obj, name)) if not getattr(mro_cls, '_inherit_descriptors_', False): # If _inherit_descriptors_ is undefined then we don't inherit # any OrderedDescriptors from any of the base classes, and # there's no reason to continue through the MRO break else: inherit_descriptors = mro_cls._inherit_descriptors_ for descriptor_cls, instances in descriptors.items(): instances.sort() instances = OrderedDict((key, value) for value, key in instances) setattr(cls, descriptor_cls._class_attribute_, instances) super(OrderedDescriptorContainer, cls).__init__(cls_name, bases, members) LOCALE_LOCK = threading.Lock() @contextmanager def _set_locale(name): """ Context manager to temporarily set the locale to ``name``. An example is setting locale to "C" so that the C strtod() function will use "." as the decimal point to enable consistent numerical string parsing. Note that one cannot nest multiple _set_locale() context manager statements as this causes a threading lock. This code taken from https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale. Parameters ========== name : str Locale name, e.g. "C" or "fr_FR". 
""" name = str(name) with LOCALE_LOCK: saved = locale.setlocale(locale.LC_ALL) if saved == name: # Don't do anything if locale is already the requested locale yield else: try: locale.setlocale(locale.LC_ALL, name) yield finally: locale.setlocale(locale.LC_ALL, saved) set_locale = deprecated('4.0')(_set_locale) set_locale.__doc__ = """Deprecated version of :func:`_set_locale` above. See https://github.com/astropy/astropy/issues/9196 """ def dtype_bytes_or_chars(dtype): """ Parse the number out of a dtype.str value like '<U5' or '<f8'. See #5819 for discussion on the need for this function for getting the number of characters corresponding to a string dtype. Parameters ---------- dtype : numpy dtype object Input dtype Returns ------- bytes_or_chars : int or None Bits (for numeric types) or characters (for string types) """ match = re.search(r'(\d+)$', dtype.str) out = int(match.group(1)) if match else None return out def _hungry_for(option): # pragma: no cover """ Open browser loaded with ``option`` options near you. *Disclaimers: Payments not included. Astropy is not responsible for any liability from using this function.* .. note:: Accuracy depends on your browser settings. """ import webbrowser webbrowser.open(f'https://www.google.com/search?q={option}+near+me') def pizza(): # pragma: no cover """``/pizza``""" _hungry_for('pizza') def coffee(is_adam=False, is_brigitta=False): # pragma: no cover """``/coffee``""" if is_adam and is_brigitta: raise ValueError('There can be only one!') if is_adam: option = 'fresh+third+wave+coffee' elif is_brigitta: option = 'decent+espresso' else: option = 'coffee' _hungry_for(option)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains Python packages that are bundled with Astropy but are
external to Astropy, and hence are developed in a separate source tree. Note
that this package is distinct from the /cextern directory of the source code
distribution, as that directory only contains C extension code.

See the README.rst in this directory of the Astropy source repository for
more details.
"""
"""Strptime-related classes and functions. CLASSES: LocaleTime -- Discovers and stores locale-specific time information TimeRE -- Creates regexes for pattern matching a string of text containing time information FUNCTIONS: _getlang -- Figure out what language is being used for the locale strptime -- Calculates the time struct represented by the passed-in string """ # ----------------------------------------------------------------------------- # _strptime.py # # Licensed under PYTHON SOFTWARE FOUNDATION LICENSE # See licenses/PYTHON.rst # # Copied from https://github.com/python/cpython/blob/3.5/Lib/_strptime.py # ----------------------------------------------------------------------------- import time import locale import calendar from re import compile as re_compile from re import IGNORECASE from re import escape as re_escape from datetime import (date as datetime_date, timedelta as datetime_timedelta, timezone as datetime_timezone) try: from _thread import allocate_lock as _thread_allocate_lock except ImportError: from _dummy_thread import allocate_lock as _thread_allocate_lock __all__ = [] def _getlang(): # Figure out what the current language is set to. return locale.getlocale(locale.LC_TIME) class LocaleTime(object): """Stores and handles locale-specific information related to time. ATTRIBUTES: f_weekday -- full weekday names (7-item list) a_weekday -- abbreviated weekday names (7-item list) f_month -- full month names (13-item list; dummy value in [0], which is added by code) a_month -- abbreviated month names (13-item list, dummy value in [0], which is added by code) am_pm -- AM/PM representation (2-item list) LC_date_time -- format string for date/time representation (string) LC_date -- format string for date representation (string) LC_time -- format string for time representation (string) timezone -- daylight- and non-daylight-savings timezone representation (2-item list of sets) lang -- Language used by instance (2-item tuple) """ def __init__(self): """Set all attributes. Order of methods called matters for dependency reasons. The locale language is set at the offset and then checked again before exiting. This is to make sure that the attributes were not set with a mix of information from more than one locale. This would most likely happen when using threads where one thread calls a locale-dependent function while another thread changes the locale while the function in the other thread is still running. Proper coding would call for locks to prevent changing the locale while locale-dependent code is running. The check here is done in case someone does not think about doing this. Only other possible issue is if someone changed the timezone and did not call tz.tzset . That is an issue for the programmer, though, since changing the timezone is worthless without that call. """ self.lang = _getlang() self.__calc_weekday() self.__calc_month() self.__calc_am_pm() self.__calc_timezone() self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") if time.tzname != self.tzname or time.daylight != self.daylight: raise ValueError("timezone changed during initialization") def __pad(self, seq, front): # Add '' to seq to either the front (is True), else the back. seq = list(seq) if front: seq.insert(0, '') else: seq.append('') return seq def __calc_weekday(self): # Set self.a_weekday and self.f_weekday using the calendar # module. 
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)] f_weekday = [calendar.day_name[i].lower() for i in range(7)] self.a_weekday = a_weekday self.f_weekday = f_weekday def __calc_month(self): # Set self.f_month and self.a_month using the calendar module. a_month = [calendar.month_abbr[i].lower() for i in range(13)] f_month = [calendar.month_name[i].lower() for i in range(13)] self.a_month = a_month self.f_month = f_month def __calc_am_pm(self): # Set self.am_pm by using time.strftime(). # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that # magical; just happened to have used it everywhere else where a # static date was needed. am_pm = [] for hour in (1, 22): time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0)) am_pm.append(time.strftime("%p", time_tuple).lower()) self.am_pm = am_pm def __calc_date_time(self): # Set self.date_time, self.date, & self.time by using # time.strftime(). # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of # overloaded numbers is minimized. The order in which searches for # values within the format string is very important; it eliminates # possible ambiguity for what something represents. time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0)) date_time = [None, None, None] date_time[0] = time.strftime("%c", time_tuple).lower() date_time[1] = time.strftime("%x", time_tuple).lower() date_time[2] = time.strftime("%X", time_tuple).lower() replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'), (self.f_month[3], '%B'), (self.a_weekday[2], '%a'), (self.a_month[3], '%b'), (self.am_pm[1], '%p'), ('1999', '%Y'), ('99', '%y'), ('22', '%H'), ('44', '%M'), ('55', '%S'), ('76', '%j'), ('17', '%d'), ('03', '%m'), ('3', '%m'), # '3' needed for when no leading zero. ('2', '%w'), ('10', '%I')] replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone for tz in tz_values]) for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')): current_format = date_time[offset] for old, new in replacement_pairs: # Must deal with possible lack of locale info # manifesting itself as the empty string (e.g., Swedish's # lack of AM/PM info) or a platform returning a tuple of empty # strings (e.g., MacOS 9 having timezone as ('','')). if old: current_format = current_format.replace(old, new) # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since # 2005-01-03 occurs before the first Monday of the year. Otherwise # %U is used. time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0)) if '00' in time.strftime(directive, time_tuple): U_W = '%W' else: U_W = '%U' date_time[offset] = current_format.replace('11', U_W) self.LC_date_time = date_time[0] self.LC_date = date_time[1] self.LC_time = date_time[2] def __calc_timezone(self): # Set self.timezone by using time.tzname. # Do not worry about possibility of time.tzname[0] == time.tzname[1] # and time.daylight; handle that in strptime. try: time.tzset() except AttributeError: pass self.tzname = time.tzname self.daylight = time.daylight no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()}) if self.daylight: has_saving = frozenset({self.tzname[1].lower()}) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) class TimeRE(dict): """Handle conversion from format directives to regexes.""" def __init__(self, locale_time=None): """Create keys/values. Order of execution is important for dependency reasons. 
""" if locale_time: self.locale_time = locale_time else: self.locale_time = LocaleTime() base = super() base.__init__({ # The " \d" part of the regex is to make %c from ANSI C work 'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])", 'f': r"(?P<f>[0-9]{1,6})", 'H': r"(?P<H>2[0-3]|[0-1]\d|\d)", 'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])", 'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])", 'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])", 'M': r"(?P<M>[0-5]\d|\d)", 'S': r"(?P<S>6[0-1]|[0-5]\d|\d)", 'U': r"(?P<U>5[0-3]|[0-4]\d|\d)", 'w': r"(?P<w>[0-6])", # W is set below by using 'U' 'y': r"(?P<y>\d\d)", #XXX: Does 'Y' need to worry about having less or more than # 4 digits? 'Y': r"(?P<Y>\d\d\d\d)", 'z': r"(?P<z>[+-]\d\d[0-5]\d)", 'A': self.__seqToRE(self.locale_time.f_weekday, 'A'), 'a': self.__seqToRE(self.locale_time.a_weekday, 'a'), 'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'), 'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'), 'p': self.__seqToRE(self.locale_time.am_pm, 'p'), 'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone for tz in tz_names), 'Z'), '%': '%'}) base.__setitem__('W', base.__getitem__('U').replace('U', 'W')) base.__setitem__('c', self.pattern(self.locale_time.LC_date_time)) base.__setitem__('x', self.pattern(self.locale_time.LC_date)) base.__setitem__('X', self.pattern(self.locale_time.LC_time)) def __seqToRE(self, to_convert, directive): """Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. This prevents the possibility of a match occurring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). """ to_convert = sorted(to_convert, key=len, reverse=True) for value in to_convert: if value != '': break else: return '' regex = '|'.join(re_escape(stuff) for stuff in to_convert) regex = '(?P<%s>%s' % (directive, regex) return '%s)' % regex def pattern(self, format): """Return regex pattern for the format string. Need to make sure that any characters that might be interpreted as regex syntax are escaped. """ processed_format = '' # The sub() call escapes all characters that might be misconstrued # as regex syntax. Cannot use re.escape since we have to deal with # format directives (%m, etc.). regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])") format = regex_chars.sub(r"\\\1", format) whitespace_replacement = re_compile(r'\s+') format = whitespace_replacement.sub(r'\\s+', format) while '%' in format: directive_index = format.index('%')+1 processed_format = "%s%s%s" % (processed_format, format[:directive_index-1], self[format[directive_index]]) format = format[directive_index+1:] return "%s%s" % (processed_format, format) def compile(self, format): """Return a compiled re object for the format string.""" return re_compile(self.pattern(format), IGNORECASE) _cache_lock = _thread_allocate_lock() # DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock # first! 
_TimeRE_cache = TimeRE() _CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache _regex_cache = {} def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon): """Calculate the Julian day based on the year, week of the year, and day of the week, with week_start_day representing whether the week of the year assumes the week starts on Sunday or Monday (6 or 0).""" first_weekday = datetime_date(year, 1, 1).weekday() # If we are dealing with the %U directive (week starts on Sunday), it's # easier to just shift the view to Sunday being the first day of the # week. if not week_starts_Mon: first_weekday = (first_weekday + 1) % 7 day_of_week = (day_of_week + 1) % 7 # Need to watch out for a week 0 (when the first day of the year is not # the same as that specified by %U or %W). week_0_length = (7 - first_weekday) % 7 if week_of_year == 0: return 1 + day_of_week - first_weekday else: days_to_week = week_0_length + (7 * (week_of_year - 1)) return 1 + days_to_week + day_of_week def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): """Return a 2-tuple consisting of a time struct and an int containing the number of microseconds based on the input string and the format string.""" for index, arg in enumerate([data_string, format]): if not isinstance(arg, str): msg = "strptime() argument {} must be str, not {}" raise TypeError(msg.format(index, type(arg))) global _TimeRE_cache, _regex_cache with _cache_lock: locale_time = _TimeRE_cache.locale_time if (_getlang() != locale_time.lang or time.tzname != locale_time.tzname or time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() format_regex = _regex_cache.get(format) if not format_regex: try: format_regex = _TimeRE_cache.compile(format) # KeyError raised when a bad format is found; can be specified as # \\, in which case it was a stray % but with a space after it except KeyError as err: bad_directive = err.args[0] if bad_directive == "\\": bad_directive = "%" del err raise ValueError("'%s' is a bad directive in format '%s'" % (bad_directive, format)) from None # IndexError only occurs when the format string is "%" except IndexError: raise ValueError("stray %% in format '%s'" % format) from None _regex_cache[format] = format_regex found = format_regex.match(data_string) if not found: raise ValueError("time data %r does not match format %r" % (data_string, format)) if len(data_string) != found.end(): raise ValueError("unconverted data remains: %s" % data_string[found.end():]) year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 tzoffset = None # Default to -1 to signify that values not known; not critical to have, # though week_of_year = -1 week_of_year_start = -1 # weekday and julian defaulted to None so as to signal need to calculate # values weekday = julian = None found_dict = found.groupdict() for group_key in found_dict.keys(): # Directives not explicitly handled below: # c, x, X # handled by making out of other directives # U, W # worthless without day of the week if group_key == 'y': year = int(found_dict['y']) # Open Group specification for strptime() states that a %y #value in the range of [00, 68] is in the century 2000, while #[69,99] is in the century 1900 if year <= 68: year += 2000 else: year += 1900 elif group_key == 'Y': year = int(found_dict['Y']) elif group_key == 'm': month = int(found_dict['m']) elif group_key == 'B': month = 
locale_time.f_month.index(found_dict['B'].lower()) elif group_key == 'b': month = locale_time.a_month.index(found_dict['b'].lower()) elif group_key == 'd': day = int(found_dict['d']) elif group_key == 'H': hour = int(found_dict['H']) elif group_key == 'I': hour = int(found_dict['I']) ampm = found_dict.get('p', '').lower() # If there was no AM/PM indicator, we'll treat this like AM if ampm in ('', locale_time.am_pm[0]): # We're in AM so the hour is correct unless we're # looking at 12 midnight. # 12 midnight == 12 AM == hour 0 if hour == 12: hour = 0 elif ampm == locale_time.am_pm[1]: # We're in PM so we need to add 12 to the hour unless # we're looking at 12 noon. # 12 noon == 12 PM == hour 12 if hour != 12: hour += 12 elif group_key == 'M': minute = int(found_dict['M']) elif group_key == 'S': second = int(found_dict['S']) elif group_key == 'f': s = found_dict['f'] # Pad to always return microseconds. s += "0" * (6 - len(s)) fraction = int(s) elif group_key == 'A': weekday = locale_time.f_weekday.index(found_dict['A'].lower()) elif group_key == 'a': weekday = locale_time.a_weekday.index(found_dict['a'].lower()) elif group_key == 'w': weekday = int(found_dict['w']) if weekday == 0: weekday = 6 else: weekday -= 1 elif group_key == 'j': julian = int(found_dict['j']) elif group_key in ('U', 'W'): week_of_year = int(found_dict[group_key]) if group_key == 'U': # U starts week on Sunday. week_of_year_start = 6 else: # W starts week on Monday. week_of_year_start = 0 elif group_key == 'z': z = found_dict['z'] tzoffset = int(z[1:3]) * 60 + int(z[3:5]) if z.startswith("-"): tzoffset = -tzoffset elif group_key == 'Z': # Since -1 is default value only need to worry about setting tz if # it can be something other than -1. found_zone = found_dict['Z'].lower() for value, tz_values in enumerate(locale_time.timezone): if found_zone in tz_values: # Deal with bad locale setup where timezone names are the # same and yet time.daylight is true; too ambiguous to # be able to tell what timezone has daylight savings if (time.tzname[0] == time.tzname[1] and time.daylight and found_zone not in ("utc", "gmt")): break else: tz = value break leap_year_fix = False if year is None and month == 2 and day == 29: year = 1904 # 1904 is first leap year of 20th century leap_year_fix = True elif year is None: year = 1900 # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian is None and week_of_year != -1 and weekday is not None: week_starts_Mon = True if week_of_year_start == 0 else False julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, week_starts_Mon) if julian <= 0: year -= 1 yday = 366 if calendar.isleap(year) else 365 julian += yday # Cannot pre-calculate datetime_date() since can change in Julian # calculation and thus could have different value for the day of the week # calculation. if julian is None: # Need to add 1 to result since first day of the year is 1, not 0. julian = datetime_date(year, month, day).toordinal() - \ datetime_date(year, 1, 1).toordinal() + 1 else: # Assume that if they bothered to include Julian day it will # be accurate. 
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal()) year = datetime_result.year month = datetime_result.month day = datetime_result.day if weekday is None: weekday = datetime_date(year, month, day).weekday() # Add timezone info tzname = found_dict.get("Z") if tzoffset is not None: gmtoff = tzoffset * 60 else: gmtoff = None if leap_year_fix: # the caller didn't supply a year but asked for Feb 29th. We couldn't # use the default of 1900 for computations. We set it back to ensure # that February 29th is smaller than March 1st. year = 1900 return (year, month, day, hour, minute, second, weekday, julian, tz, tzname, gmtoff), fraction def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"): """Return a time struct based on the input string and the format string.""" tt = _strptime(data_string, format)[0] return time.struct_time(tt[:time._STRUCT_TM_ITEMS]) def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"): """Return a class cls instance based on the input string and the format string.""" tt, fraction = _strptime(data_string, format) tzname, gmtoff = tt[-2:] args = tt[:6] + (fraction,) if gmtoff is not None: tzdelta = datetime_timedelta(seconds=gmtoff) if tzname: tz = datetime_timezone(tzdelta, tzname) else: tz = datetime_timezone(tzdelta) args += (tz,) return cls(*args)
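# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the vendored CPython module):
# ``_strptime`` returns a 2-tuple of a time-struct-like tuple and the
# microsecond count, as documented above. Guarded so it only runs when
# this file is executed directly.
if __name__ == '__main__':
    tt, fraction = _strptime('1999-03-17 22:44:55.123',
                             '%Y-%m-%d %H:%M:%S.%f')
    print(tt[:6])    # (1999, 3, 17, 22, 44, 55)
    print(fraction)  # 123000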
""" Normalization class for Matplotlib that can be used to produce colorbars. """ import inspect import numpy as np from numpy import ma from .interval import (PercentileInterval, AsymmetricPercentileInterval, ManualInterval, MinMaxInterval, BaseInterval) from .stretch import (LinearStretch, SqrtStretch, PowerStretch, LogStretch, AsinhStretch, BaseStretch) try: import matplotlib # pylint: disable=W0611 from matplotlib.colors import Normalize from matplotlib import pyplot as plt except ImportError: class Normalize: def __init__(self, *args, **kwargs): raise ImportError('matplotlib is required in order to use this ' 'class.') __all__ = ['ImageNormalize', 'simple_norm', 'imshow_norm'] __doctest_requires__ = {'*': ['matplotlib']} class ImageNormalize(Normalize): """ Normalization class to be used with Matplotlib. Parameters ---------- data : ndarray, optional The image array. This input is used only if ``interval`` is also input. ``data`` and ``interval`` are used to compute the vmin and/or vmax values only if ``vmin`` or ``vmax`` are not input. interval : `~astropy.visualization.BaseInterval` subclass instance, optional The interval object to apply to the input ``data`` to determine the ``vmin`` and ``vmax`` values. This input is used only if ``data`` is also input. ``data`` and ``interval`` are used to compute the vmin and/or vmax values only if ``vmin`` or ``vmax`` are not input. vmin, vmax : float, optional The minimum and maximum levels to show for the data. The ``vmin`` and ``vmax`` inputs override any calculated values from the ``interval`` and ``data`` inputs. stretch : `~astropy.visualization.BaseStretch` subclass instance The stretch object to apply to the data. The default is `~astropy.visualization.LinearStretch`. clip : bool, optional If `True`, data values outside the [0:1] range are clipped to the [0:1] range. invalid : None or float, optional Value to assign NaN values generated by this class. NaNs in the input ``data`` array are not changed. For matplotlib normalization, the ``invalid`` value should map to the matplotlib colormap "under" value (i.e., any finite value < 0). If `None`, then NaN values are not replaced. This keyword has no effect if ``clip=True``. """ def __init__(self, data=None, interval=None, vmin=None, vmax=None, stretch=LinearStretch(), clip=False, invalid=-1.0): # this super call checks for matplotlib super().__init__(vmin=vmin, vmax=vmax, clip=clip) self.vmin = vmin self.vmax = vmax if stretch is None: raise ValueError('stretch must be input') if not isinstance(stretch, BaseStretch): raise TypeError('stretch must be an instance of a BaseStretch ' 'subclass') self.stretch = stretch if interval is not None and not isinstance(interval, BaseInterval): raise TypeError('interval must be an instance of a BaseInterval ' 'subclass') self.interval = interval self.inverse_stretch = stretch.inverse self.clip = clip self.invalid = invalid # Define vmin and vmax if not None and data was input if data is not None: self._set_limits(data) def _set_limits(self, data): if self.vmin is not None and self.vmax is not None: return # Define vmin and vmax from the interval class if not None if self.interval is None: if self.vmin is None: self.vmin = np.min(data[np.isfinite(data)]) if self.vmax is None: self.vmax = np.max(data[np.isfinite(data)]) else: _vmin, _vmax = self.interval.get_limits(data) if self.vmin is None: self.vmin = _vmin if self.vmax is None: self.vmax = _vmax def __call__(self, values, clip=None, invalid=None): """ Transform values using this normalization. 
Parameters ---------- values : array-like The input values. clip : bool, optional If `True`, values outside the [0:1] range are clipped to the [0:1] range. If `None` then the ``clip`` value from the `ImageNormalize` instance is used (the default of which is `False`). invalid : None or float, optional Value to assign NaN values generated by this class. NaNs in the input ``data`` array are not changed. For matplotlib normalization, the ``invalid`` value should map to the matplotlib colormap "under" value (i.e., any finite value < 0). If `None`, then the `ImageNormalize` instance value is used. This keyword has no effect if ``clip=True``. """ if clip is None: clip = self.clip if invalid is None: invalid = self.invalid if isinstance(values, ma.MaskedArray): if clip: mask = False else: mask = values.mask values = values.filled(self.vmax) else: mask = False # Make sure scalars get broadcast to 1-d if np.isscalar(values): values = np.array([values], dtype=float) else: # copy because of in-place operations after values = np.array(values, copy=True, dtype=float) # Define vmin and vmax if not None self._set_limits(values) # Normalize based on vmin and vmax np.subtract(values, self.vmin, out=values) np.true_divide(values, self.vmax - self.vmin, out=values) # Clip to the 0 to 1 range if clip: values = np.clip(values, 0., 1., out=values) # Stretch values if self.stretch._supports_invalid_kw: values = self.stretch(values, out=values, clip=False, invalid=invalid) else: values = self.stretch(values, out=values, clip=False) # Convert to masked array for matplotlib return ma.array(values, mask=mask) def inverse(self, values, invalid=None): # Find unstretched values in range 0 to 1 if self.inverse_stretch._supports_invalid_kw: values_norm = self.inverse_stretch(values, clip=False, invalid=invalid) else: values_norm = self.inverse_stretch(values, clip=False) # Scale to original range return values_norm * (self.vmax - self.vmin) + self.vmin def simple_norm(data, stretch='linear', power=1.0, asinh_a=0.1, min_cut=None, max_cut=None, min_percent=None, max_percent=None, percent=None, clip=False, log_a=1000, invalid=-1.0): """ Return a Normalization class that can be used for displaying images with Matplotlib. This function enables only a subset of image stretching functions available in `~astropy.visualization.mpl_normalize.ImageNormalize`. This function is used by the ``astropy.visualization.scripts.fits2bitmap`` script. Parameters ---------- data : ndarray The image array. stretch : {'linear', 'sqrt', 'power', log', 'asinh'}, optional The stretch function to apply to the image. The default is 'linear'. power : float, optional The power index for ``stretch='power'``. The default is 1.0. asinh_a : float, optional For ``stretch='asinh'``, the value where the asinh curve transitions from linear to logarithmic behavior, expressed as a fraction of the normalized image. Must be in the range between 0 and 1. The default is 0.1. min_cut : float, optional The pixel value of the minimum cut level. Data values less than ``min_cut`` will set to ``min_cut`` before stretching the image. The default is the image minimum. ``min_cut`` overrides ``min_percent``. max_cut : float, optional The pixel value of the maximum cut level. Data values greater than ``min_cut`` will set to ``min_cut`` before stretching the image. The default is the image maximum. ``max_cut`` overrides ``max_percent``. min_percent : float, optional The percentile value used to determine the pixel value of minimum cut level. The default is 0.0. 
``min_percent`` overrides ``percent``. max_percent : float, optional The percentile value used to determine the pixel value of maximum cut level. The default is 100.0. ``max_percent`` overrides ``percent``. percent : float, optional The percentage of the image values used to determine the pixel values of the minimum and maximum cut levels. The lower cut level will set at the ``(100 - percent) / 2`` percentile, while the upper cut level will be set at the ``(100 + percent) / 2`` percentile. The default is 100.0. ``percent`` is ignored if either ``min_percent`` or ``max_percent`` is input. clip : bool, optional If `True`, data values outside the [0:1] range are clipped to the [0:1] range. log_a : float, optional The log index for ``stretch='log'``. The default is 1000. invalid : None or float, optional Value to assign NaN values generated by the normalization. NaNs in the input ``data`` array are not changed. For matplotlib normalization, the ``invalid`` value should map to the matplotlib colormap "under" value (i.e., any finite value < 0). If `None`, then NaN values are not replaced. This keyword has no effect if ``clip=True``. Returns ------- result : `ImageNormalize` instance An `ImageNormalize` instance that can be used for displaying images with Matplotlib. """ if percent is not None: interval = PercentileInterval(percent) elif min_percent is not None or max_percent is not None: interval = AsymmetricPercentileInterval(min_percent or 0., max_percent or 100.) elif min_cut is not None or max_cut is not None: interval = ManualInterval(min_cut, max_cut) else: interval = MinMaxInterval() if stretch == 'linear': stretch = LinearStretch() elif stretch == 'sqrt': stretch = SqrtStretch() elif stretch == 'power': stretch = PowerStretch(power) elif stretch == 'log': stretch = LogStretch(log_a) elif stretch == 'asinh': stretch = AsinhStretch(asinh_a) else: raise ValueError(f'Unknown stretch: {stretch}.') vmin, vmax = interval.get_limits(data) return ImageNormalize(vmin=vmin, vmax=vmax, stretch=stretch, clip=clip, invalid=invalid) # used in imshow_norm _norm_sig = inspect.signature(ImageNormalize) def imshow_norm(data, ax=None, **kwargs): """ A convenience function to call matplotlib's `matplotlib.pyplot.imshow` function, using an `ImageNormalize` object as the normalization. Parameters ---------- data : 2D or 3D array-like The data to show. Can be whatever `~matplotlib.pyplot.imshow` and `ImageNormalize` both accept. See `~matplotlib.pyplot.imshow`. ax : None or `~matplotlib.axes.Axes`, optional If None, use pyplot's imshow. Otherwise, calls ``imshow`` method of the supplied axes. **kwargs : dict, optional All other keyword arguments are parsed first by the `ImageNormalize` initializer, then to `~matplotlib.pyplot.imshow`. Returns ------- result : tuple A tuple containing the `~matplotlib.image.AxesImage` generated by `~matplotlib.pyplot.imshow` as well as the `ImageNormalize` instance. Notes ----- The ``norm`` matplotlib keyword is not supported. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.visualization import (imshow_norm, MinMaxInterval, SqrtStretch) # Generate and display a test image image = np.arange(65536).reshape((256, 256)) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) im, norm = imshow_norm(image, ax, origin='lower', interval=MinMaxInterval(), stretch=SqrtStretch()) fig.colorbar(im) """ if 'X' in kwargs: raise ValueError('Cannot give both ``X`` and ``data``') if 'norm' in kwargs: raise ValueError('There is no point in using imshow_norm if you give ' 'the ``norm`` keyword - use imshow directly if you ' 'want that.') imshow_kwargs = dict(kwargs) norm_kwargs = {'data': data} for pname in _norm_sig.parameters: if pname in kwargs: norm_kwargs[pname] = imshow_kwargs.pop(pname) imshow_kwargs['norm'] = ImageNormalize(**norm_kwargs) if ax is None: imshow_result = plt.imshow(data, **imshow_kwargs) else: imshow_result = ax.imshow(data, **imshow_kwargs) return imshow_result, imshow_kwargs['norm']
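# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): ``simple_norm``
# combines an interval and a stretch into a ready-made ``ImageNormalize``.
# Assumes numpy and matplotlib are installed.
if __name__ == '__main__':
    import numpy as np
    data = np.arange(100.).reshape((10, 10))
    norm = simple_norm(data, stretch='sqrt', percent=99.)
    print(norm.vmin, norm.vmax)  # limits from PercentileInterval(99.)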
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from .hist import *
from .interval import *
from .mpl_normalize import *
from .mpl_style import *
from .stretch import *
from .transform import *
from .units import *
from .time import *
from .lupton_rgb import *
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Combine 3 images to produce a properly-scaled RGB image following Lupton et al. (2004). The three images must be aligned and have the same pixel scale and size. For details, see : https://ui.adsabs.harvard.edu/abs/2004PASP..116..133L """ import numpy as np from . import ZScaleInterval __all__ = ['make_lupton_rgb'] def compute_intensity(image_r, image_g=None, image_b=None): """ Return a naive total intensity from the red, blue, and green intensities. Parameters ---------- image_r : ndarray Intensity of image to be mapped to red; or total intensity if ``image_g`` and ``image_b`` are None. image_g : ndarray, optional Intensity of image to be mapped to green. image_b : ndarray, optional Intensity of image to be mapped to blue. Returns ------- intensity : ndarray Total intensity from the red, blue and green intensities, or ``image_r`` if green and blue images are not provided. """ if image_g is None or image_b is None: if not (image_g is None and image_b is None): raise ValueError("please specify either a single image " "or red, green, and blue images.") return image_r intensity = (image_r + image_g + image_b)/3.0 # Repack into whatever type was passed to us return np.asarray(intensity, dtype=image_r.dtype) class Mapping: """ Baseclass to map red, blue, green intensities into uint8 values. Parameters ---------- minimum : float or sequence(3) Intensity that should be mapped to black (a scalar or array for R, G, B). image : ndarray, optional An image used to calculate some parameters of some mappings. """ def __init__(self, minimum=None, image=None): self._uint8Max = float(np.iinfo(np.uint8).max) try: len(minimum) except TypeError: minimum = 3*[minimum] if len(minimum) != 3: raise ValueError("please provide 1 or 3 values for minimum.") self.minimum = minimum self._image = np.asarray(image) def make_rgb_image(self, image_r, image_g, image_b): """ Convert 3 arrays, image_r, image_g, and image_b into an 8-bit RGB image. Parameters ---------- image_r : ndarray Image to map to red. image_g : ndarray Image to map to green. image_b : ndarray Image to map to blue. Returns ------- RGBimage : ndarray RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array. """ image_r = np.asarray(image_r) image_g = np.asarray(image_g) image_b = np.asarray(image_b) if (image_r.shape != image_g.shape) or (image_g.shape != image_b.shape): msg = "The image shapes must match. r: {}, g: {} b: {}" raise ValueError(msg.format(image_r.shape, image_g.shape, image_b.shape)) return np.dstack(self._convert_images_to_uint8(image_r, image_g, image_b)).astype(np.uint8) def intensity(self, image_r, image_g, image_b): """ Return the total intensity from the red, blue, and green intensities. This is a naive computation, and may be overridden by subclasses. Parameters ---------- image_r : ndarray Intensity of image to be mapped to red; or total intensity if ``image_g`` and ``image_b`` are None. image_g : ndarray, optional Intensity of image to be mapped to green. image_b : ndarray, optional Intensity of image to be mapped to blue. Returns ------- intensity : ndarray Total intensity from the red, blue and green intensities, or ``image_r`` if green and blue images are not provided. """ return compute_intensity(image_r, image_g, image_b) def map_intensity_to_uint8(self, I): """ Return an array which, when multiplied by an image, returns that image mapped to the range of a uint8, [0, 255] (but not converted to uint8). 
The intensity is assumed to have had minimum subtracted (as that can be done per-band). Parameters ---------- I : ndarray Intensity to be mapped. Returns ------- mapped_I : ndarray ``I`` mapped to uint8 """ with np.errstate(invalid='ignore', divide='ignore'): return np.clip(I, 0, self._uint8Max) def _convert_images_to_uint8(self, image_r, image_g, image_b): """Use the mapping to convert images image_r, image_g, and image_b to a triplet of uint8 images""" image_r = image_r - self.minimum[0] # n.b. makes copy image_g = image_g - self.minimum[1] image_b = image_b - self.minimum[2] fac = self.map_intensity_to_uint8(self.intensity(image_r, image_g, image_b)) image_rgb = [image_r, image_g, image_b] for c in image_rgb: c *= fac with np.errstate(invalid='ignore'): c[c < 0] = 0 # individual bands can still be < 0, even if fac isn't pixmax = self._uint8Max r0, g0, b0 = image_rgb # copies -- could work row by row to minimise memory usage with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit for i, c in enumerate(image_rgb): c = np.where(r0 > g0, np.where(r0 > b0, np.where(r0 >= pixmax, c*pixmax/r0, c), np.where(b0 >= pixmax, c*pixmax/b0, c)), np.where(g0 > b0, np.where(g0 >= pixmax, c*pixmax/g0, c), np.where(b0 >= pixmax, c*pixmax/b0, c))).astype(np.uint8) c[c > pixmax] = pixmax image_rgb[i] = c return image_rgb class LinearMapping(Mapping): """ A linear map map of red, blue, green intensities into uint8 values. A linear stretch from [minimum, maximum]. If one or both are omitted use image min and/or max to set them. Parameters ---------- minimum : float Intensity that should be mapped to black (a scalar or array for R, G, B). maximum : float Intensity that should be mapped to white (a scalar). """ def __init__(self, minimum=None, maximum=None, image=None): if minimum is None or maximum is None: if image is None: raise ValueError("you must provide an image if you don't " "set both minimum and maximum") if minimum is None: minimum = image.min() if maximum is None: maximum = image.max() Mapping.__init__(self, minimum=minimum, image=image) self.maximum = maximum if maximum is None: self._range = None else: if maximum == minimum: raise ValueError("minimum and maximum values must not be equal") self._range = float(maximum - minimum) def map_intensity_to_uint8(self, I): with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit return np.where(I <= 0, 0, np.where(I >= self._range, self._uint8Max/I, self._uint8Max/self._range)) class AsinhMapping(Mapping): """ A mapping for an asinh stretch (preserving colours independent of brightness) x = asinh(Q (I - minimum)/stretch)/Q This reduces to a linear stretch if Q == 0 See https://ui.adsabs.harvard.edu/abs/2004PASP..116..133L Parameters ---------- minimum : float Intensity that should be mapped to black (a scalar or array for R, G, B). stretch : float The linear stretch of the image. Q : float The asinh softening parameter. """ def __init__(self, minimum, stretch, Q=8): Mapping.__init__(self, minimum) epsilon = 1.0/2**23 # 32bit floating point machine epsilon; sys.float_info.epsilon is 64bit if abs(Q) < epsilon: Q = 0.1 else: Qmax = 1e10 if Q > Qmax: Q = Qmax frac = 0.1 # gradient estimated using frac*stretch is _slope self._slope = frac*self._uint8Max/np.arcsinh(frac*Q) self._soften = Q/float(stretch) def map_intensity_to_uint8(self, I): with np.errstate(invalid='ignore', divide='ignore'): # n.b. 
np.where can't and doesn't short-circuit return np.where(I <= 0, 0, np.arcsinh(I*self._soften)*self._slope/I) class AsinhZScaleMapping(AsinhMapping): """ A mapping for an asinh stretch, estimating the linear stretch by zscale. x = asinh(Q (I - z1)/(z2 - z1))/Q Parameters ---------- image1 : ndarray or a list of arrays The image to analyse, or a list of 3 images to be converted to an intensity image. image2 : ndarray, optional the second image to analyse (must be specified with image3). image3 : ndarray, optional the third image to analyse (must be specified with image2). Q : float, optional The asinh softening parameter. Default is 8. pedestal : float or sequence(3), optional The value, or array of 3 values, to subtract from the images; or None. Notes ----- pedestal, if not None, is removed from the images when calculating the zscale stretch, and added back into Mapping.minimum[] """ def __init__(self, image1, image2=None, image3=None, Q=8, pedestal=None): """ """ if image2 is None or image3 is None: if not (image2 is None and image3 is None): raise ValueError("please specify either a single image " "or three images.") image = [image1] else: image = [image1, image2, image3] if pedestal is not None: try: len(pedestal) except TypeError: pedestal = 3*[pedestal] if len(pedestal) != 3: raise ValueError("please provide 1 or 3 pedestals.") image = list(image) # needs to be mutable for i, im in enumerate(image): if pedestal[i] != 0.0: image[i] = im - pedestal[i] # n.b. a copy else: pedestal = len(image)*[0.0] image = compute_intensity(*image) zscale_limits = ZScaleInterval().get_limits(image) zscale = LinearMapping(*zscale_limits, image=image) stretch = zscale.maximum - zscale.minimum[0] # zscale.minimum is always a triple minimum = zscale.minimum for i, level in enumerate(pedestal): minimum[i] += level AsinhMapping.__init__(self, minimum, stretch, Q) self._image = image def make_lupton_rgb(image_r, image_g, image_b, minimum=0, stretch=5, Q=8, filename=None): """ Return a Red/Green/Blue color image from up to 3 images using an asinh stretch. The input images can be int or float, and in any range or bit-depth. For a more detailed look at the use of this method, see the document :ref:`astropy:astropy-visualization-rgb`. Parameters ---------- image_r : ndarray Image to map to red. image_g : ndarray Image to map to green. image_b : ndarray Image to map to blue. minimum : float Intensity that should be mapped to black (a scalar or array for R, G, B). stretch : float The linear stretch of the image. Q : float The asinh softening parameter. filename : str Write the resulting RGB image to a file (file type determined from extension). Returns ------- rgb : ndarray RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array. """ asinhMap = AsinhMapping(minimum, stretch, Q) rgb = asinhMap.make_rgb_image(image_r, image_g, image_b) if filename: import matplotlib.image matplotlib.image.imsave(filename, rgb, origin='lower') return rgb
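# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): combine three
# synthetic bands into an 8-bit RGB array with the asinh stretch.
# Assumes numpy is installed.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.default_rng(0)
    r, g, b = (rng.random((32, 32)) for _ in range(3))
    rgb = make_lupton_rgb(r, g, b, stretch=0.5, Q=8)
    print(rgb.shape, rgb.dtype)  # (32, 32, 3) uint8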
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Classes that deal with computing intervals from arrays of values based on various criteria. """ import abc import numpy as np from .transform import BaseTransform __all__ = ['BaseInterval', 'ManualInterval', 'MinMaxInterval', 'AsymmetricPercentileInterval', 'PercentileInterval', 'ZScaleInterval'] class BaseInterval(BaseTransform): """ Base class for the interval classes, which, when called with an array of values, return an interval computed following different algorithms. """ @abc.abstractmethod def get_limits(self, values): """ Return the minimum and maximum value in the interval based on the values provided. Parameters ---------- values : ndarray The image values. Returns ------- vmin, vmax : float The mininium and maximum image value in the interval. """ raise NotImplementedError('Needs to be implemented in a subclass.') def __call__(self, values, clip=True, out=None): """ Transform values using this interval. Parameters ---------- values : array-like The input values. clip : bool, optional If `True` (default), values outside the [0:1] range are clipped to the [0:1] range. out : ndarray, optional If specified, the output values will be placed in this array (typically used for in-place calculations). Returns ------- result : ndarray The transformed values. """ vmin, vmax = self.get_limits(values) if out is None: values = np.subtract(values, float(vmin)) else: if out.dtype.kind != 'f': raise TypeError('Can only do in-place scaling for ' 'floating-point arrays') values = np.subtract(values, float(vmin), out=out) if (vmax - vmin) != 0: np.true_divide(values, vmax - vmin, out=values) if clip: np.clip(values, 0., 1., out=values) return values class ManualInterval(BaseInterval): """ Interval based on user-specified values. Parameters ---------- vmin : float, optional The minimum value in the scaling. Defaults to the image minimum (ignoring NaNs) vmax : float, optional The maximum value in the scaling. Defaults to the image maximum (ignoring NaNs) """ def __init__(self, vmin=None, vmax=None): self.vmin = vmin self.vmax = vmax def get_limits(self, values): # Make sure values is a Numpy array values = np.asarray(values).ravel() # Filter out invalid values (inf, nan) values = values[np.isfinite(values)] vmin = np.min(values) if self.vmin is None else self.vmin vmax = np.max(values) if self.vmax is None else self.vmax return vmin, vmax class MinMaxInterval(BaseInterval): """ Interval based on the minimum and maximum values in the data. """ def get_limits(self, values): # Make sure values is a Numpy array values = np.asarray(values).ravel() # Filter out invalid values (inf, nan) values = values[np.isfinite(values)] return np.min(values), np.max(values) class AsymmetricPercentileInterval(BaseInterval): """ Interval based on a keeping a specified fraction of pixels (can be asymmetric). Parameters ---------- lower_percentile : float The lower percentile below which to ignore pixels. upper_percentile : float The upper percentile above which to ignore pixels. n_samples : int, optional Maximum number of values to use. If this is specified, and there are more values in the dataset as this, then values are randomly sampled from the array (with replacement). 
""" def __init__(self, lower_percentile, upper_percentile, n_samples=None): self.lower_percentile = lower_percentile self.upper_percentile = upper_percentile self.n_samples = n_samples def get_limits(self, values): # Make sure values is a Numpy array values = np.asarray(values).ravel() # If needed, limit the number of samples. We sample with replacement # since this is much faster. if self.n_samples is not None and values.size > self.n_samples: values = np.random.choice(values, self.n_samples) # Filter out invalid values (inf, nan) values = values[np.isfinite(values)] # Determine values at percentiles vmin, vmax = np.percentile(values, (self.lower_percentile, self.upper_percentile)) return vmin, vmax class PercentileInterval(AsymmetricPercentileInterval): """ Interval based on a keeping a specified fraction of pixels. Parameters ---------- percentile : float The fraction of pixels to keep. The same fraction of pixels is eliminated from both ends. n_samples : int, optional Maximum number of values to use. If this is specified, and there are more values in the dataset as this, then values are randomly sampled from the array (with replacement). """ def __init__(self, percentile, n_samples=None): lower_percentile = (100 - percentile) * 0.5 upper_percentile = 100 - lower_percentile super().__init__( lower_percentile, upper_percentile, n_samples=n_samples) class ZScaleInterval(BaseInterval): """ Interval based on IRAF's zscale. https://iraf.net/forum/viewtopic.php?showtopic=134139 Original implementation: https://github.com/spacetelescope/stsci.numdisplay/blob/master/lib/stsci/numdisplay/zscale.py Licensed under a 3-clause BSD style license (see AURA_LICENSE.rst). Parameters ---------- nsamples : int, optional The number of points in the array to sample for determining scaling factors. Defaults to 1000. contrast : float, optional The scaling factor (between 0 and 1) for determining the minimum and maximum value. Larger values increase the difference between the minimum and maximum values used for display. Defaults to 0.25. max_reject : float, optional If more than ``max_reject * npixels`` pixels are rejected, then the returned values are the minimum and maximum of the data. Defaults to 0.5. min_npixels : int, optional If there are less than ``min_npixels`` pixels remaining after the pixel rejection, then the returned values are the minimum and maximum of the data. Defaults to 5. krej : float, optional The number of sigma used for the rejection. Defaults to 2.5. max_iterations : int, optional The maximum number of iterations for the rejection. Defaults to 5. 
""" def __init__(self, nsamples=1000, contrast=0.25, max_reject=0.5, min_npixels=5, krej=2.5, max_iterations=5): self.nsamples = nsamples self.contrast = contrast self.max_reject = max_reject self.min_npixels = min_npixels self.krej = krej self.max_iterations = max_iterations def get_limits(self, values): # Sample the image values = np.asarray(values) values = values[np.isfinite(values)] stride = int(max(1.0, values.size / self.nsamples)) samples = values[::stride][:self.nsamples] samples.sort() npix = len(samples) vmin = samples[0] vmax = samples[-1] # Fit a line to the sorted array of samples minpix = max(self.min_npixels, int(npix * self.max_reject)) x = np.arange(npix) ngoodpix = npix last_ngoodpix = npix + 1 # Bad pixels mask used in k-sigma clipping badpix = np.zeros(npix, dtype=bool) # Kernel used to dilate the bad pixels mask ngrow = max(1, int(npix * 0.01)) kernel = np.ones(ngrow, dtype=bool) for _ in range(self.max_iterations): if ngoodpix >= last_ngoodpix or ngoodpix < minpix: break fit = np.polyfit(x, samples, deg=1, w=(~badpix).astype(int)) fitted = np.poly1d(fit)(x) # Subtract fitted line from the data array flat = samples - fitted # Compute the k-sigma rejection threshold threshold = self.krej * flat[~badpix].std() # Detect and reject pixels further than k*sigma from the # fitted line badpix[(flat < - threshold) | (flat > threshold)] = True # Convolve with a kernel of length ngrow badpix = np.convolve(badpix, kernel, mode='same') last_ngoodpix = ngoodpix ngoodpix = np.sum(~badpix) if ngoodpix >= minpix: slope, _ = fit if self.contrast > 0: slope = slope / self.contrast center_pixel = (npix - 1) // 2 median = np.median(samples) vmin = max(vmin, median - (center_pixel - 1) * slope) vmax = min(vmax, median + (npix - center_pixel) * slope) return vmin, vmax
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np __doctest_skip__ = ['quantity_support'] def quantity_support(format='latex_inline'): """ Enable support for plotting `astropy.units.Quantity` instances in matplotlib. May be (optionally) used with a ``with`` statement. >>> import matplotlib.pyplot as plt >>> from astropy import units as u >>> from astropy import visualization >>> with visualization.quantity_support(): ... plt.figure() ... plt.plot([1, 2, 3] * u.m) [...] ... plt.plot([101, 125, 150] * u.cm) [...] ... plt.draw() Parameters ---------- format : `astropy.units.format.Base` instance or str The name of a format or a formatter object. If not provided, defaults to ``latex_inline``. """ from astropy import units as u # import Angle just so we have a more or less complete list of Quantity # subclasses loaded - matplotlib needs them all separately! # NOTE: in matplotlib >=3.2, subclasses will be recognized automatically, # and once that becomes our minimum version, we can remove this, # adding just u.Quantity itself to the registry. from astropy.coordinates import Angle # noqa from matplotlib import units from matplotlib import ticker # Get all subclass for Quantity, since matplotlib checks on class, # not subclass. def all_issubclass(cls): return {cls}.union( [s for c in cls.__subclasses__() for s in all_issubclass(c)]) def rad_fn(x, pos=None): n = int((x / np.pi) * 2.0 + 0.25) if n == 0: return '0' elif n == 1: return 'π/2' elif n == 2: return 'π' elif n % 2 == 0: return f'{n // 2}π' else: return f'{n}π/2' class MplQuantityConverter(units.ConversionInterface): _all_issubclass_quantity = all_issubclass(u.Quantity) def __init__(self): # Keep track of original converter in case the context manager is # used in a nested way. self._original_converter = {} for cls in self._all_issubclass_quantity: self._original_converter[cls] = units.registry.get(cls) units.registry[cls] = self @staticmethod def axisinfo(unit, axis): if unit == u.radian: return units.AxisInfo( majloc=ticker.MultipleLocator(base=np.pi/2), majfmt=ticker.FuncFormatter(rad_fn), label=unit.to_string(), ) elif unit == u.degree: return units.AxisInfo( majloc=ticker.AutoLocator(), majfmt=ticker.FormatStrFormatter('%i°'), label=unit.to_string(), ) elif unit is not None: return units.AxisInfo(label=unit.to_string(format)) return None @staticmethod def convert(val, unit, axis): if isinstance(val, u.Quantity): return val.to_value(unit) elif isinstance(val, list) and val and isinstance(val[0], u.Quantity): return [v.to_value(unit) for v in val] else: return val @staticmethod def default_units(x, axis): if hasattr(x, 'unit'): return x.unit return None def __enter__(self): return self def __exit__(self, type, value, tb): for cls in self._all_issubclass_quantity: if self._original_converter[cls] is None: del units.registry[cls] else: units.registry[cls] = self._original_converter[cls] return MplQuantityConverter()
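# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): outside a ``with``
# block the returned converter stays registered in matplotlib's unit
# registry until ``__exit__`` is called explicitly. This pokes at
# matplotlib internals purely for demonstration.
if __name__ == '__main__':
    from astropy import units as u
    from matplotlib import units as mpl_units
    conv = quantity_support()
    print(mpl_units.registry[u.Quantity] is conv)  # True
    conv.__exit__(None, None, None)  # restore the previous registry state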
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from datetime import datetime from astropy.time import Time from astropy import units as u __all__ = ['time_support'] __doctest_requires__ = {'time_support': ['matplotlib']} UNSUPPORTED_FORMATS = ('datetime', 'datetime64') YMDHMS_FORMATS = ('fits', 'iso', 'isot', 'yday') STR_FORMATS = YMDHMS_FORMATS + ('byear_str', 'jyear_str') def time_support(*, scale=None, format=None, simplify=True): """ Enable support for plotting `astropy.time.Time` instances in matplotlib. May be (optionally) used with a ``with`` statement. >>> import matplotlib.pyplot as plt >>> from astropy import units as u >>> from astropy import visualization >>> with visualization.time_support(): # doctest: +IGNORE_OUTPUT ... plt.figure() ... plt.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40'])) ... plt.draw() Parameters ---------- scale : str, optional The time scale to use for the times on the axis. If not specified, the scale of the first Time object passed to Matplotlib is used. format : str, optional The time format to use for the times on the axis. If not specified, the format of the first Time object passed to Matplotlib is used. simplify : bool, optional If possible, simplify labels, e.g. by removing 00:00:00.000 times from ISO strings if all labels fall on that time. """ import matplotlib.units as units from matplotlib.ticker import MaxNLocator, ScalarFormatter from astropy.visualization.wcsaxes.utils import select_step_hour, select_step_scalar class AstropyTimeLocator(MaxNLocator): # Note: we default to AutoLocator since many time formats # can just use this. def __init__(self, converter, *args, **kwargs): kwargs['nbins'] = 4 super().__init__(*args, **kwargs) self._converter = converter def tick_values(self, vmin, vmax): # Where we put the ticks depends on the format we are using if self._converter.format in YMDHMS_FORMATS: # If we are here, we need to check what the range of values # is and decide how to find tick locations accordingly vrange = vmax - vmin if (self._converter.format != 'yday' and vrange > 31) or vrange > 366: # greater than a month # We need to be careful here since not all years and months have # the same length # Start off by converting the values from the range to # datetime objects, so that we can easily extract the year and # month. 
tmin = Time(vmin, scale=self._converter.scale, format='mjd').datetime tmax = Time(vmax, scale=self._converter.scale, format='mjd').datetime # Find the range of years ymin = tmin.year ymax = tmax.year if ymax > ymin + 1: # greater than a year # Find the step we want to use ystep = int(select_step_scalar(max(1, (ymax - ymin) / 3))) ymin = ystep * (ymin // ystep) # Generate the years for these steps times = [] for year in range(ymin, ymax + 1, ystep): times.append(datetime(year=year, month=1, day=1)) else: # greater than a month but less than a year mmin = tmin.month mmax = tmax.month + 12 * (ymax - ymin) mstep = int(select_step_scalar(max(1, (mmax - mmin) / 3))) mmin = mstep * max(1, mmin // mstep) # Generate the months for these steps times = [] for month in range(mmin, mmax + 1, mstep): times.append(datetime(year=ymin + (month - 1) // 12, month=(month - 1) % 12 + 1, day=1)) # Convert back to MJD values = Time(times, scale=self._converter.scale).mjd elif vrange > 1: # greater than a day self.set_params(steps=[1, 2, 5, 10]) values = super().tick_values(vmin, vmax) else: # Determine ideal step dv = (vmax - vmin) / 3 * 24 << u.hourangle # And round to nearest sensible value dv = select_step_hour(dv).to_value(u.hourangle) / 24 # Determine tick locations imin = np.ceil(vmin / dv) imax = np.floor(vmax / dv) values = np.arange(imin, imax + 1, dtype=np.int64) * dv else: values = super().tick_values(vmin, vmax) # Get rid of values outside of the input interval values = values[(values >= vmin) & (values <= vmax)] return values def __call__(self): vmin, vmax = self.axis.get_view_interval() return self.tick_values(vmin, vmax) class AstropyTimeFormatter(ScalarFormatter): def __init__(self, converter, *args, **kwargs): super().__init__(*args, **kwargs) self._converter = converter self.set_useOffset(False) self.set_scientific(False) def __call__(self, value, pos=None): # Needed for Matplotlib <3.1 if self._converter.format in STR_FORMATS: return self.format_ticks([value])[0] else: return super().__call__(value, pos=pos) def format_ticks(self, values): if len(values) == 0: return [] if self._converter.format in YMDHMS_FORMATS: times = Time(values, format='mjd', scale=self._converter.scale) formatted = getattr(times, self._converter.format) if self._converter.simplify: if self._converter.format in ('fits', 'iso', 'isot'): if all([x.endswith('00:00:00.000') for x in formatted]): split = ' ' if self._converter.format == 'iso' else 'T' formatted = [x.split(split)[0] for x in formatted] elif self._converter.format == 'yday': if all([x.endswith(':001:00:00:00.000') for x in formatted]): formatted = [x.split(':', 1)[0] for x in formatted] return formatted elif self._converter.format == 'byear_str': return Time(values, format='byear', scale=self._converter.scale).byear_str elif self._converter.format == 'jyear_str': return Time(values, format='jyear', scale=self._converter.scale).jyear_str else: return super().format_ticks(values) class MplTimeConverter(units.ConversionInterface): def __init__(self, scale=None, format=None, simplify=None): super().__init__() self.format = format self.scale = scale self.simplify = simplify # Keep track of original converter in case the context manager is # used in a nested way. 
self._original_converter = units.registry.get(Time) units.registry[Time] = self @property def format(self): return self._format @format.setter def format(self, value): if value in UNSUPPORTED_FORMATS: raise ValueError(f'time_support does not support format={value}') self._format = value def __enter__(self): return self def __exit__(self, type, value, tb): if self._original_converter is None: del units.registry[Time] else: units.registry[Time] = self._original_converter def default_units(self, x, axis): if isinstance(x, tuple): x = x[0] if self.format is None: self.format = x.format if self.scale is None: self.scale = x.scale return 'astropy_time' def convert(self, value, unit, axis): """ Convert a Time value to a scalar or array. """ scaled = getattr(value, self.scale) if self.format in YMDHMS_FORMATS: return scaled.mjd elif self.format == 'byear_str': return scaled.byear elif self.format == 'jyear_str': return scaled.jyear else: return getattr(scaled, self.format) def axisinfo(self, unit, axis): """ Return major and minor tick locators and formatters. """ majloc = AstropyTimeLocator(self) majfmt = AstropyTimeFormatter(self) return units.AxisInfo(majfmt=majfmt, majloc=majloc, label=f'Time ({self.scale})') return MplTimeConverter(scale=scale, format=format, simplify=simplify)
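# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): formats listed in
# UNSUPPORTED_FORMATS are rejected up front by the ``format`` setter.
if __name__ == '__main__':
    try:
        time_support(format='datetime')
    except ValueError as exc:
        print(exc)  # time_support does not support format=datetime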
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This module contains dictionaries that can be used to set a matplotlib # plotting style. It is no longer documented/recommended as of Astropy v3.0 # but is kept here for backward-compatibility. __all__ = ['astropy_mpl_style_1', 'astropy_mpl_style'] # Version 1 astropy plotting style for matplotlib astropy_mpl_style_1 = { # Lines 'lines.linewidth': 1.7, 'lines.antialiased': True, # Patches 'patch.linewidth': 1.0, 'patch.facecolor': '#348ABD', 'patch.edgecolor': '#CCCCCC', 'patch.antialiased': True, # Images 'image.cmap': 'gist_heat', 'image.origin': 'upper', # Font 'font.size': 12.0, # Axes 'axes.facecolor': '#FFFFFF', 'axes.edgecolor': '#AAAAAA', 'axes.linewidth': 1.0, 'axes.grid': True, 'axes.titlesize': 'x-large', 'axes.labelsize': 'large', 'axes.labelcolor': 'k', 'axes.axisbelow': True, # Ticks 'xtick.major.size': 0, 'xtick.minor.size': 0, 'xtick.major.pad': 6, 'xtick.minor.pad': 6, 'xtick.color': '#565656', 'xtick.direction': 'in', 'ytick.major.size': 0, 'ytick.minor.size': 0, 'ytick.major.pad': 6, 'ytick.minor.pad': 6, 'ytick.color': '#565656', 'ytick.direction': 'in', # Legend 'legend.fancybox': True, 'legend.loc': 'best', # Figure 'figure.figsize': [8, 6], 'figure.facecolor': '1.0', 'figure.edgecolor': '0.50', 'figure.subplot.hspace': 0.5, # Other 'savefig.dpi': 72, } color_cycle = ['#348ABD', # blue '#7A68A6', # purple '#A60628', # red '#467821', # green '#CF4457', # pink '#188487', # turquoise '#E24A33'] # orange try: # This is a dependency of matplotlib, so should be present if matplotlib # is installed. from cycler import cycler astropy_mpl_style_1['axes.prop_cycle'] = cycler('color', color_cycle) except ImportError: astropy_mpl_style_1['axes.color_cycle'] = color_cycle astropy_mpl_style = astropy_mpl_style_1 """The most recent version of the astropy plotting style."""
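# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): the style is a
# plain rcParams dictionary, so it can be passed straight to matplotlib's
# style machinery. Assumes matplotlib is installed; the output filename
# is arbitrary.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    with plt.style.context(astropy_mpl_style):
        plt.plot([1, 2, 3])
        plt.savefig('styled_plot.png')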
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.stats.histogram import calculate_bin_edges __all__ = ['hist'] def hist(x, bins=10, ax=None, max_bins=1e5, **kwargs): """Enhanced histogram function This is a histogram function that enables the use of more sophisticated algorithms for determining bins. Aside from the ``bins`` argument allowing a string specified how bins are computed, the parameters are the same as pylab.hist(). This function was ported from astroML: https://www.astroml.org/ Parameters ---------- x : array-like array of data to be histogrammed bins : int, list, or str, optional If bins is a string, then it must be one of: - 'blocks' : use bayesian blocks for dynamic bin widths - 'knuth' : use Knuth's rule to determine bins - 'scott' : use Scott's rule to determine bins - 'freedman' : use the Freedman-Diaconis rule to determine bins ax : `~matplotlib.axes.Axes` instance, optional Specify the Axes on which to draw the histogram. If not specified, then the current active axes will be used. max_bins : int, optional Maximum number of bins allowed. With more than a few thousand bins the performance of matplotlib will not be great. If the number of bins is large *and* the number of input data points is large then the it will take a very long time to compute the histogram. **kwargs : other keyword arguments are described in ``plt.hist()``. Notes ----- Return values are the same as for ``plt.hist()`` See Also -------- astropy.stats.histogram """ # Note that we only calculate the bin edges...matplotlib will calculate # the actual histogram. range = kwargs.get('range', None) weights = kwargs.get('weights', None) bins = calculate_bin_edges(x, bins, range=range, weights=weights) if len(bins) > max_bins: raise ValueError('Histogram has too many bins: ' '{nbin}. Use max_bins to increase the number ' 'of allowed bins or range to restrict ' 'the histogram range.'.format(nbin=len(bins))) if ax is None: # optional dependency; only import if strictly needed. import matplotlib.pyplot as plt ax = plt.gca() return ax.hist(x, bins, **kwargs)
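# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): a histogram with
# Freedman-Diaconis bins computed by astropy.stats. Assumes numpy and
# matplotlib are installed.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.random.default_rng(2).normal(size=1000)
    counts, edges, patches = hist(x, bins='freedman')
    print(len(edges) - 1, 'bins')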
# Licensed under a 3-clause BSD style license - see LICENSE.rst

__all__ = ['BaseTransform', 'CompositeTransform']


class BaseTransform:
    """
    A transformation object.

    This is used to construct transformations such as scaling, stretching,
    and so on.
    """

    def __add__(self, other):
        return CompositeTransform(other, self)


class CompositeTransform(BaseTransform):
    """
    A combination of two transforms.

    Parameters
    ----------
    transform_1 : :class:`astropy.visualization.BaseTransform`
        The first transform to apply.
    transform_2 : :class:`astropy.visualization.BaseTransform`
        The second transform to apply.
    """

    def __init__(self, transform_1, transform_2):
        super().__init__()
        self.transform_1 = transform_1
        self.transform_2 = transform_2

    def __call__(self, values, clip=True):
        return self.transform_2(self.transform_1(values, clip=clip),
                                clip=clip)

    @property
    def inverse(self):
        return self.__class__(self.transform_2.inverse,
                              self.transform_1.inverse)
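# Example (illustrative sketch): because ``__add__`` builds
# ``CompositeTransform(other, self)``, the right-hand operand of ``+`` is
# applied first::
#
#     from astropy.visualization import LogStretch, SqrtStretch
#
#     transform = SqrtStretch() + LogStretch()
#     # transform(values) is equivalent to SqrtStretch()(LogStretch()(values))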
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Classes that deal with stretching, i.e. mapping a range of [0:1] values onto another set of [0:1] values with a transformation """ import numpy as np from .transform import BaseTransform from .transform import CompositeTransform __all__ = ["BaseStretch", "LinearStretch", "SqrtStretch", "PowerStretch", "PowerDistStretch", "SquaredStretch", "LogStretch", "AsinhStretch", "SinhStretch", "HistEqStretch", "ContrastBiasStretch", "CompositeStretch"] def _logn(n, x, out=None): """Calculate the log base n of x.""" # We define this because numpy.lib.scimath.logn doesn't support out= if out is None: return np.log(x) / np.log(n) else: np.log(x, out=out) np.true_divide(out, np.log(n), out=out) return out def _prepare(values, clip=True, out=None): """ Prepare the data by optionally clipping and copying, and return the array that should be subsequently used for in-place calculations. """ if clip: return np.clip(values, 0., 1., out=out) else: if out is None: return np.array(values, copy=True) else: out[:] = np.asarray(values) return out class BaseStretch(BaseTransform): """ Base class for the stretch classes, which, when called with an array of values in the range [0:1], return an transformed array of values, also in the range [0:1]. """ @property def _supports_invalid_kw(self): return False def __add__(self, other): return CompositeStretch(other, self) def __call__(self, values, clip=True, out=None): """ Transform values using this stretch. Parameters ---------- values : array-like The input values, which should already be normalized to the [0:1] range. clip : bool, optional If `True` (default), values outside the [0:1] range are clipped to the [0:1] range. out : ndarray, optional If specified, the output values will be placed in this array (typically used for in-place calculations). Returns ------- result : ndarray The transformed values. """ @property def inverse(self): """A stretch object that performs the inverse operation.""" class LinearStretch(BaseStretch): """ A linear stretch with a slope and offset. The stretch is given by: .. math:: y = slope x + intercept Parameters ---------- slope : float, optional The ``slope`` parameter used in the above formula. Default is 1. intercept : float, optional The ``intercept`` parameter used in the above formula. Default is 0. """ def __init__(self, slope=1, intercept=0): super().__init__() self.slope = slope self.intercept = intercept def __call__(self, values, clip=True, out=None): values = _prepare(values, clip=clip, out=out) if self.slope != 1: np.multiply(values, self.slope, out=values) if self.intercept != 0: np.add(values, self.intercept, out=values) return values @property def inverse(self): """A stretch object that performs the inverse operation.""" return LinearStretch(1. / self.slope, - self.intercept / self.slope) class SqrtStretch(BaseStretch): r""" A square root stretch. The stretch is given by: .. math:: y = \sqrt{x} """ @property def _supports_invalid_kw(self): return True def __call__(self, values, clip=True, out=None, invalid=None): """ Transform values using this stretch. Parameters ---------- values : array-like The input values, which should already be normalized to the [0:1] range. clip : bool, optional If `True` (default), values outside the [0:1] range are clipped to the [0:1] range. out : ndarray, optional If specified, the output values will be placed in this array (typically used for in-place calculations). 
        invalid : None or float, optional
            Value to assign NaN values generated by this class.  NaNs in
            the input ``values`` array are not changed.  This option is
            generally used with matplotlib normalization classes, where
            the ``invalid`` value should map to the matplotlib colormap
            "under" value (i.e., any finite value < 0).  If `None`, then
            NaN values are not replaced.  This keyword has no effect if
            ``clip=True``.

        Returns
        -------
        result : ndarray
            The transformed values.
        """
        values = _prepare(values, clip=clip, out=out)
        replace_invalid = not clip and invalid is not None
        with np.errstate(invalid='ignore'):
            if replace_invalid:
                idx = (values < 0)
            np.sqrt(values, out=values)

        if replace_invalid:
            # Assign new NaN (i.e., NaN not in the original input values,
            # but generated by this class) to the invalid value.
            values[idx] = invalid

        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return PowerStretch(2)


class PowerStretch(BaseStretch):
    r"""
    A power stretch.

    The stretch is given by:

    .. math::
        y = x^a

    Parameters
    ----------
    a : float
        The power index (see the above formula).  ``a`` must be greater
        than 0.
    """

    @property
    def _supports_invalid_kw(self):
        return True

    def __init__(self, a):
        super().__init__()
        if a <= 0:
            raise ValueError("a must be > 0")
        self.power = a

    def __call__(self, values, clip=True, out=None, invalid=None):
        """
        Transform values using this stretch.

        Parameters
        ----------
        values : array-like
            The input values, which should already be normalized to the
            [0:1] range.
        clip : bool, optional
            If `True` (default), values outside the [0:1] range are
            clipped to the [0:1] range.
        out : ndarray, optional
            If specified, the output values will be placed in this array
            (typically used for in-place calculations).
        invalid : None or float, optional
            Value to assign NaN values generated by this class.  NaNs in
            the input ``values`` array are not changed.  This option is
            generally used with matplotlib normalization classes, where
            the ``invalid`` value should map to the matplotlib colormap
            "under" value (i.e., any finite value < 0).  If `None`, then
            NaN values are not replaced.  This keyword has no effect if
            ``clip=True``.

        Returns
        -------
        result : ndarray
            The transformed values.
        """
        values = _prepare(values, clip=clip, out=out)
        replace_invalid = (not clip
                           and invalid is not None
                           and ((-1 < self.power < 0)
                                or (0 < self.power < 1)))
        with np.errstate(invalid='ignore'):
            if replace_invalid:
                idx = (values < 0)
            np.power(values, self.power, out=values)

        if replace_invalid:
            # Assign new NaN (i.e., NaN not in the original input values,
            # but generated by this class) to the invalid value.
            values[idx] = invalid

        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return PowerStretch(1. / self.power)


class PowerDistStretch(BaseStretch):
    r"""
    An alternative power stretch.

    The stretch is given by:

    .. math::
        y = \frac{a^x - 1}{a - 1}

    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula.  ``a`` must be
        greater than or equal to 0, but cannot be set to 1.  Default is 1000.
    """

    def __init__(self, a=1000.0):
        if a < 0 or a == 1:  # singularity
            raise ValueError("a must be >= 0, but cannot be set to 1")
        super().__init__()
        self.exp = a

    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        np.power(self.exp, values, out=values)
        np.subtract(values, 1, out=values)
        np.true_divide(values, self.exp - 1.0, out=values)
        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedPowerDistStretch(a=self.exp)


class InvertedPowerDistStretch(BaseStretch):
    r"""
    Inverse transformation for
    `~astropy.visualization.PowerDistStretch`.

    The stretch is given by:

    .. math::
        y = \frac{\log{(x (a - 1) + 1)}}{\log{a}}

    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula.  ``a`` must be
        greater than or equal to 0, but cannot be set to 1.  Default is 1000.
    """

    def __init__(self, a=1000.0):
        if a < 0 or a == 1:  # singularity
            raise ValueError("a must be >= 0, but cannot be set to 1")
        super().__init__()
        self.exp = a

    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        np.multiply(values, self.exp - 1.0, out=values)
        np.add(values, 1, out=values)
        _logn(self.exp, values, out=values)
        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return PowerDistStretch(a=self.exp)


class SquaredStretch(PowerStretch):
    r"""
    A convenience class for a power stretch of 2.

    The stretch is given by:

    .. math::
        y = x^2
    """

    def __init__(self):
        super().__init__(2)

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return SqrtStretch()


class LogStretch(BaseStretch):
    r"""
    A log stretch.

    The stretch is given by:

    .. math::
        y = \frac{\log{(a x + 1)}}{\log{(a + 1)}}

    Parameters
    ----------
    a : float
        The ``a`` parameter used in the above formula.  ``a`` must be
        greater than 0.  Default is 1000.
    """

    @property
    def _supports_invalid_kw(self):
        return True

    def __init__(self, a=1000.0):
        super().__init__()
        if a <= 0:  # singularity
            raise ValueError("a must be > 0")
        self.exp = a

    def __call__(self, values, clip=True, out=None, invalid=None):
        """
        Transform values using this stretch.

        Parameters
        ----------
        values : array-like
            The input values, which should already be normalized to the
            [0:1] range.
        clip : bool, optional
            If `True` (default), values outside the [0:1] range are
            clipped to the [0:1] range.
        out : ndarray, optional
            If specified, the output values will be placed in this array
            (typically used for in-place calculations).
        invalid : None or float, optional
            Value to assign NaN values generated by this class.  NaNs in
            the input ``values`` array are not changed.  This option is
            generally used with matplotlib normalization classes, where
            the ``invalid`` value should map to the matplotlib colormap
            "under" value (i.e., any finite value < 0).  If `None`, then
            NaN values are not replaced.  This keyword has no effect if
            ``clip=True``.

        Returns
        -------
        result : ndarray
            The transformed values.
        """
        values = _prepare(values, clip=clip, out=out)
        replace_invalid = not clip and invalid is not None
        with np.errstate(invalid='ignore'):
            if replace_invalid:
                idx = (values < 0)
            np.multiply(values, self.exp, out=values)
            np.add(values, 1., out=values)
            np.log(values, out=values)
            np.true_divide(values, np.log(self.exp + 1.), out=values)

        if replace_invalid:
            # Assign new NaN (i.e., NaN not in the original input values,
            # but generated by this class) to the invalid value.
            values[idx] = invalid

        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedLogStretch(self.exp)


class InvertedLogStretch(BaseStretch):
    r"""
    Inverse transformation for `~astropy.visualization.LogStretch`.

    The stretch is given by:

    .. math::
        y = \frac{e^{x \log{(a + 1)}} - 1}{a}
          = \frac{(a + 1)^x - 1}{a}

    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula.  ``a`` must be
        greater than 0.  Default is 1000.
    """

    def __init__(self, a):
        super().__init__()
        if a <= 0:  # singularity
            raise ValueError("a must be > 0")
        self.exp = a

    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        np.multiply(values, np.log(self.exp + 1.), out=values)
        np.exp(values, out=values)
        np.subtract(values, 1., out=values)
        np.true_divide(values, self.exp, out=values)
        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return LogStretch(self.exp)


class AsinhStretch(BaseStretch):
    r"""
    An asinh stretch.

    The stretch is given by:

    .. math::
        y = \frac{{\rm asinh}(x / a)}{{\rm asinh}(1 / a)}.

    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula.  The value of this
        parameter is where the asinh curve transitions from linear to
        logarithmic behavior, expressed as a fraction of the normalized
        image.  ``a`` must be greater than 0 and less than or equal to 1
        (0 < a <= 1).  Default is 0.1.
    """

    def __init__(self, a=0.1):
        super().__init__()
        if a <= 0 or a > 1:
            raise ValueError("a must be > 0 and <= 1")
        self.a = a

    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        np.true_divide(values, self.a, out=values)
        np.arcsinh(values, out=values)
        np.true_divide(values, np.arcsinh(1. / self.a), out=values)
        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return SinhStretch(a=1. / np.arcsinh(1. / self.a))


class SinhStretch(BaseStretch):
    r"""
    A sinh stretch.

    The stretch is given by:

    .. math::
        y = \frac{{\rm sinh}(x / a)}{{\rm sinh}(1 / a)}

    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula.  ``a`` must be
        greater than 0 and less than or equal to 1 (0 < a <= 1).
        Default is 1/3.
    """

    def __init__(self, a=1./3.):
        super().__init__()
        if a <= 0 or a > 1:
            raise ValueError("a must be > 0 and <= 1")
        self.a = a

    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        np.true_divide(values, self.a, out=values)
        np.sinh(values, out=values)
        np.true_divide(values, np.sinh(1. / self.a), out=values)
        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return AsinhStretch(a=1. / np.sinh(1. / self.a))


class HistEqStretch(BaseStretch):
    """
    A histogram equalization stretch.

    Parameters
    ----------
    data : array-like
        The data defining the equalization.
    values : array-like, optional
        The input image values, which should already be normalized to
        the [0:1] range.
    """

    def __init__(self, data, values=None):
        # Assume data is not necessarily normalized at this point
        self.data = np.sort(data.ravel())
        self.data = self.data[np.isfinite(self.data)]
        vmin = self.data.min()
        vmax = self.data.max()
        self.data = (self.data - vmin) / (vmax - vmin)

        # Compute relative position of each pixel
        if values is None:
            self.values = np.linspace(0., 1., len(self.data))
        else:
            self.values = values

    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        values[:] = np.interp(values, self.data, self.values)
        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedHistEqStretch(self.data, values=self.values)


class InvertedHistEqStretch(BaseStretch):
    """
    Inverse transformation for `~astropy.visualization.HistEqStretch`.

    Parameters
    ----------
    data : array-like
        The data defining the equalization.
    values : array-like, optional
        The input image values, which should already be normalized to
        the [0:1] range.
    """

    def __init__(self, data, values=None):
        self.data = data[np.isfinite(data)]
        if values is None:
            self.values = np.linspace(0., 1., len(self.data))
        else:
            self.values = values

    def __call__(self, values, clip=True, out=None):
        values = _prepare(values, clip=clip, out=out)
        values[:] = np.interp(values, self.values, self.data)
        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return HistEqStretch(self.data, values=self.values)


class ContrastBiasStretch(BaseStretch):
    r"""
    A stretch that takes into account contrast and bias.

    The stretch is given by:

    .. math::
        y = (x - {\rm bias}) * {\rm contrast} + 0.5

    and the output values are clipped to the [0:1] range.

    Parameters
    ----------
    contrast : float
        The contrast parameter (see the above formula).
    bias : float
        The bias parameter (see the above formula).
    """

    def __init__(self, contrast, bias):
        super().__init__()
        self.contrast = contrast
        self.bias = bias

    def __call__(self, values, clip=True, out=None):
        # As a special case here, we only clip *after* the
        # transformation since it does not map [0:1] to [0:1]
        values = _prepare(values, clip=False, out=out)

        np.subtract(values, self.bias, out=values)
        np.multiply(values, self.contrast, out=values)
        np.add(values, 0.5, out=values)

        if clip:
            np.clip(values, 0, 1, out=values)

        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedContrastBiasStretch(self.contrast, self.bias)


class InvertedContrastBiasStretch(BaseStretch):
    """
    Inverse transformation for ContrastBiasStretch.

    Parameters
    ----------
    contrast : float
        The contrast parameter (see
        `~astropy.visualization.ContrastBiasStretch`).
    bias : float
        The bias parameter (see
        `~astropy.visualization.ContrastBiasStretch`).
    """

    def __init__(self, contrast, bias):
        super().__init__()
        self.contrast = contrast
        self.bias = bias

    def __call__(self, values, clip=True, out=None):
        # As a special case here, we only clip *after* the
        # transformation since it does not map [0:1] to [0:1]
        values = _prepare(values, clip=False, out=out)

        np.subtract(values, 0.5, out=values)
        np.true_divide(values, self.contrast, out=values)
        np.add(values, self.bias, out=values)

        if clip:
            np.clip(values, 0, 1, out=values)

        return values

    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return ContrastBiasStretch(self.contrast, self.bias)


class CompositeStretch(CompositeTransform, BaseStretch):
    """
    A combination of two stretches.
    Parameters
    ----------
    stretch_1 : :class:`astropy.visualization.BaseStretch`
        The first stretch to apply.
    stretch_2 : :class:`astropy.visualization.BaseStretch`
        The second stretch to apply.
    """

    def __call__(self, values, clip=True, out=None):
        return self.transform_2(
            self.transform_1(values, clip=clip, out=out),
            clip=clip, out=out)
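# Example (illustrative sketch): stretches can be chained with ``+``; the
# right-hand stretch is applied first (see ``BaseStretch.__add__``)::
#
#     import numpy as np
#     from astropy.visualization import ContrastBiasStretch, SqrtStretch
#
#     stretch = SqrtStretch() + ContrastBiasStretch(contrast=2, bias=0.5)
#     # values -> sqrt(clip((values - 0.5) * 2 + 0.5, 0, 1))
#     out = stretch(np.linspace(0, 1, 5))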
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from copy import deepcopy
from inspect import signature
from itertools import islice
import warnings
from functools import wraps

from astropy.utils.exceptions import AstropyUserWarning

from .nddata import NDData

__all__ = ['support_nddata']

# All supported properties are optional except "data" which is mandatory!
SUPPORTED_PROPERTIES = ['data', 'uncertainty', 'mask', 'meta', 'unit', 'wcs',
                        'flags']


def support_nddata(_func=None, accepts=NDData,
                   repack=False, returns=None, keeps=None,
                   **attribute_argument_mapping):
    """Decorator to wrap functions that could accept an NDData instance with
    its properties passed as function arguments.

    Parameters
    ----------
    _func : callable, None, optional
        The function to decorate or ``None`` if used as factory. The first
        positional argument should be ``data`` and take a numpy array. It is
        possible to overwrite the name, see ``attribute_argument_mapping``
        argument.
        Default is ``None``.

    accepts : class, optional
        The class or subclass of ``NDData`` that should be unpacked before
        calling the function.
        Default is ``NDData``.

    repack : bool, optional
        Should be ``True`` if the return should be converted to the input
        class again after the wrapped function call.
        Default is ``False``.

        .. note::
            Must be ``True`` if either one of ``returns`` or ``keeps``
            is specified.

    returns : iterable, None, optional
        An iterable containing strings, each naming the attribute that the
        corresponding returned value should be set to on the repacked class.
        For example if a function returns data and mask, this should be
        ``['data', 'mask']``. If ``None`` assume the function only returns
        one argument: ``'data'``.
        Default is ``None``.

        .. note::
            Must be ``None`` if ``repack=False``.

    keeps : iterable, None, optional
        An iterable containing strings that indicate which values should be
        copied from the original input to the returned class. If ``None``
        assume that no attributes are copied.
        Default is ``None``.

        .. note::
            Must be ``None`` if ``repack=False``.

    attribute_argument_mapping :
        Keyword parameters that optionally indicate which function argument
        should be interpreted as which attribute on the input. By default
        it assumes the function takes a ``data`` argument as first argument,
        but if the first argument is called ``input`` one should pass
        ``support_nddata(..., data='input')`` to the function.

    Returns
    -------
    decorator_factory or decorated_function : callable
        If ``_func=None`` this returns a decorator, otherwise it returns the
        decorated ``_func``.

    Notes
    -----
    If a property of the ``NDData`` is set but has no corresponding function
    argument, a warning is shown.

    If a property of the ``NDData`` is set and the corresponding argument is
    also given explicitly, the explicitly given argument is used and a
    warning is shown.

    The supported properties are:

    - ``mask``
    - ``unit``
    - ``wcs``
    - ``meta``
    - ``uncertainty``
    - ``flags``

    Examples
    --------
    This function takes a Numpy array for the data, and some WCS information
    with the ``wcs`` keyword argument::

        def downsample(data, wcs=None):
            # downsample data and optionally WCS here
            pass

    However, you might have an NDData instance that has the ``wcs`` property
    set and you would like to be able to call the function with
    ``downsample(my_nddata)`` and have the WCS information, if present,
    automatically be passed to the ``wcs`` keyword argument.
    This decorator can be used to make this possible::

        @support_nddata
        def downsample(data, wcs=None):
            # downsample data and optionally WCS here
            pass

    This function can now either be called as before, specifying the data and
    WCS separately, or an NDData instance can be passed to the ``data``
    argument.
    """
    if (returns is not None or keeps is not None) and not repack:
        raise ValueError('returns or keeps should only be set if repack=True.')
    elif returns is None and repack:
        raise ValueError('returns should be set if repack=True.')
    else:
        # Use empty lists for returns and keeps so we don't need to check
        # if any of those is None later on.
        if returns is None:
            returns = []
        if keeps is None:
            keeps = []

    # Short version to avoid the long variable name later.
    attr_arg_map = attribute_argument_mapping
    if any(keep in returns for keep in keeps):
        raise ValueError("cannot specify the same attribute in `returns` and "
                         "`keeps`.")
    all_returns = returns + keeps

    def support_nddata_decorator(func):
        # Find out args and kwargs
        func_args, func_kwargs = [], []
        sig = signature(func).parameters
        for param_name, param in sig.items():
            if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
                raise ValueError("func may not have *args or **kwargs.")
            try:
                if param.default == param.empty:
                    func_args.append(param_name)
                else:
                    func_kwargs.append(param_name)
            # The comparison to param.empty may fail if the default is a
            # numpy array or something similar. So if the comparison fails
            # then it's quite obvious that there was a default and it should
            # be appended to the "func_kwargs".
            except ValueError as exc:
                if ('The truth value of an array with more than one element '
                        'is ambiguous.') in str(exc):
                    func_kwargs.append(param_name)
                else:
                    raise

        # First argument should be data
        if not func_args or func_args[0] != attr_arg_map.get('data', 'data'):
            raise ValueError("Can only wrap functions whose first positional "
                             "argument is `{}`"
                             "".format(attr_arg_map.get('data', 'data')))

        @wraps(func)
        def wrapper(data, *args, **kwargs):
            bound_args = signature(func).bind(data, *args, **kwargs)
            unpack = isinstance(data, accepts)
            input_data = data
            ignored = []
            if not unpack and isinstance(data, NDData):
                raise TypeError("Only NDData sub-classes that inherit from {}"
                                " can be used by this function"
                                "".format(accepts.__name__))

            # If data is an NDData instance, we can try and find properties
            # that can be passed as kwargs.
            if unpack:
                # We loop over a list of pre-defined properties
                for prop in islice(SUPPORTED_PROPERTIES, 1, None):
                    # We only need to do something if the property exists on
                    # the NDData object
                    try:
                        value = getattr(data, prop)
                    except AttributeError:
                        continue
                    # Skip if the property exists but is None or empty.
                    if prop == 'meta' and not value:
                        continue
                    elif value is None:
                        continue
                    # Warn if the property is set but not used by the
                    # function.
                    propmatch = attr_arg_map.get(prop, prop)
                    if propmatch not in func_kwargs:
                        ignored.append(prop)
                        continue

                    # Check if the property was explicitly given and issue a
                    # Warning if it is.
                    if propmatch in bound_args.arguments:
                        # If it's in the func_args it's trivial but if it was
                        # in the func_kwargs we need to compare it to the
                        # default.
                        # Comparison to the default is done by comparing their
                        # identity, this works because defaults in function
                        # signatures are only created once and always
                        # reference the same item.
                        # FIXME: Python interns some values, for example the
                        # integers from -5 to 255 (and maybe some other types
                        # as well). In that case the default is
                        # indistinguishable from an explicitly passed kwarg,
                        # and the wrapper won't notice that and will use the
                        # attribute of the NDData.
                        if (propmatch in func_args or
                                (propmatch in func_kwargs and
                                 (bound_args.arguments[propmatch] is not
                                  sig[propmatch].default))):
                            warnings.warn(
                                "Property {} has been passed explicitly and "
                                "as an NDData property{}, using explicitly "
                                "specified value"
                                "".format(propmatch, '' if prop == propmatch
                                          else ' ' + prop),
                                AstropyUserWarning)
                            continue
                    # Otherwise use the property as input for the function.
                    kwargs[propmatch] = value
                # Finally, replace data by the data attribute
                data = data.data

                if ignored:
                    warnings.warn("The following attributes were set on the "
                                  "data object, but will be ignored by the "
                                  "function: " + ", ".join(ignored),
                                  AstropyUserWarning)

            result = func(data, *args, **kwargs)

            if unpack and repack:
                # If there are multiple required returned arguments make sure
                # the result is a tuple (because we don't want to unpack
                # numpy arrays or compare their length, never!) and has the
                # same length.
                if len(returns) > 1:
                    if (not isinstance(result, tuple) or
                            len(returns) != len(result)):
                        raise ValueError("Function did not return the "
                                         "expected number of arguments.")
                elif len(returns) == 1:
                    result = [result]
                if keeps is not None:
                    for keep in keeps:
                        result.append(deepcopy(getattr(input_data, keep)))
                resultdata = result[all_returns.index('data')]
                resultkwargs = {ret: res
                                for ret, res in zip(all_returns, result)
                                if ret != 'data'}
                return input_data.__class__(resultdata, **resultkwargs)
            else:
                return result
        return wrapper

    # If _func is set, this means that the decorator was used without
    # parameters so we have to return the result of the
    # support_nddata_decorator decorator rather than the decorator itself
    if _func is not None:
        return support_nddata_decorator(_func)
    else:
        return support_nddata_decorator
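# Example (illustrative sketch; the function and attribute choices below are
# for demonstration only): with ``repack=True`` the wrapped function's return
# value is packed back into the input class, and ``keeps`` copies attributes
# over from the input::
#
#     import numpy as np
#     from astropy.nddata import NDData, support_nddata
#
#     @support_nddata(repack=True, returns=['data'], keeps=['unit'])
#     def double(data, unit=None):
#         return data * 2
#
#     ndd = NDData(np.arange(3), unit='adu')
#     result = double(ndd)   # NDData with doubled data and the input's unit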
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDDataBase class.

from abc import ABCMeta, abstractmethod

__all__ = ['NDDataBase']


class NDDataBase(metaclass=ABCMeta):
    """Abstract base class that defines the interface for N-dimensional
    datasets with associated meta information used in ``astropy``.

    All properties and ``__init__`` have to be overridden in subclasses. See
    `NDData` for a subclass that defines this interface on
    `numpy.ndarray`-like ``data``.

    See also: https://docs.astropy.org/en/stable/nddata/
    """

    @abstractmethod
    def __init__(self):
        pass

    @property
    @abstractmethod
    def data(self):
        """The stored dataset.
        """
        pass

    @property
    @abstractmethod
    def mask(self):
        """Mask for the dataset.

        Masks should follow the ``numpy`` convention that **valid** data
        points are marked by ``False`` and **invalid** ones with ``True``.
        """
        return None

    @property
    @abstractmethod
    def unit(self):
        """Unit for the dataset.
        """
        return None

    @property
    @abstractmethod
    def wcs(self):
        """World coordinate system (WCS) for the dataset.
        """
        return None

    @property
    @abstractmethod
    def meta(self):
        """Additional meta information about the dataset.

        Should be `dict`-like.
        """
        return None

    @property
    @abstractmethod
    def uncertainty(self):
        """Uncertainty in the dataset.

        Should have an attribute ``uncertainty_type`` that defines what kind
        of uncertainty is stored, such as ``"std"`` for standard deviation or
        ``"var"`` for variance.
        """
        return None
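# Example (illustrative sketch): a minimal concrete subclass has to override
# ``__init__`` and every abstract property before it can be instantiated.
# Overriding the remaining properties with plain class attributes is enough
# for a bare-bones container::
#
#     class MinimalNDData(NDDataBase):
#
#         mask = unit = wcs = meta = uncertainty = None
#
#         def __init__(self, data):
#             self._data = data
#
#         @property
#         def data(self):
#             return self._data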
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import numpy as np
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import weakref

from astropy import log
from astropy.units import Unit, Quantity, UnitConversionError

__all__ = ['MissingDataAssociationException',
           'IncompatibleUncertaintiesException', 'NDUncertainty',
           'StdDevUncertainty', 'UnknownUncertainty',
           'VarianceUncertainty', 'InverseVariance']


class IncompatibleUncertaintiesException(Exception):
    """This exception should be used to indicate cases in which uncertainties
    with two different classes can not be propagated.
    """


class MissingDataAssociationException(Exception):
    """This exception should be used to indicate that an uncertainty instance
    has not been associated with a parent `~astropy.nddata.NDData` object.
    """


class NDUncertainty(metaclass=ABCMeta):
    """This is the abstract base class for uncertainty classes used with
    `NDData`.

    Parameters
    ----------
    array : any type, optional
        The array or value (the parameter name is due to historical reasons)
        of the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or
        `NDUncertainty` subclasses are recommended.
        If the `array` is `list`-like or `numpy.ndarray`-like it will be cast
        to a plain `numpy.ndarray`.
        Default is ``None``.

    unit : unit-like, optional
        Unit for the uncertainty ``array``. Strings that can be converted to a
        `~astropy.units.Unit` are allowed.
        Default is ``None``.

    copy : `bool`, optional
        Indicates whether to save the `array` as a copy. ``True`` copies it
        before saving, while ``False`` tries to save every parameter as
        reference. Note however that it is not always possible to save the
        input as reference.
        Default is ``True``.

    Raises
    ------
    IncompatibleUncertaintiesException
        If ``array`` is given as another `NDUncertainty`-like instance whose
        ``uncertainty_type`` is different.
    """

    def __init__(self, array=None, copy=True, unit=None):
        if isinstance(array, NDUncertainty):
            # Given an NDUncertainty class or subclass check that the type
            # is the same.
            if array.uncertainty_type != self.uncertainty_type:
                raise IncompatibleUncertaintiesException
            # Check if two units are given and take the explicit one then.
            if (unit is not None and unit != array._unit):
                # TODO : Clarify it (see NDData.init for same problem)?
                log.info("overwriting Uncertainty's current "
                         "unit with specified unit.")
            elif array._unit is not None:
                unit = array.unit
            array = array.array

        elif isinstance(array, Quantity):
            # Check if two units are given and take the explicit one then.
            if (unit is not None and array.unit is not None and
                    unit != array.unit):
                log.info("overwriting Quantity's current "
                         "unit with specified unit.")
            elif array.unit is not None:
                unit = array.unit
            array = array.value

        if unit is None:
            self._unit = None
        else:
            self._unit = Unit(unit)

        if copy:
            array = deepcopy(array)
            unit = deepcopy(unit)

        self.array = array
        self.parent_nddata = None  # no associated NDData - until it is set!

    @property
    @abstractmethod
    def uncertainty_type(self):
        """`str` : Short description of the type of uncertainty.

        Defined as abstract property so subclasses *have* to override this.
        """
        return None

    @property
    def supports_correlated(self):
        """`bool` : Supports uncertainty propagation with correlated \
        uncertainties?

        .. versionadded:: 1.2
        """
        return False

    @property
    def array(self):
        """`numpy.ndarray` : the uncertainty's value.
        """
        return self._array

    @array.setter
    def array(self, value):
        if isinstance(value, (list, np.ndarray)):
            value = np.array(value, subok=False, copy=False)
        self._array = value

    @property
    def unit(self):
        """`~astropy.units.Unit` : The unit of the uncertainty, if any.
        """
        return self._unit

    @unit.setter
    def unit(self, value):
        """
        The unit should be set to a value consistent with the parent NDData
        unit and the uncertainty type.
        """
        if value is not None:
            # Check the hidden attribute below, not the property. The property
            # raises an exception if there is no parent_nddata.
            if self._parent_nddata is not None:
                parent_unit = self.parent_nddata.unit
                try:
                    # Check for consistency with the unit of the parent_nddata
                    self._data_unit_to_uncertainty_unit(parent_unit).to(value)
                except UnitConversionError:
                    raise UnitConversionError("Unit {} is incompatible "
                                              "with unit {} of parent "
                                              "nddata".format(value,
                                                              parent_unit))

            self._unit = Unit(value)
        else:
            self._unit = value

    @property
    def quantity(self):
        """
        This uncertainty as an `~astropy.units.Quantity` object.
        """
        return Quantity(self.array, self.unit, copy=False,
                        dtype=self.array.dtype)

    @property
    def parent_nddata(self):
        """`NDData` : reference to `NDData` instance with this uncertainty.

        In case the reference is not set, uncertainty propagation will not be
        possible since propagation might need the uncertain data besides the
        uncertainty.
        """
        no_parent_message = ("uncertainty is not associated with an NDData "
                             "object")
        parent_lost_message = (
            "the associated NDData object was deleted and cannot be accessed "
            "anymore. You can prevent the NDData object from being deleted by "
            "assigning it to a variable. If this happened after unpickling "
            "make sure you pickle the parent not the uncertainty directly."
        )
        try:
            parent = self._parent_nddata
        except AttributeError:
            raise MissingDataAssociationException(no_parent_message)
        else:
            if parent is None:
                raise MissingDataAssociationException(no_parent_message)
            else:
                # The NDData is saved as weak reference so we must call it
                # to get the object the reference points to. However because
                # we have a weak reference here it's possible that the parent
                # was deleted because its reference count dropped to zero.
                if isinstance(self._parent_nddata, weakref.ref):
                    resolved_parent = self._parent_nddata()
                    if resolved_parent is None:
                        log.info(parent_lost_message)
                    return resolved_parent
                else:
                    log.info("parent_nddata should be a weakref to an NDData "
                             "object.")
                    return self._parent_nddata

    @parent_nddata.setter
    def parent_nddata(self, value):
        if value is not None and not isinstance(value, weakref.ref):
            # Save a weak reference on the uncertainty that points to this
            # instance of NDData. Direct references should NOT be used:
            # https://github.com/astropy/astropy/pull/4799#discussion_r61236832
            value = weakref.ref(value)
        # Set _parent_nddata here and access below with the property because
        # value is a weakref
        self._parent_nddata = value

        # set uncertainty unit to that of the parent if it was not already
        # set, unless initializing with empty parent (value=None)
        if value is not None:
            parent_unit = self.parent_nddata.unit
            if self.unit is None:
                if parent_unit is None:
                    self.unit = None
                else:
                    # Set the uncertainty's unit to the appropriate value
                    self.unit = self._data_unit_to_uncertainty_unit(
                        parent_unit)
            else:
                # Check that units of uncertainty are compatible with those of
                # the parent. If they are, no need to change units of the
                # uncertainty or the data. If they are not, let the user know.
                unit_from_data = self._data_unit_to_uncertainty_unit(
                    parent_unit)
                try:
                    unit_from_data.to(self.unit)
                except UnitConversionError:
                    raise UnitConversionError("Unit {} of uncertainty "
                                              "incompatible with unit {} of "
                                              "data".format(self.unit,
                                                            parent_unit))

    @abstractmethod
    def _data_unit_to_uncertainty_unit(self, value):
        """
        Subclasses must override this method. It should take in a data unit
        and return the correct unit for the uncertainty given the uncertainty
        type.
        """
        return None

    def __repr__(self):
        prefix = self.__class__.__name__ + '('
        try:
            body = np.array2string(self.array, separator=', ', prefix=prefix)
        except AttributeError:
            # In case it wasn't possible to use array2string
            body = str(self.array)
        return ''.join([prefix, body, ')'])

    def __getstate__(self):
        # Because of the weak reference the class wouldn't be picklable.
        try:
            return self._array, self._unit, self.parent_nddata
        except MissingDataAssociationException:
            # In case there's no parent
            return self._array, self._unit, None

    def __setstate__(self, state):
        if len(state) != 3:
            raise TypeError('The state should contain 3 items.')
        self._array = state[0]
        self._unit = state[1]

        parent = state[2]
        if parent is not None:
            parent = weakref.ref(parent)
        self._parent_nddata = parent

    def __getitem__(self, item):
        """Normal slicing on the array, keep the unit and return a reference.
        """
        return self.__class__(self.array[item], unit=self.unit, copy=False)

    def propagate(self, operation, other_nddata, result_data, correlation):
        """Calculate the resulting uncertainty given an operation on the data.

        .. versionadded:: 1.2

        Parameters
        ----------
        operation : callable
            The operation that is performed on the `NDData`. Supported are
            `numpy.add`, `numpy.subtract`, `numpy.multiply` and
            `numpy.true_divide` (or `numpy.divide`).

        other_nddata : `NDData` instance
            The second operand in the arithmetic operation.

        result_data : `~astropy.units.Quantity` or ndarray
            The result of the arithmetic operations on the data.

        correlation : `numpy.ndarray` or number
            The correlation (rho) is defined between the uncertainties in
            sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means
            uncorrelated operands.

        Returns
        -------
        resulting_uncertainty : `NDUncertainty` instance
            Another instance of the same `NDUncertainty` subclass containing
            the uncertainty of the result.

        Raises
        ------
        ValueError
            If the ``operation`` is not supported or if correlation is not
            zero but the subclass does not support correlated uncertainties.

        Notes
        -----
        First this method checks if a correlation is given and the subclass
        implements propagation with correlated uncertainties.
        Then the second uncertainty is converted (or an Exception is raised)
        to the same class in order to do the propagation.
        Then the appropriate propagation method is invoked and the result is
        returned.
        """
        # Check if the subclass supports correlation
        if not self.supports_correlated:
            if isinstance(correlation, np.ndarray) or correlation != 0:
                raise ValueError("{} does not support uncertainty propagation"
                                 " with correlation."
                                 "".format(self.__class__.__name__))

        # Get the other uncertainty (and convert it to a matching one)
        other_uncert = self._convert_uncertainty(other_nddata.uncertainty)

        if operation.__name__ == 'add':
            result = self._propagate_add(other_uncert, result_data,
                                         correlation)
        elif operation.__name__ == 'subtract':
            result = self._propagate_subtract(other_uncert, result_data,
                                              correlation)
        elif operation.__name__ == 'multiply':
            result = self._propagate_multiply(other_uncert, result_data,
                                              correlation)
        elif operation.__name__ in ['true_divide', 'divide']:
            result = self._propagate_divide(other_uncert, result_data,
                                            correlation)
        else:
            raise ValueError('unsupported operation')

        return self.__class__(result, copy=False)

    def _convert_uncertainty(self, other_uncert):
        """Checks if the uncertainties are compatible for propagation.

        Checks if the other uncertainty is `NDUncertainty`-like and if so
        verifies that the uncertainty_type is equal. If the latter is not the
        case try returning ``self.__class__(other_uncert)``.

        Parameters
        ----------
        other_uncert : `NDUncertainty` subclass
            The other uncertainty.

        Returns
        -------
        other_uncert : `NDUncertainty` subclass
            but converted to a compatible `NDUncertainty` subclass if
            possible and necessary.

        Raises
        ------
        IncompatibleUncertaintiesException:
            If the other uncertainty cannot be converted to a compatible
            `NDUncertainty` subclass.
        """
        if isinstance(other_uncert, NDUncertainty):
            if self.uncertainty_type == other_uncert.uncertainty_type:
                return other_uncert
            else:
                return self.__class__(other_uncert)
        else:
            raise IncompatibleUncertaintiesException

    @abstractmethod
    def _propagate_add(self, other_uncert, result_data, correlation):
        return None

    @abstractmethod
    def _propagate_subtract(self, other_uncert, result_data, correlation):
        return None

    @abstractmethod
    def _propagate_multiply(self, other_uncert, result_data, correlation):
        return None

    @abstractmethod
    def _propagate_divide(self, other_uncert, result_data, correlation):
        return None


class UnknownUncertainty(NDUncertainty):
    """This class implements any unknown uncertainty type.

    The main purpose of having an unknown uncertainty class is to prevent
    uncertainty propagation.

    Parameters
    ----------
    args, kwargs : see `NDUncertainty`
    """

    @property
    def supports_correlated(self):
        """`False` : Uncertainty propagation is *not* possible for this class.
        """
        return False

    @property
    def uncertainty_type(self):
        """``"unknown"`` : `UnknownUncertainty` implements any unknown \
        uncertainty type.
        """
        return 'unknown'

    def _data_unit_to_uncertainty_unit(self, value):
        """
        No way to convert if uncertainty is unknown.
        """
        return None

    def _convert_uncertainty(self, other_uncert):
        """Raise an Exception because unknown uncertainty types cannot
        implement propagation.
        """
        msg = "Uncertainties of unknown type cannot be propagated."
        raise IncompatibleUncertaintiesException(msg)

    def _propagate_add(self, other_uncert, result_data, correlation):
        """Not possible for unknown uncertainty types.
        """
        return None

    def _propagate_subtract(self, other_uncert, result_data, correlation):
        return None

    def _propagate_multiply(self, other_uncert, result_data, correlation):
        return None

    def _propagate_divide(self, other_uncert, result_data, correlation):
        return None


class _VariancePropagationMixin:
    """
    Propagation of uncertainties for variances, also used to perform error
    propagation for variance-like uncertainties (standard deviation and
    inverse variance).
    """

    def _propagate_add_sub(self, other_uncert, result_data, correlation,
                           subtract=False,
                           to_variance=lambda x: x,
                           from_variance=lambda x: x):
        """
        Error propagation for addition or subtraction of variance or
        variance-like uncertainties. Uncertainties are calculated using the
        formulae for variance but can be used for uncertainties convertible
        to a variance.

        Parameters
        ----------
        other_uncert : `~astropy.nddata.NDUncertainty` instance
            The uncertainty, if any, of the other operand.

        result_data : `~astropy.nddata.NDData` instance
            The results of the operation on the data.

        correlation : float or array-like
            Correlation of the uncertainties.

        subtract : bool, optional
            If ``True``, propagate for subtraction, otherwise propagate for
            addition.

        to_variance : function, optional
            Function that will transform the input uncertainties to variance.
            The default assumes the uncertainty is the variance.

        from_variance : function, optional
            Function that will convert from variance to the input
            uncertainty. The default assumes the uncertainty is the variance.
        """
        if subtract:
            correlation_sign = -1
        else:
            correlation_sign = 1

        try:
            result_unit_sq = result_data.unit ** 2
        except AttributeError:
            result_unit_sq = None

        if other_uncert.array is not None:
            # Formula: sigma**2 = dB
            if (other_uncert.unit is not None and
                    result_unit_sq != to_variance(other_uncert.unit)):
                # If the other uncertainty has a unit and this unit differs
                # from the unit of the result convert it to the results unit
                other = to_variance(
                    other_uncert.array << other_uncert.unit).to(
                        result_unit_sq).value
            else:
                other = to_variance(other_uncert.array)
        else:
            other = 0

        if self.array is not None:
            # Formula: sigma**2 = dA
            if (self.unit is not None and
                    to_variance(self.unit) != self.parent_nddata.unit**2):
                # If the uncertainty has a different unit than the result we
                # need to convert it to the results unit.
                this = to_variance(self.array << self.unit).to(
                    result_unit_sq).value
            else:
                this = to_variance(self.array)
        else:
            this = 0

        # Formula: sigma**2 = dA + dB +/- 2*cor*sqrt(dA*dB)
        # Formula: sigma**2 = sigma_other + sigma_self +/- 2*cor*sqrt(dA*dB)
        #     (sign depends on whether addition or subtraction)

        # Determine the result depending on the correlation
        if isinstance(correlation, np.ndarray) or correlation != 0:
            corr = 2 * correlation * np.sqrt(this * other)
            result = this + other + correlation_sign * corr
        else:
            result = this + other

        return from_variance(result)

    def _propagate_multiply_divide(self, other_uncert, result_data,
                                   correlation, divide=False,
                                   to_variance=lambda x: x,
                                   from_variance=lambda x: x):
        """
        Error propagation for multiplication or division of variance or
        variance-like uncertainties. Uncertainties are calculated using the
        formulae for variance but can be used for uncertainties convertible
        to a variance.

        Parameters
        ----------
        other_uncert : `~astropy.nddata.NDUncertainty` instance
            The uncertainty, if any, of the other operand.

        result_data : `~astropy.nddata.NDData` instance
            The results of the operation on the data.

        correlation : float or array-like
            Correlation of the uncertainties.

        divide : bool, optional
            If ``True``, propagate for division, otherwise propagate for
            multiplication.

        to_variance : function, optional
            Function that will transform the input uncertainties to variance.
            The default assumes the uncertainty is the variance.

        from_variance : function, optional
            Function that will convert from variance to the input
            uncertainty. The default assumes the uncertainty is the variance.
        """
        # For multiplication we don't need the result as quantity
        if isinstance(result_data, Quantity):
            result_data = result_data.value

        if divide:
            correlation_sign = -1
        else:
            correlation_sign = 1

        if other_uncert.array is not None:
            # We want the result to have a unit consistent with the parent, so
            # we only need to convert the unit of the other uncertainty if it
            # is different from its data's unit.
            if (other_uncert.unit and
                to_variance(1 * other_uncert.unit) !=
                    ((1 * other_uncert.parent_nddata.unit)**2).unit):
                d_b = to_variance(
                    other_uncert.array << other_uncert.unit).to(
                        (1 * other_uncert.parent_nddata.unit)**2).value
            else:
                d_b = to_variance(other_uncert.array)
            # Formula: sigma**2 = |A|**2 * d_b
            right = np.abs(self.parent_nddata.data**2 * d_b)
        else:
            right = 0

        if self.array is not None:
            # Just the reversed case
            if (self.unit and
                to_variance(1 * self.unit) !=
                    ((1 * self.parent_nddata.unit)**2).unit):
                d_a = to_variance(self.array << self.unit).to(
                    (1 * self.parent_nddata.unit)**2).value
            else:
                d_a = to_variance(self.array)
            # Formula: sigma**2 = |B|**2 * d_a
            left = np.abs(other_uncert.parent_nddata.data**2 * d_a)
        else:
            left = 0

        # Multiplication
        #
        # The fundamental formula is:
        #   sigma**2 = |AB|**2 * (d_a/A**2 + d_b/B**2
        #                         + 2*sqrt(d_a)/A * sqrt(d_b)/B * cor)
        #
        # This formula is not very handy since it generates NaNs for every
        # zero in A and B. So we rewrite it:
        #
        # Multiplication Formula:
        #   sigma**2 = d_a*B**2 + d_b*A**2 + 2*cor*A*B*sqrt(d_a*d_b)
        #   sigma**2 = left + right + 2*cor*A*B*sqrt(d_a*d_b)
        #
        # Division
        #
        # The fundamental formula for division is:
        #   sigma**2 = |A/B|**2 * (d_a/A**2 + d_b/B**2
        #                          - 2*sqrt(d_a)/A * sqrt(d_b)/B * cor)
        #
        # As with multiplication, it is convenient to rewrite this to avoid
        # nans where A is zero.
        #
        # Division formula (rewritten):
        #   sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
        #              - 2 * cor * A * sqrt(d_a*d_b) / B**3
        #   sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
        #              - 2*cor * sqrt(d_a)/B**2 * sqrt(d_b) * A / B
        #   sigma**2 = multiplication formula/B**4 (and sign change in
        #              the correlation)

        if isinstance(correlation, np.ndarray) or correlation != 0:
            corr = (2 * correlation * np.sqrt(d_a * d_b) *
                    self.parent_nddata.data *
                    other_uncert.parent_nddata.data)
        else:
            corr = 0

        if divide:
            return from_variance((left + right + correlation_sign * corr) /
                                 other_uncert.parent_nddata.data**4)
        else:
            return from_variance(left + right + correlation_sign * corr)


class StdDevUncertainty(_VariancePropagationMixin, NDUncertainty):
    """Standard deviation uncertainty assuming first order gaussian error
    propagation.

    This class implements uncertainty propagation for ``addition``,
    ``subtraction``, ``multiplication`` and ``division`` with other instances
    of `StdDevUncertainty`. The class can handle if the uncertainty has a
    unit that differs from (but is convertible to) the parent's `NDData`
    unit. The unit of the resulting uncertainty will have the same unit as
    the resulting data. Support for correlation is also possible but requires
    the correlation as input. It cannot handle correlation determination
    itself.

    Parameters
    ----------
    args, kwargs : see `NDUncertainty`

    Examples
    --------
    `StdDevUncertainty` should always be associated with an `NDData`-like
    instance, either by creating it during initialization::

        >>> from astropy.nddata import NDData, StdDevUncertainty
        >>> ndd = NDData([1,2,3], unit='m',
        ...             uncertainty=StdDevUncertainty([0.1, 0.1, 0.1]))
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        StdDevUncertainty([0.1, 0.1, 0.1])

    or by setting it manually on the `NDData` instance::

        >>> ndd.uncertainty = StdDevUncertainty([0.2], unit='m', copy=True)
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        StdDevUncertainty([0.2])

    the uncertainty ``array`` can also be set directly::

        >>> ndd.uncertainty.array = 2
        >>> ndd.uncertainty
        StdDevUncertainty(2)

    .. note::
        The unit will not be displayed.
    """

    @property
    def supports_correlated(self):
        """`True` : `StdDevUncertainty` allows the propagation of correlated \
        uncertainties.

        ``correlation`` must be given, this class does not implement computing
        it by itself.
        """
        return True

    @property
    def uncertainty_type(self):
        """``"std"`` : `StdDevUncertainty` implements standard deviation.
        """
        return 'std'

    def _convert_uncertainty(self, other_uncert):
        if isinstance(other_uncert, StdDevUncertainty):
            return other_uncert
        else:
            raise IncompatibleUncertaintiesException

    def _propagate_add(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=False,
                                          to_variance=np.square,
                                          from_variance=np.sqrt)

    def _propagate_subtract(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=True,
                                          to_variance=np.square,
                                          from_variance=np.sqrt)

    def _propagate_multiply(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=False,
                                                  to_variance=np.square,
                                                  from_variance=np.sqrt)

    def _propagate_divide(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=True,
                                                  to_variance=np.square,
                                                  from_variance=np.sqrt)

    def _data_unit_to_uncertainty_unit(self, value):
        return value


class VarianceUncertainty(_VariancePropagationMixin, NDUncertainty):
    """
    Variance uncertainty assuming first order Gaussian error
    propagation.

    This class implements uncertainty propagation for ``addition``,
    ``subtraction``, ``multiplication`` and ``division`` with other instances
    of `VarianceUncertainty`. The class can handle if the uncertainty has a
    unit that differs from (but is convertible to) the parent's `NDData`
    unit. The unit of the resulting uncertainty will be the square of the
    unit of the resulting data. Support for correlation is also possible but
    requires the correlation as input. It cannot handle correlation
    determination itself.

    Parameters
    ----------
    args, kwargs : see `NDUncertainty`

    Examples
    --------
    Compare this example to that in `StdDevUncertainty`; the uncertainties
    in the examples below are equivalent to the uncertainties in
    `StdDevUncertainty`.

    `VarianceUncertainty` should always be associated with an `NDData`-like
    instance, either by creating it during initialization::

        >>> from astropy.nddata import NDData, VarianceUncertainty
        >>> ndd = NDData([1,2,3], unit='m',
        ...             uncertainty=VarianceUncertainty([0.01, 0.01, 0.01]))
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        VarianceUncertainty([0.01, 0.01, 0.01])

    or by setting it manually on the `NDData` instance::

        >>> ndd.uncertainty = VarianceUncertainty([0.04], unit='m^2', copy=True)
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        VarianceUncertainty([0.04])

    the uncertainty ``array`` can also be set directly::

        >>> ndd.uncertainty.array = 4
        >>> ndd.uncertainty
        VarianceUncertainty(4)

    .. note::
        The unit will not be displayed.
    """

    @property
    def uncertainty_type(self):
        """``"var"`` : `VarianceUncertainty` implements variance.
        """
        return 'var'

    @property
    def supports_correlated(self):
        """`True` : `VarianceUncertainty` allows the propagation of \
        correlated uncertainties.

        ``correlation`` must be given, this class does not implement computing
        it by itself.
        """
        return True

    def _propagate_add(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=False)

    def _propagate_subtract(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=True)

    def _propagate_multiply(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=False)

    def _propagate_divide(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=True)

    def _data_unit_to_uncertainty_unit(self, value):
        return value ** 2


def _inverse(x):
    """Just a simple inverse for use in the InverseVariance"""
    return 1 / x


class InverseVariance(_VariancePropagationMixin, NDUncertainty):
    """
    Inverse variance uncertainty assuming first order Gaussian error
    propagation.

    This class implements uncertainty propagation for ``addition``,
    ``subtraction``, ``multiplication`` and ``division`` with other instances
    of `InverseVariance`. The class can handle if the uncertainty has a unit
    that differs from (but is convertible to) the parent's `NDData` unit. The
    unit of the resulting uncertainty will be the inverse square of the unit
    of the resulting data. Support for correlation is also possible but
    requires the correlation as input. It cannot handle correlation
    determination itself.

    Parameters
    ----------
    args, kwargs : see `NDUncertainty`

    Examples
    --------
    Compare this example to that in `StdDevUncertainty`; the uncertainties
    in the examples below are equivalent to the uncertainties in
    `StdDevUncertainty`.

    `InverseVariance` should always be associated with an `NDData`-like
    instance, either by creating it during initialization::

        >>> from astropy.nddata import NDData, InverseVariance
        >>> ndd = NDData([1,2,3], unit='m',
        ...             uncertainty=InverseVariance([100, 100, 100]))
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        InverseVariance([100, 100, 100])

    or by setting it manually on the `NDData` instance::

        >>> ndd.uncertainty = InverseVariance([25], unit='1/m^2', copy=True)
        >>> ndd.uncertainty  # doctest: +FLOAT_CMP
        InverseVariance([25])

    the uncertainty ``array`` can also be set directly::

        >>> ndd.uncertainty.array = 0.25
        >>> ndd.uncertainty
        InverseVariance(0.25)

    .. note::
        The unit will not be displayed.
    """

    @property
    def uncertainty_type(self):
        """``"ivar"`` : `InverseVariance` implements inverse variance.
        """
        return 'ivar'

    @property
    def supports_correlated(self):
        """`True` : `InverseVariance` allows the propagation of correlated \
        uncertainties.

        ``correlation`` must be given, this class does not implement computing
        it by itself.
        """
        return True

    def _propagate_add(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=False,
                                          to_variance=_inverse,
                                          from_variance=_inverse)

    def _propagate_subtract(self, other_uncert, result_data, correlation):
        return super()._propagate_add_sub(other_uncert, result_data,
                                          correlation, subtract=True,
                                          to_variance=_inverse,
                                          from_variance=_inverse)

    def _propagate_multiply(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=False,
                                                  to_variance=_inverse,
                                                  from_variance=_inverse)

    def _propagate_divide(self, other_uncert, result_data, correlation):
        return super()._propagate_multiply_divide(other_uncert,
                                                  result_data, correlation,
                                                  divide=True,
                                                  to_variance=_inverse,
                                                  from_variance=_inverse)

    def _data_unit_to_uncertainty_unit(self, value):
        return 1 / value ** 2
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from collections import OrderedDict

import numpy as np

from astropy.utils.misc import isiterable

__all__ = ['FlagCollection']


class FlagCollection(OrderedDict):
    """
    The purpose of this class is to provide a dictionary for
    containing arrays of flags for the `NDData` class. Flags should be
    stored in Numpy arrays that have the same dimensions as the parent
    data, so the `FlagCollection` class adds shape checking to an
    ordered dictionary class.

    The `FlagCollection` should be initialized like an
    `~collections.OrderedDict`, but with the addition of a ``shape=``
    keyword argument used to pass the NDData shape.
    """

    def __init__(self, *args, **kwargs):
        if 'shape' in kwargs:
            self.shape = kwargs.pop('shape')
            if not isiterable(self.shape):
                raise ValueError("FlagCollection shape should be "
                                 "an iterable object")
        else:
            raise Exception("FlagCollection should be initialized with "
                            "the shape of the data")
        OrderedDict.__init__(self, *args, **kwargs)

    def __setitem__(self, item, value, **kwargs):
        if isinstance(value, np.ndarray):
            if value.shape == self.shape:
                OrderedDict.__setitem__(self, item, value, **kwargs)
            else:
                raise ValueError("flags array shape {} does not match data "
                                 "shape {}".format(value.shape, self.shape))
        else:
            raise TypeError("flags should be given as a Numpy array")
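# Example (illustrative sketch)::
#
#     import numpy as np
#
#     flags = FlagCollection(shape=(2, 3))
#     flags['saturated'] = np.zeros((2, 3), dtype=bool)   # accepted
#     flags['bad'] = np.zeros((4, 4), dtype=bool)         # raises ValueError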
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This module implements the base NDData class. import numpy as np from copy import deepcopy from .nddata_base import NDDataBase from .nduncertainty import NDUncertainty, UnknownUncertainty from astropy import log from astropy.units import Unit, Quantity from astropy.utils.metadata import MetaData from astropy.wcs.wcsapi import (BaseLowLevelWCS, BaseHighLevelWCS, SlicedLowLevelWCS, HighLevelWCSWrapper) __all__ = ['NDData'] _meta_doc = """`dict`-like : Additional meta information about the dataset.""" class NDData(NDDataBase): """ A container for `numpy.ndarray`-based datasets, using the `~astropy.nddata.NDDataBase` interface. The key distinction from raw `numpy.ndarray` is the presence of additional metadata such as uncertainty, mask, unit, a coordinate system and/or a dictionary containing further meta information. This class *only* provides a container for *storing* such datasets. For further functionality take a look at the ``See also`` section. See also: https://docs.astropy.org/en/stable/nddata/ Parameters ---------- data : `numpy.ndarray`-like or `NDData`-like The dataset. uncertainty : any type, optional Uncertainty in the dataset. Should have an attribute ``uncertainty_type`` that defines what kind of uncertainty is stored, for example ``"std"`` for standard deviation or ``"var"`` for variance. A metaclass defining such an interface is `NDUncertainty` - but isn't mandatory. If the uncertainty has no such attribute the uncertainty is stored as `UnknownUncertainty`. Defaults to ``None``. mask : any type, optional Mask for the dataset. Masks should follow the ``numpy`` convention that **valid** data points are marked by ``False`` and **invalid** ones with ``True``. Defaults to ``None``. wcs : any type, optional World coordinate system (WCS) for the dataset. Default is ``None``. meta : `dict`-like object, optional Additional meta information about the dataset. If no meta is provided an empty `collections.OrderedDict` is created. Default is ``None``. unit : unit-like, optional Unit for the dataset. Strings that can be converted to a `~astropy.units.Unit` are allowed. Default is ``None``. copy : `bool`, optional Indicates whether to save the arguments as copy. ``True`` copies every attribute before saving it while ``False`` tries to save every parameter as reference. Note however that it is not always possible to save the input as reference. Default is ``False``. .. versionadded:: 1.2 Raises ------ TypeError In case ``data`` or ``meta`` don't meet the restrictions. Notes ----- Each attribute can be accessed through the homonymous instance attribute: ``data`` in a `NDData` object can be accessed through the `data` attribute:: >>> from astropy.nddata import NDData >>> nd = NDData([1,2,3]) >>> nd.data array([1, 2, 3]) Given a conflicting implicit and an explicit parameter during initialization, for example the ``data`` is a `~astropy.units.Quantity` and the unit parameter is not ``None``, then the implicit parameter is replaced (without conversion) by the explicit one and a warning is issued:: >>> import numpy as np >>> import astropy.units as u >>> q = np.array([1,2,3,4]) * u.m >>> nd2 = NDData(q, unit=u.cm) INFO: overwriting Quantity's current unit with specified unit. [astropy.nddata.nddata] >>> nd2.data # doctest: +FLOAT_CMP array([1., 2., 3., 4.]) >>> nd2.unit Unit("cm") See also -------- NDDataRef NDDataArray """ # Instead of a custom property use the MetaData descriptor also used for # Tables. 
It will check if the meta is dict-like or raise an exception. meta = MetaData(doc=_meta_doc, copy=False) def __init__(self, data, uncertainty=None, mask=None, wcs=None, meta=None, unit=None, copy=False): # Rather pointless since the NDDataBase does not implement any setting # but before the NDDataBase did call the uncertainty # setter. But if anyone wants to alter this behavior again the call # to the superclass NDDataBase should be in here. super().__init__() # Check if data is any type from which to collect some implicitly # passed parameters. if isinstance(data, NDData): # don't use self.__class__ (issue #4137) # Of course we need to check the data because subclasses with other # init-logic might be passed in here. We could skip these # tests if we compared for self.__class__ but that has other # drawbacks. # Comparing if there is an explicit and an implicit unit parameter. # If that is the case use the explicit one and issue a warning # that there might be a conflict. In case there is no explicit # unit just overwrite the unit parameter with the NDData.unit # and proceed as if that one was given as parameter. Same for the # other parameters. if (unit is not None and data.unit is not None and unit != data.unit): log.info("overwriting NDData's current " "unit with specified unit.") elif data.unit is not None: unit = data.unit if uncertainty is not None and data.uncertainty is not None: log.info("overwriting NDData's current " "uncertainty with specified uncertainty.") elif data.uncertainty is not None: uncertainty = data.uncertainty if mask is not None and data.mask is not None: log.info("overwriting NDData's current " "mask with specified mask.") elif data.mask is not None: mask = data.mask if wcs is not None and data.wcs is not None: log.info("overwriting NDData's current " "wcs with specified wcs.") elif data.wcs is not None: wcs = data.wcs if meta is not None and data.meta is not None: log.info("overwriting NDData's current " "meta with specified meta.") elif data.meta is not None: meta = data.meta data = data.data else: if hasattr(data, 'mask') and hasattr(data, 'data'): # Separating data and mask if mask is not None: log.info("overwriting Masked Objects's current " "mask with specified mask.") else: mask = data.mask # Just save the data for further processing, we could be given # a masked Quantity or something else entirely. Better to check # it first. data = data.data if isinstance(data, Quantity): if unit is not None and unit != data.unit: log.info("overwriting Quantity's current " "unit with specified unit.") else: unit = data.unit data = data.value # Quick check on the parameters if they match the requirements. if (not hasattr(data, 'shape') or not hasattr(data, '__getitem__') or not hasattr(data, '__array__')): # Data doesn't look like a numpy array, try converting it to # one. data = np.array(data, subok=True, copy=False) # Another quick check to see if what we got looks like an array # rather than an object (since numpy will convert a # non-numerical/non-string inputs to an array of objects). if data.dtype == 'O': raise TypeError("could not convert data to numpy array.") if unit is not None: unit = Unit(unit) if copy: # Data might have been copied before but no way of validating # without another variable. 
            data = deepcopy(data)
            mask = deepcopy(mask)
            wcs = deepcopy(wcs)
            meta = deepcopy(meta)
            uncertainty = deepcopy(uncertainty)
            # Actually - copying the unit is unnecessary but better safe
            # than sorry :-)
            unit = deepcopy(unit)

        # Store the attributes
        self._data = data
        self.mask = mask
        self._wcs = None
        if wcs is not None:
            # Validate the wcs
            self.wcs = wcs
        self.meta = meta
        # TODO: Make this call the setter sometime
        self._unit = unit
        # Call the setter for uncertainty to further check the uncertainty
        self.uncertainty = uncertainty

    def __str__(self):
        data = str(self.data)
        unit = f" {self.unit}" if self.unit is not None else ''
        return data + unit

    def __repr__(self):
        prefix = self.__class__.__name__ + '('
        data = np.array2string(self.data, separator=', ', prefix=prefix)
        unit = f", unit='{self.unit}'" if self.unit is not None else ''
        return ''.join((prefix, data, unit, ')'))

    @property
    def data(self):
        """
        `~numpy.ndarray`-like : The stored dataset.
        """
        return self._data

    @property
    def mask(self):
        """
        any type : Mask for the dataset, if any.

        Masks should follow the ``numpy`` convention that valid data points
        are marked by ``False`` and invalid ones with ``True``.
        """
        return self._mask

    @mask.setter
    def mask(self, value):
        self._mask = value

    @property
    def unit(self):
        """
        `~astropy.units.Unit` : Unit for the dataset, if any.
        """
        return self._unit

    @property
    def wcs(self):
        """
        any type : A world coordinate system (WCS) for the dataset, if any.
        """
        return self._wcs

    @wcs.setter
    def wcs(self, wcs):
        if self._wcs is not None and wcs is not None:
            raise ValueError("the wcs attribute can only be set if no WCS "
                             "is already present.")

        if wcs is None or isinstance(wcs, BaseHighLevelWCS):
            self._wcs = wcs
        elif isinstance(wcs, BaseLowLevelWCS):
            self._wcs = HighLevelWCSWrapper(wcs)
        else:
            raise TypeError("The wcs argument must implement either the "
                            "high or low level WCS API.")

    @property
    def uncertainty(self):
        """
        any type : Uncertainty in the dataset, if any.

        Should have an attribute ``uncertainty_type`` that defines what kind
        of uncertainty is stored, such as ``'std'`` for standard deviation or
        ``'var'`` for variance. A metaclass defining such an interface is
        `~astropy.nddata.NDUncertainty` but isn't mandatory.
        """
        return self._uncertainty

    @uncertainty.setter
    def uncertainty(self, value):
        if value is not None:
            # There is one requirement on the uncertainty: that it has an
            # attribute 'uncertainty_type'. If it does not meet this
            # requirement, wrap it in an UnknownUncertainty.
            if not hasattr(value, 'uncertainty_type'):
                log.info('uncertainty should have attribute '
                         'uncertainty_type.')
                value = UnknownUncertainty(value, copy=False)

            # If it is a subclass of NDUncertainty we must set the
            # parent_nddata attribute. (#4152)
            if isinstance(value, NDUncertainty):
                # If the uncertainty already has a parent, create a new
                # instance so that we don't steal the uncertainty from
                # another NDData object.
                if value._parent_nddata is not None:
                    value = value.__class__(value, copy=False)
                # Then link it to this NDData instance (internally this is
                # saved as a weakref, which the NDUncertainty setter takes
                # care of).
                value.parent_nddata = self
        self._uncertainty = value
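
# A minimal usage sketch (illustrative, not part of the original module):
# a Quantity donates its unit unless an explicit one is given, and an
# uncertainty without an ``uncertainty_type`` attribute is wrapped in
# UnknownUncertainty by the setter above.
if __name__ == '__main__':
    from astropy import units as u

    nd = NDData(np.arange(3) * u.adu, uncertainty=[0.1, 0.1, 0.1])
    print(nd.unit)                         # adu, taken from the Quantity
    print(type(nd.uncertainty).__name__)   # UnknownUncertainty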
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The `astropy.nddata` subpackage provides the `~astropy.nddata.NDData`
class and related tools to manage n-dimensional array-based data (e.g.
CCD images, IFU data, grid-based simulation data, ...). This is more than
just `numpy.ndarray` objects, because it provides metadata that cannot
be easily provided by a single array.
"""

from .nddata import *
from .nddata_base import *
from .nddata_withmixins import *
from .nduncertainty import *
from .flag_collection import *
from .decorators import *
from .mixins.ndarithmetic import *
from .mixins.ndslicing import *
from .mixins.ndio import *
from .blocks import *
from .compat import *
from .utils import *
from .ccddata import *
from .bitmask import *

from astropy import config as _config


class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astropy.nddata`.
    """

    warn_unsupported_correlated = _config.ConfigItem(
        True,
        'Whether to issue a warning if `~astropy.nddata.NDData` arithmetic '
        'is performed with uncertainties and the uncertainties do not '
        'support the propagation of correlated uncertainties.'
    )

    warn_setting_unit_directly = _config.ConfigItem(
        True,
        'Whether to issue a warning when the `~astropy.nddata.NDData` unit '
        'attribute is changed from a non-``None`` value to another value, '
        'since data values/uncertainties are not scaled with the unit '
        'change.'
    )


conf = Conf()
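
# A minimal usage sketch (illustrative, not part of the original module):
# like other astropy configuration namespaces, the items above can be
# changed at runtime or temporarily scoped with ``set_temp``.
if __name__ == '__main__':
    print(conf.warn_unsupported_correlated)      # True by default
    with conf.set_temp('warn_unsupported_correlated', False):
        print(conf.warn_unsupported_correlated)  # False inside the block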
""" A module that provides functions for manipulating bit masks and data quality (DQ) arrays. """ import warnings import numbers from collections import OrderedDict import numpy as np __all__ = ['bitfield_to_boolean_mask', 'interpret_bit_flags', 'BitFlagNameMap', 'extend_bit_flag_map', 'InvalidBitFlag'] _ENABLE_BITFLAG_CACHING = True _MAX_UINT_TYPE = np.maximum_sctype(np.uint) _SUPPORTED_FLAGS = int(np.bitwise_not( 0, dtype=_MAX_UINT_TYPE, casting='unsafe' )) def _is_bit_flag(n): """ Verifies if the input number is a bit flag (i.e., an integer number that is an integer power of 2). Parameters ---------- n : int A positive integer number. Non-positive integers are considered not to be "flags". Returns ------- bool ``True`` if input ``n`` is a bit flag and ``False`` if it is not. """ if n < 1: return False return bin(n).count('1') == 1 def _is_int(n): return ( (isinstance(n, numbers.Integral) and not isinstance(n, bool)) or (isinstance(n, np.generic) and np.issubdtype(n, np.integer)) ) class InvalidBitFlag(ValueError): """ Indicates that a value is not an integer that is a power of 2. """ pass class BitFlag(int): """ Bit flags: integer values that are powers of 2. """ def __new__(cls, val, doc=None): if isinstance(val, tuple): if doc is not None: raise ValueError("Flag's doc string cannot be provided twice.") val, doc = val if not (_is_int(val) and _is_bit_flag(val)): raise InvalidBitFlag( "Value '{}' is not a valid bit flag: bit flag value must be " "an integral power of two.".format(val) ) s = int.__new__(cls, val) if doc is not None: s.__doc__ = doc return s class BitFlagNameMeta(type): def __new__(mcls, name, bases, members): for k, v in members.items(): if not k.startswith('_'): v = BitFlag(v) attr = [k for k in members.keys() if not k.startswith('_')] attrl = list(map(str.lower, attr)) if _ENABLE_BITFLAG_CACHING: cache = OrderedDict() for b in bases: for k, v in b.__dict__.items(): if k.startswith('_'): continue kl = k.lower() if kl in attrl: idx = attrl.index(kl) raise AttributeError("Bit flag '{:s}' was already defined." .format(attr[idx])) if _ENABLE_BITFLAG_CACHING: cache[kl] = v members = {k: v if k.startswith('_') else BitFlag(v) for k, v in members.items()} if _ENABLE_BITFLAG_CACHING: cache.update({k.lower(): v for k, v in members.items() if not k.startswith('_')}) members = {'_locked': True, '__version__': '', **members, '_cache': cache} else: members = {'_locked': True, '__version__': '', **members} return super().__new__(mcls, name, bases, members) def __setattr__(cls, name, val): if name == '_locked': return super().__setattr__(name, True) else: if name == '__version__': if cls._locked: raise AttributeError("Version cannot be modified.") return super().__setattr__(name, val) err_msg = f"Bit flags are read-only. 
Unable to reassign attribute {name}" if cls._locked: raise AttributeError(err_msg) namel = name.lower() if _ENABLE_BITFLAG_CACHING: if not namel.startswith('_') and namel in cls._cache: raise AttributeError(err_msg) else: for b in cls.__bases__: if not namel.startswith('_') and namel in list(map(str.lower, b.__dict__)): raise AttributeError(err_msg) if namel in list(map(str.lower, cls.__dict__)): raise AttributeError(err_msg) val = BitFlag(val) if _ENABLE_BITFLAG_CACHING and not namel.startswith('_'): cls._cache[namel] = val return super().__setattr__(name, val) def __getattr__(cls, name): if _ENABLE_BITFLAG_CACHING: flagnames = cls._cache else: flagnames = {k.lower(): v for k, v in cls.__dict__.items()} flagnames.update({k.lower(): v for b in cls.__bases__ for k, v in b.__dict__.items()}) try: return flagnames[name.lower()] except KeyError: raise AttributeError(f"Flag '{name}' not defined") def __getitem__(cls, key): return cls.__getattr__(key) def __add__(cls, items): if not isinstance(items, dict): if not isinstance(items[0], (tuple, list)): items = [items] items = dict(items) return extend_bit_flag_map( cls.__name__ + '_' + '_'.join([k for k in items]), cls, **items ) def __iadd__(cls, other): raise NotImplementedError( "Unary '+' is not supported. Use binary operator instead." ) def __delattr__(cls, name): raise AttributeError("{:s}: cannot delete {:s} member." .format(cls.__name__, cls.mro()[-2].__name__)) def __delitem__(cls, name): raise AttributeError("{:s}: cannot delete {:s} member." .format(cls.__name__, cls.mro()[-2].__name__)) def __repr__(cls): return f"<{cls.mro()[-2].__name__:s} '{cls.__name__:s}'>" class BitFlagNameMap(metaclass=BitFlagNameMeta): """ A base class for bit flag name maps used to describe data quality (DQ) flags of images by provinding a mapping from a mnemonic flag name to a flag value. Mapping for a specific instrument should subclass this class. Subclasses should define flags as class attributes with integer values that are powers of 2. Each bit flag may also contain a string comment following the flag value. Examples -------- >>> from astropy.nddata.bitmask import BitFlagNameMap >>> class ST_DQ(BitFlagNameMap): ... __version__ = '1.0.0' # optional ... CR = 1, 'Cosmic Ray' ... CLOUDY = 4 # no docstring comment ... RAINY = 8, 'Dome closed' ... >>> class ST_CAM1_DQ(ST_DQ): ... HOT = 16 ... DEAD = 32 """ pass def extend_bit_flag_map(cls_name, base_cls=BitFlagNameMap, **kwargs): """ A convenience function for creating bit flags maps by subclassing an existing map and adding additional flags supplied as keyword arguments. Parameters ---------- cls_name : str Class name of the bit flag map to be created. base_cls : BitFlagNameMap, optional Base class for the new bit flag map. **kwargs : int Each supplied keyword argument will be used to define bit flag names in the new map. In addition to bit flag names, ``__version__`` is allowed to indicate the version of the newly created map. 
Examples -------- >>> from astropy.nddata.bitmask import extend_bit_flag_map >>> ST_DQ = extend_bit_flag_map('ST_DQ', __version__='1.0.0', CR=1, CLOUDY=4, RAINY=8) >>> ST_CAM1_DQ = extend_bit_flag_map('ST_CAM1_DQ', ST_DQ, HOT=16, DEAD=32) >>> ST_CAM1_DQ['HOT'] # <-- Access flags as dictionary keys 16 >>> ST_CAM1_DQ.HOT # <-- Access flags as class attributes 16 """ new_cls = BitFlagNameMeta.__new__( BitFlagNameMeta, cls_name, (base_cls, ), {'_locked': False} ) for k, v in kwargs.items(): try: setattr(new_cls, k, v) except AttributeError as e: if new_cls[k] != int(v): raise e new_cls._locked = True return new_cls def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None): """ Converts input bit flags to a single integer value (bit mask) or `None`. When input is a list of flags (either a Python list of integer flags or a string of comma-, ``'|'``-, or ``'+'``-separated list of flags), the returned bit mask is obtained by summing input flags. .. note:: In order to flip the bits of the returned bit mask, for input of `str` type, prepend '~' to the input string. '~' must be prepended to the *entire string* and not to each bit flag! For input that is already a bit mask or a Python list of bit flags, set ``flip_bits`` for `True` in order to flip the bits of the returned bit mask. Parameters ---------- bit_flags : int, str, list, None An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or ``'+'``-separated list of integer bit flags or mnemonic flag names, or a Python list of integer bit flags. If ``bit_flags`` is a `str` and if it is prepended with '~', then the output bit mask will have its bits flipped (compared to simple sum of input flags). For input ``bit_flags`` that is already a bit mask or a Python list of bit flags, bit-flipping can be controlled through ``flip_bits`` parameter. .. note:: When ``bit_flags`` is a list of flag names, the ``flag_name_map`` parameter must be provided. .. note:: Only one flag separator is supported at a time. ``bit_flags`` string should not mix ``','``, ``'+'``, and ``'|'`` separators. flip_bits : bool, None Indicates whether or not to flip the bits of the returned bit mask obtained from input bit flags. This parameter must be set to `None` when input ``bit_flags`` is either `None` or a Python list of flags. flag_name_map : BitFlagNameMap A `BitFlagNameMap` object that provides mapping from mnemonic bit flag names to integer bit values in order to translate mnemonic flags to numeric values when ``bit_flags`` that are comma- or '+'-separated list of menmonic bit flag names. Returns ------- bitmask : int or None Returns an integer bit mask formed from the input bit value or `None` if input ``bit_flags`` parameter is `None` or an empty string. If input string value was prepended with '~' (or ``flip_bits`` was set to `True`), then returned value will have its bits flipped (inverse mask). 
Examples -------- >>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map >>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32) >>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28)) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16')) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ)) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16')) '1111111111100011' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)')) '1111111111100011' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)', ... flag_name_map=ST_DQ)) '1111111111100011' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16])) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True)) '1111111111100011' """ has_flip_bits = flip_bits is not None flip_bits = bool(flip_bits) allow_non_flags = False if _is_int(bit_flags): return (~int(bit_flags) if flip_bits else int(bit_flags)) elif bit_flags is None: if has_flip_bits: raise TypeError( "Keyword argument 'flip_bits' must be set to 'None' when " "input 'bit_flags' is None." ) return None elif isinstance(bit_flags, str): if has_flip_bits: raise TypeError( "Keyword argument 'flip_bits' is not permitted for " "comma-separated string lists of bit flags. Prepend '~' to " "the string to indicate bit-flipping." ) bit_flags = str(bit_flags).strip() if bit_flags.upper() in ['', 'NONE', 'INDEF']: return None # check whether bitwise-NOT is present and if it is, check that it is # in the first position: bitflip_pos = bit_flags.find('~') if bitflip_pos == 0: flip_bits = True bit_flags = bit_flags[1:].lstrip() else: if bitflip_pos > 0: raise ValueError("Bitwise-NOT must precede bit flag list.") flip_bits = False # basic check for correct use of parenthesis: while True: nlpar = bit_flags.count('(') nrpar = bit_flags.count(')') if nlpar == 0 and nrpar == 0: break if nlpar != nrpar: raise ValueError("Unbalanced parentheses in bit flag list.") lpar_pos = bit_flags.find('(') rpar_pos = bit_flags.rfind(')') if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1): raise ValueError("Incorrect syntax (incorrect use of " "parenthesis) in bit flag list.") bit_flags = bit_flags[1:-1].strip() if sum(k in bit_flags for k in '+,|') > 1: raise ValueError( "Only one type of bit flag separator may be used in one " "expression. Allowed separators are: '+', '|', or ','." ) if ',' in bit_flags: bit_flags = bit_flags.split(',') elif '+' in bit_flags: bit_flags = bit_flags.split('+') elif '|' in bit_flags: bit_flags = bit_flags.split('|') else: if bit_flags == '': raise ValueError( "Empty bit flag lists not allowed when either bitwise-NOT " "or parenthesis are present." 
) bit_flags = [bit_flags] if flag_name_map is not None: try: int(bit_flags[0]) except ValueError: bit_flags = [flag_name_map[f] for f in bit_flags] allow_non_flags = len(bit_flags) == 1 elif hasattr(bit_flags, '__iter__'): if not all([_is_int(flag) for flag in bit_flags]): if (flag_name_map is not None and all([isinstance(flag, str) for flag in bit_flags])): bit_flags = [flag_name_map[f] for f in bit_flags] else: raise TypeError("Every bit flag in a list must be either an " "integer flag value or a 'str' flag name.") else: raise TypeError("Unsupported type for argument 'bit_flags'.") bitset = set(map(int, bit_flags)) if len(bitset) != len(bit_flags): warnings.warn("Duplicate bit flags will be ignored") bitmask = 0 for v in bitset: if not _is_bit_flag(v) and not allow_non_flags: raise ValueError("Input list contains invalid (not powers of two) " "bit flag: {:d}".format(v)) bitmask += v if flip_bits: bitmask = ~bitmask return bitmask def bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, good_mask_value=False, dtype=np.bool_, flag_name_map=None): """ bitfield_to_boolean_mask(bitfield, ignore_flags=None, flip_bits=None, \ good_mask_value=False, dtype=numpy.bool_) Converts an array of bit fields to a boolean (or integer) mask array according to a bit mask constructed from the supplied bit flags (see ``ignore_flags`` parameter). This function is particularly useful to convert data quality arrays to boolean masks with selective filtering of DQ flags. Parameters ---------- bitfield : ndarray An array of bit flags. By default, values different from zero are interpreted as "bad" values and values equal to zero are considered as "good" values. However, see ``ignore_flags`` parameter on how to selectively ignore some bits in the ``bitfield`` array data. ignore_flags : int, str, list, None (default = 0) An integer bit mask, `None`, a Python list of bit flags, a comma-, or ``'|'``-separated, ``'+'``-separated string list of integer bit flags or mnemonic flag names that indicate what bits in the input ``bitfield`` should be *ignored* (i.e., zeroed), or `None`. .. note:: When ``bit_flags`` is a list of flag names, the ``flag_name_map`` parameter must be provided. | Setting ``ignore_flags`` to `None` effectively will make `bitfield_to_boolean_mask` interpret all ``bitfield`` elements as "good" regardless of their value. | When ``ignore_flags`` argument is an integer bit mask, it will be combined using bitwise-NOT and bitwise-AND with each element of the input ``bitfield`` array (``~ignore_flags & bitfield``). If the resultant bitfield element is non-zero, that element will be interpreted as a "bad" in the output boolean mask and it will be interpreted as "good" otherwise. ``flip_bits`` parameter may be used to flip the bits (``bitwise-NOT``) of the bit mask thus effectively changing the meaning of the ``ignore_flags`` parameter from "ignore" to "use only" these flags. .. note:: Setting ``ignore_flags`` to 0 effectively will assume that all non-zero elements in the input ``bitfield`` array are to be interpreted as "bad". | When ``ignore_flags`` argument is a Python list of integer bit flags, these flags are added together to create an integer bit mask. Each item in the list must be a flag, i.e., an integer that is an integer power of 2. In order to flip the bits of the resultant bit mask, use ``flip_bits`` parameter. 
| Alternatively, ``ignore_flags`` may be a string of comma- or ``'+'``(or ``'|'``)-separated list of integer bit flags that should be added (bitwise OR) together to create an integer bit mask. For example, both ``'4,8'``, ``'4|8'``, and ``'4+8'`` are equivalent and indicate that bit flags 4 and 8 in the input ``bitfield`` array should be ignored when generating boolean mask. .. note:: ``'None'``, ``'INDEF'``, and empty (or all white space) strings are special values of string ``ignore_flags`` that are interpreted as `None`. .. note:: Each item in the list must be a flag, i.e., an integer that is an integer power of 2. In addition, for convenience, an arbitrary **single** integer is allowed and it will be interpreted as an integer bit mask. For example, instead of ``'4,8'`` one could simply provide string ``'12'``. .. note:: Only one flag separator is supported at a time. ``ignore_flags`` string should not mix ``','``, ``'+'``, and ``'|'`` separators. .. note:: When ``ignore_flags`` is a `str` and when it is prepended with '~', then the meaning of ``ignore_flags`` parameters will be reversed: now it will be interpreted as a list of bit flags to be *used* (or *not ignored*) when deciding which elements of the input ``bitfield`` array are "bad". Following this convention, an ``ignore_flags`` string value of ``'~0'`` would be equivalent to setting ``ignore_flags=None``. .. warning:: Because prepending '~' to a string ``ignore_flags`` is equivalent to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used with string ``ignore_flags`` and it must be set to `None`. flip_bits : bool, None (default = None) Specifies whether or not to invert the bits of the bit mask either supplied directly through ``ignore_flags`` parameter or built from the bit flags passed through ``ignore_flags`` (only when bit flags are passed as Python lists of integer bit flags). Occasionally, it may be useful to *consider only specific bit flags* in the ``bitfield`` array when creating a boolean mask as opposed to *ignoring* specific bit flags as ``ignore_flags`` behaves by default. This can be achieved by inverting/flipping the bits of the bit mask created from ``ignore_flags`` flags which effectively changes the meaning of the ``ignore_flags`` parameter from "ignore" to "use only" these flags. Setting ``flip_bits`` to `None` means that no bit flipping will be performed. Bit flipping for string lists of bit flags must be specified by prepending '~' to string bit flag lists (see documentation for ``ignore_flags`` for more details). .. warning:: This parameter can be set to either `True` or `False` **ONLY** when ``ignore_flags`` is either an integer bit mask or a Python list of integer bit flags. When ``ignore_flags`` is either `None` or a string list of flags, ``flip_bits`` **MUST** be set to `None`. good_mask_value : int, bool (default = False) This parameter is used to derive the values that will be assigned to the elements in the output boolean mask array that correspond to the "good" bit fields (that are 0 after zeroing bits specified by ``ignore_flags``) in the input ``bitfield`` array. When ``good_mask_value`` is non-zero or ``numpy.True_`` then values in the output boolean mask array corresponding to "good" bit fields in ``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``) or 1 (if ``dtype`` is of numerical type) and values of corresponding to "bad" flags will be ``numpy.False_`` (or 0). 
When ``good_mask_value`` is zero or ``numpy.False_`` then the values in the output boolean mask array corresponding to "good" bit fields in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is ``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values of corresponding to "bad" flags will be ``numpy.True_`` (or 1). dtype : data-type (default = ``numpy.bool_``) The desired data-type for the output binary mask array. flag_name_map : BitFlagNameMap A `BitFlagNameMap` object that provides mapping from mnemonic bit flag names to integer bit values in order to translate mnemonic flags to numeric values when ``bit_flags`` that are comma- or '+'-separated list of menmonic bit flag names. Returns ------- mask : ndarray Returns an array of the same dimensionality as the input ``bitfield`` array whose elements can have two possible values, e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer ``dtype``) according to values of to the input ``bitfield`` elements, ``ignore_flags`` parameter, and the ``good_mask_value`` parameter. Examples -------- >>> from astropy.nddata import bitmask >>> import numpy as np >>> dqarr = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0], ... [10, 4, 0, 0, 0, 16, 6, 0]]) >>> flag_map = bitmask.extend_bit_flag_map( ... 'ST_DQ', CR=2, CLOUDY=4, RAINY=8, HOT=16, DEAD=32 ... ) >>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0, ... dtype=int) array([[0, 0, 1, 1, 0, 1, 1, 0], [1, 1, 0, 0, 0, 1, 1, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0, ... dtype=bool) array([[False, False, True, True, False, True, True, False], [ True, True, False, False, False, True, True, False]]...) >>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, ... good_mask_value=0, dtype=int) array([[0, 0, 1, 0, 0, 1, 1, 0], [1, 0, 0, 0, 0, 1, 0, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=~6, ... good_mask_value=0, dtype=int) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, dtype=int, ... flip_bits=True, good_mask_value=0) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(2+4)', ... good_mask_value=0, dtype=int) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=[2, 4], ... flip_bits=True, good_mask_value=0, ... dtype=int) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR,CLOUDY)', ... good_mask_value=0, dtype=int, ... flag_name_map=flag_map) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) >>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR+CLOUDY)', ... good_mask_value=0, dtype=int, ... 
flag_name_map=flag_map) array([[0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0]]) """ bitfield = np.asarray(bitfield) if not np.issubdtype(bitfield.dtype, np.integer): raise TypeError("Input bitfield array must be of integer type.") ignore_mask = interpret_bit_flags(ignore_flags, flip_bits=flip_bits, flag_name_map=flag_name_map) if ignore_mask is None: if good_mask_value: mask = np.ones_like(bitfield, dtype=dtype) else: mask = np.zeros_like(bitfield, dtype=dtype) return mask # filter out bits beyond the maximum supported by the data type: ignore_mask = ignore_mask & _SUPPORTED_FLAGS # invert the "ignore" mask: ignore_mask = np.bitwise_not(ignore_mask, dtype=bitfield.dtype.type, casting='unsafe') mask = np.empty_like(bitfield, dtype=np.bool_) np.bitwise_and(bitfield, ignore_mask, out=mask, casting='unsafe') if good_mask_value: np.logical_not(mask, out=mask) return mask.astype(dtype=dtype, subok=False, copy=False)
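
# A minimal usage sketch (illustrative, not part of the original module):
# a typical round trip declares mnemonic flags with extend_bit_flag_map and
# then converts a data quality array into a boolean mask that ignores a
# chosen subset of the flags. The map name and flag names are made up.
if __name__ == '__main__':
    dq = np.array([0, 1, 4, 5, 8, 12])
    dq_map = extend_bit_flag_map('DEMO_DQ', CR=1, CLOUDY=4, RAINY=8)

    # Ignore CR hits: only the CLOUDY/RAINY bits mark a pixel as bad.
    bad = bitfield_to_boolean_mask(dq, ignore_flags='CR',
                                   flag_name_map=dq_map)
    print(bad)  # [False False  True  True  True  True]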
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""

import numpy as np

from .decorators import support_nddata

__all__ = ['reshape_as_blocks', 'block_reduce', 'block_replicate']


def _process_block_inputs(data, block_size):
    data = np.asanyarray(data)
    block_size = np.atleast_1d(block_size)

    if np.any(block_size <= 0):
        raise ValueError('block_size elements must be strictly positive')

    if data.ndim > 1 and len(block_size) == 1:
        block_size = np.repeat(block_size, data.ndim)

    if len(block_size) != data.ndim:
        raise ValueError('block_size must be a scalar or have the same '
                         'length as the number of data dimensions')

    block_size_int = block_size.astype(int)
    if np.any(block_size_int != block_size):  # e.g., 2.0 is OK, 2.1 is not
        raise ValueError('block_size elements must be integers')

    return data, block_size_int


def reshape_as_blocks(data, block_size):
    """
    Reshape a data array into blocks.

    This is useful to efficiently apply functions on block subsets of
    the data instead of using loops. The reshaped array is a view of
    the input data array.

    .. versionadded:: 4.1

    Parameters
    ----------
    data : ndarray
        The input data array.

    block_size : int or array-like (int)
        The integer block size along each axis. If ``block_size`` is a
        scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis. Each dimension of
        ``block_size`` must divide evenly into the corresponding
        dimension of ``data``.

    Returns
    -------
    output : ndarray
        The reshaped array as a view of the input ``data`` array.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.nddata import reshape_as_blocks
    >>> data = np.arange(16).reshape(4, 4)
    >>> data
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> reshape_as_blocks(data, (2, 2))
    array([[[[ 0,  1],
             [ 4,  5]],
            [[ 2,  3],
             [ 6,  7]]],
           [[[ 8,  9],
             [12, 13]],
            [[10, 11],
             [14, 15]]]])
    """
    data, block_size = _process_block_inputs(data, block_size)

    if np.any(np.mod(data.shape, block_size) != 0):
        raise ValueError('Each dimension of block_size must divide evenly '
                         'into the corresponding dimension of data')

    nblocks = np.array(data.shape) // block_size
    new_shape = tuple(k for ij in zip(nblocks, block_size) for k in ij)
    nblocks_idx = tuple(range(0, len(new_shape), 2))  # even indices
    block_idx = tuple(range(1, len(new_shape), 2))  # odd indices

    return data.reshape(new_shape).transpose(nblocks_idx + block_idx)


@support_nddata
def block_reduce(data, block_size, func=np.sum):
    """
    Downsample a data array by applying a function to local blocks.

    If ``data`` is not perfectly divisible by ``block_size`` along a
    given axis then the data will be trimmed (from the end) along that
    axis.

    Parameters
    ----------
    data : array-like
        The data to be resampled.

    block_size : int or array-like (int)
        The integer block size along each axis. If ``block_size`` is a
        scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.

    func : callable, optional
        The method to use to downsample the data. Must be a callable
        that takes in a `~numpy.ndarray` along with an ``axis`` keyword,
        which defines the axis or axes along which the function is
        applied. The ``axis`` keyword must accept multiple axes as a
        tuple. The default is `~numpy.sum`, which provides block
        summation (and conserves the data sum).

    Returns
    -------
    output : array-like
        The resampled data.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.nddata import block_reduce
    >>> data = np.arange(16).reshape(4, 4)
    >>> block_reduce(data, 2)  # doctest: +FLOAT_CMP
    array([[10, 18],
           [42, 50]])
    >>> block_reduce(data, 2, func=np.mean)  # doctest: +FLOAT_CMP
    array([[ 2.5,  4.5],
           [10.5, 12.5]])
    """
    data, block_size = _process_block_inputs(data, block_size)

    nblocks = np.array(data.shape) // block_size
    size_init = nblocks * block_size  # evenly-divisible size

    # trim data if necessary
    for axis in range(data.ndim):
        if data.shape[axis] != size_init[axis]:
            data = data.swapaxes(0, axis)
            data = data[:size_init[axis]]
            data = data.swapaxes(0, axis)

    reshaped = reshape_as_blocks(data, block_size)
    axis = tuple(range(data.ndim, reshaped.ndim))

    return func(reshaped, axis=axis)


@support_nddata
def block_replicate(data, block_size, conserve_sum=True):
    """
    Upsample a data array by block replication.

    Parameters
    ----------
    data : array-like
        The data to be block replicated.

    block_size : int or array-like (int)
        The integer block size along each axis. If ``block_size`` is a
        scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.

    conserve_sum : bool, optional
        If `True` (the default) then the sum of the output
        block-replicated data will equal the sum of the input ``data``.

    Returns
    -------
    output : array-like
        The block-replicated data.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.nddata import block_replicate
    >>> data = np.array([[0., 1.], [2., 3.]])
    >>> block_replicate(data, 2)  # doctest: +FLOAT_CMP
    array([[0.  , 0.  , 0.25, 0.25],
           [0.  , 0.  , 0.25, 0.25],
           [0.5 , 0.5 , 0.75, 0.75],
           [0.5 , 0.5 , 0.75, 0.75]])
    >>> block_replicate(data, 2, conserve_sum=False)  # doctest: +FLOAT_CMP
    array([[0., 0., 1., 1.],
           [0., 0., 1., 1.],
           [2., 2., 3., 3.],
           [2., 2., 3., 3.]])
    """
    data, block_size = _process_block_inputs(data, block_size)
    for i in range(data.ndim):
        data = np.repeat(data, block_size[i], axis=i)

    if conserve_sum:
        data = data / float(np.prod(block_size))

    return data
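
# A minimal usage sketch (illustrative, not part of the original module):
# block_reduce is reshape_as_blocks followed by a reduction over the block
# axes; for evenly divisible input the two spellings below agree.
if __name__ == '__main__':
    data = np.arange(16).reshape(4, 4)
    blocks = reshape_as_blocks(data, 2)   # shape (2, 2, 2, 2)
    manual = blocks.sum(axis=(2, 3))      # reduce each 2x2 block
    assert np.array_equal(manual, block_reduce(data, 2))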
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""

import itertools

import numpy as np

from .compat import NDDataArray
from .nduncertainty import (
    StdDevUncertainty, NDUncertainty, VarianceUncertainty, InverseVariance)
from astropy.io import fits, registry
from astropy import units as u
from astropy import log
from astropy.wcs import WCS
from astropy.utils.decorators import sharedmethod

__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']

_known_uncertainties = (StdDevUncertainty, VarianceUncertainty,
                        InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}

# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True


def _arithmetic(op):
    """Decorator factory which temporarily disables the need for a unit when
    creating a new CCDData instance. The final result must have a unit.

    Parameters
    ----------
    op : function
        The function to apply. Supported are:

        - ``np.add``
        - ``np.subtract``
        - ``np.multiply``
        - ``np.true_divide``

    Notes
    -----
    Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
    ``multiply`` because only these methods from NDArithmeticMixin are
    overwritten.
    """
    def decorator(func):
        def inner(self, operand, operand2=None, **kwargs):
            global _config_ccd_requires_unit
            _config_ccd_requires_unit = False
            result = self._prepare_then_do_arithmetic(op, operand,
                                                      operand2, **kwargs)
            # Wrap it again as CCDData so it checks the final unit.
            _config_ccd_requires_unit = True
            return result.__class__(result)
        inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
        return sharedmethod(inner)
    return decorator


def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit,
                                           parent_unit):
    if uncertainty_type is StdDevUncertainty:
        return unit == parent_unit
    elif uncertainty_type is VarianceUncertainty:
        return unit == (parent_unit ** 2)
    elif uncertainty_type is InverseVariance:
        return unit == (1 / (parent_unit ** 2))
    raise ValueError(f"unsupported uncertainty type: {uncertainty_type}")


class CCDData(NDDataArray):
    """A class describing basic CCD data.

    The CCDData class is based on the NDData object and includes a data
    array, uncertainty frame, mask frame, flag frame, metadata, units, and
    WCS information for a single CCD image.

    Parameters
    ----------
    data : `~astropy.nddata.CCDData`-like or array-like
        The actual data contained in this `~astropy.nddata.CCDData` object.
        Note that the data will always be saved by *reference*, so you should
        make a copy of the ``data`` before passing it in if that's the
        desired behavior.

    uncertainty : `~astropy.nddata.StdDevUncertainty`, \
            `~astropy.nddata.VarianceUncertainty`, \
            `~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
            None, optional
        Uncertainties on the data. If the uncertainty is a `numpy.ndarray`,
        it is assumed to be, and stored as, a
        `~astropy.nddata.StdDevUncertainty`.
        Default is ``None``.

    mask : `numpy.ndarray` or None, optional
        Mask for the data, given as a boolean Numpy array with a shape
        matching that of the data. The values must be `False` where
        the data is *valid* and `True` when it is not (like Numpy
        masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
        ignored.
        Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \ optional Flags giving information about each pixel. These can be specified either as a Numpy array of any type with a shape matching that of the data, or as a `~astropy.nddata.FlagCollection` instance which has a shape matching that of the data. Default is ``None``. wcs : `~astropy.wcs.WCS` or None, optional WCS-object containing the world coordinate system for the data. Default is ``None``. meta : dict-like object or None, optional Metadata for this object. "Metadata" here means all information that is included with this object but not part of any other attribute of this particular object, e.g. creation date, unique identifier, simulation parameters, exposure time, telescope name, etc. unit : `~astropy.units.Unit` or str, optional The units of the data. Default is ``None``. .. warning:: If the unit is ``None`` or not otherwise specified it will raise a ``ValueError`` Raises ------ ValueError If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g., match shape) onto ``data``. Methods ------- read(\\*args, \\**kwargs) ``Classmethod`` to create an CCDData instance based on a ``FITS`` file. This method uses :func:`fits_ccddata_reader` with the provided parameters. write(\\*args, \\**kwargs) Writes the contents of the CCDData instance into a new ``FITS`` file. This method uses :func:`fits_ccddata_writer` with the provided parameters. Attributes ---------- known_invalid_fits_unit_strings A dictionary that maps commonly-used fits unit name strings that are technically invalid to the correct valid unit type (or unit string). This is primarily for variant names like "ELECTRONS/S" which are not formally valid, but are unambiguous and frequently enough encountered that it is convenient to map them to the correct unit. Notes ----- `~astropy.nddata.CCDData` objects can be easily converted to a regular Numpy array using `numpy.asarray`. For example:: >>> from astropy.nddata import CCDData >>> import numpy as np >>> x = CCDData([1,2,3], unit='adu') >>> np.asarray(x) array([1, 2, 3]) This is useful, for example, when plotting a 2D image using matplotlib. >>> from astropy.nddata import CCDData >>> from matplotlib import pyplot as plt # doctest: +SKIP >>> x = CCDData([[1,2,3], [4,5,6]], unit='adu') >>> plt.imshow(x) # doctest: +SKIP """ def __init__(self, *args, **kwd): if 'meta' not in kwd: kwd['meta'] = kwd.pop('header', None) if 'header' in kwd: raise ValueError("can't have both header and meta.") super().__init__(*args, **kwd) if self._wcs is not None: llwcs = self._wcs.low_level_wcs if not isinstance(llwcs, WCS): raise TypeError("the wcs must be a WCS instance.") self._wcs = llwcs # Check if a unit is set. This can be temporarily disabled by the # _CCDDataUnit contextmanager. if _config_ccd_requires_unit and self.unit is None: raise ValueError("a unit for CCDData must be specified.") def _slice_wcs(self, item): """ Override the WCS slicing behaviour so that the wcs attribute continues to be an `astropy.wcs.WCS`. 
""" if self.wcs is None: return None try: return self.wcs[item] except Exception as err: self._handle_wcs_slicing_error(err, item) @property def data(self): return self._data @data.setter def data(self, value): self._data = value @property def wcs(self): return self._wcs @wcs.setter def wcs(self, value): if value is not None and not isinstance(value, WCS): raise TypeError("the wcs must be a WCS instance.") self._wcs = value @property def unit(self): return self._unit @unit.setter def unit(self, value): self._unit = u.Unit(value) @property def header(self): return self._meta @header.setter def header(self, value): self.meta = value @property def uncertainty(self): return self._uncertainty @uncertainty.setter def uncertainty(self, value): if value is not None: if isinstance(value, NDUncertainty): if getattr(value, '_parent_nddata', None) is not None: value = value.__class__(value, copy=False) self._uncertainty = value elif isinstance(value, np.ndarray): if value.shape != self.shape: raise ValueError("uncertainty must have same shape as " "data.") self._uncertainty = StdDevUncertainty(value) log.info("array provided for uncertainty; assuming it is a " "StdDevUncertainty.") else: raise TypeError("uncertainty must be an instance of a " "NDUncertainty object or a numpy array.") self._uncertainty.parent_nddata = self else: self._uncertainty = value def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT', hdu_flags=None, wcs_relax=True, key_uncertainty_type='UTYPE'): """Creates an HDUList object from a CCDData object. Parameters ---------- hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional If it is a string append this attribute to the HDUList as `~astropy.io.fits.ImageHDU` with the string as extension name. Flags are not supported at this time. If ``None`` this attribute is not appended. Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and ``None`` for flags. wcs_relax : bool Value of the ``relax`` parameter to use in converting the WCS to a FITS header using `~astropy.wcs.WCS.to_header`. The common ``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires ``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be preserved. key_uncertainty_type : str, optional The header key name for the class name of the uncertainty (if any) that is used to store the uncertainty type in the uncertainty hdu. Default is ``UTYPE``. .. versionadded:: 3.1 Raises ------ ValueError - If ``self.mask`` is set but not a `numpy.ndarray`. - If ``self.uncertainty`` is set but not a astropy uncertainty type. - If ``self.uncertainty`` is set but has another unit then ``self.data``. NotImplementedError Saving flags is not supported. Returns ------- hdulist : `~astropy.io.fits.HDUList` """ if isinstance(self.header, fits.Header): # Copy here so that we can modify the HDU header by adding WCS # information without changing the header of the CCDData object. header = self.header.copy() else: # Because _insert_in_metadata_fits_safe is written as a method # we need to create a dummy CCDData instance to hold the FITS # header we are constructing. This probably indicates that # _insert_in_metadata_fits_safe should be rewritten in a more # sensible way... 
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu") for k, v in self.header.items(): dummy_ccd._insert_in_metadata_fits_safe(k, v) header = dummy_ccd.header if self.unit is not u.dimensionless_unscaled: header['bunit'] = self.unit.to_string() if self.wcs: # Simply extending the FITS header with the WCS can lead to # duplicates of the WCS keywords; iterating over the WCS # header should be safer. # # Turns out if I had read the io.fits.Header.extend docs more # carefully, I would have realized that the keywords exist to # avoid duplicates and preserve, as much as possible, the # structure of the commentary cards. # # Note that until astropy/astropy#3967 is closed, the extend # will fail if there are comment cards in the WCS header but # not header. wcs_header = self.wcs.to_header(relax=wcs_relax) header.extend(wcs_header, useblanks=False, update=True) hdus = [fits.PrimaryHDU(self.data, header)] if hdu_mask and self.mask is not None: # Always assuming that the mask is a np.ndarray (check that it has # a 'shape'). if not hasattr(self.mask, 'shape'): raise ValueError('only a numpy.ndarray mask can be saved.') # Convert boolean mask to uint since io.fits cannot handle bool. hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask) hdus.append(hduMask) if hdu_uncertainty and self.uncertainty is not None: # We need to save some kind of information which uncertainty was # used so that loading the HDUList can infer the uncertainty type. # No idea how this can be done so only allow StdDevUncertainty. uncertainty_cls = self.uncertainty.__class__ if uncertainty_cls not in _known_uncertainties: raise ValueError('only uncertainties of type {} can be saved.' .format(_known_uncertainties)) uncertainty_name = _unc_cls_to_name[uncertainty_cls] hdr_uncertainty = fits.Header() hdr_uncertainty[key_uncertainty_type] = uncertainty_name # Assuming uncertainty is an StdDevUncertainty save just the array # this might be problematic if the Uncertainty has a unit differing # from the data so abort for different units. This is important for # astropy > 1.2 if (hasattr(self.uncertainty, 'unit') and self.uncertainty.unit is not None): if not _uncertainty_unit_equivalent_to_parent( uncertainty_cls, self.uncertainty.unit, self.unit): raise ValueError( 'saving uncertainties with a unit that is not ' 'equivalent to the unit from the data unit is not ' 'supported.') hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty) hdus.append(hduUncert) if hdu_flags and self.flags: raise NotImplementedError('adding the flags to a HDU is not ' 'supported at this time.') hdulist = fits.HDUList(hdus) return hdulist def copy(self): """ Return a copy of the CCDData object. """ return self.__class__(self, copy=True) add = _arithmetic(np.add)(NDDataArray.add) subtract = _arithmetic(np.subtract)(NDDataArray.subtract) multiply = _arithmetic(np.multiply)(NDDataArray.multiply) divide = _arithmetic(np.true_divide)(NDDataArray.divide) def _insert_in_metadata_fits_safe(self, key, value): """ Insert key/value pair into metadata in a way that FITS can serialize. Parameters ---------- key : str Key to be inserted in dictionary. value : str or None Value to be inserted. Notes ----- This addresses a shortcoming of the FITS standard. There are length restrictions on both the ``key`` (8 characters) and ``value`` (72 characters) in the FITS standard. There is a convention for handling long keywords and a convention for handling long values, but the two conventions cannot be used at the same time. 
This addresses that case by checking the length of the ``key`` and ``value`` and, if necessary, shortening the key. """ if len(key) > 8 and len(value) > 72: short_name = key[:8] self.meta[f'HIERARCH {key.upper()}'] = ( short_name, f"Shortened name for {key}") self.meta[short_name] = value else: self.meta[key] = value # A dictionary mapping "known" invalid fits unit known_invalid_fits_unit_strings = {'ELECTRONS/S': u.electron/u.s, 'ELECTRONS': u.electron, 'electrons': u.electron} # These need to be importable by the tests... _KEEP_THESE_KEYWORDS_IN_HEADER = [ 'JD-OBS', 'MJD-OBS', 'DATE-OBS' ] _PCs = set(['PC1_1', 'PC1_2', 'PC2_1', 'PC2_2']) _CDs = set(['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']) def _generate_wcs_and_update_header(hdr): """ Generate a WCS object from a header and remove the WCS-specific keywords from the header. Parameters ---------- hdr : astropy.io.fits.header or other dict-like Returns ------- new_header, wcs """ # Try constructing a WCS object. try: wcs = WCS(hdr) except Exception as exc: # Normally WCS only raises Warnings and doesn't fail but in rare # cases (malformed header) it could fail... log.info('An exception happened while extracting WCS information from ' 'the Header.\n{}: {}'.format(type(exc).__name__, str(exc))) return hdr, None # Test for success by checking to see if the wcs ctype has a non-empty # value, return None for wcs if ctype is empty. if not wcs.wcs.ctype[0]: return (hdr, None) new_hdr = hdr.copy() # If the keywords below are in the header they are also added to WCS. # It seems like they should *not* be removed from the header, though. wcs_header = wcs.to_header(relax=True) for k in wcs_header: if k not in _KEEP_THESE_KEYWORDS_IN_HEADER: new_hdr.remove(k, ignore_missing=True) # Check that this does not result in an inconsistent header WCS if the WCS # is converted back to a header. if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)): # The PCi_j representation is used by the astropy.wcs object, # so CDi_j keywords were not removed from new_hdr. Remove them now. for cd in _CDs: new_hdr.remove(cd, ignore_missing=True) # The other case -- CD in the header produced by astropy.wcs -- should # never happen based on [1], which computes the matrix in PC form. # [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596 # # The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does # check for the possibility that both PC and CD are present in the result # so if the implementation of to_header changes in wcslib in the future # then the tests should catch it, and then this code will need to be # updated. # We need to check for any SIP coefficients that got left behind if the # header has SIP. if wcs.sip is not None: keyword = '{}_{}_{}' polynomials = ['A', 'B', 'AP', 'BP'] for poly in polynomials: order = wcs.sip.__getattribute__(f'{poly.lower()}_order') for i, j in itertools.product(range(order), repeat=2): new_hdr.remove(keyword.format(poly, i, j), ignore_missing=True) return (new_hdr, wcs) def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT', hdu_mask='MASK', hdu_flags=None, key_uncertainty_type='UTYPE', **kwd): """ Generate a CCDData object from a FITS file. Parameters ---------- filename : str Name of fits file. hdu : int, str, tuple of (str, int), optional Index or other identifier of the Header Data Unit of the FITS file from which CCDData should be initialized. If zero and no data in the primary HDU, it will search for the first extension HDU with data. 
The header will be added to the primary HDU. Default is ``0``. unit : `~astropy.units.Unit`, optional Units of the image data. If this argument is provided and there is a unit for the image in the FITS header (the keyword ``BUNIT`` is used as the unit, if present), this argument is used for the unit. Default is ``None``. hdu_uncertainty : str or None, optional FITS extension from which the uncertainty should be initialized. If the extension does not exist the uncertainty of the CCDData is ``None``. Default is ``'UNCERT'``. hdu_mask : str or None, optional FITS extension from which the mask should be initialized. If the extension does not exist the mask of the CCDData is ``None``. Default is ``'MASK'``. hdu_flags : str or None, optional Currently not implemented. Default is ``None``. key_uncertainty_type : str, optional The header key name where the class name of the uncertainty is stored in the hdu of the uncertainty (if any). Default is ``UTYPE``. .. versionadded:: 3.1 kwd : Any additional keyword parameters are passed through to the FITS reader in :mod:`astropy.io.fits`; see Notes for additional discussion. Notes ----- FITS files that contained scaled data (e.g. unsigned integer images) will be scaled and the keywords used to manage scaled data in :mod:`astropy.io.fits` are disabled. """ unsupport_open_keywords = { 'do_not_scale_image_data': 'Image data must be scaled.', 'scale_back': 'Scale information is not preserved.' } for key, msg in unsupport_open_keywords.items(): if key in kwd: prefix = f'unsupported keyword: {key}.' raise TypeError(' '.join([prefix, msg])) with fits.open(filename, **kwd) as hdus: hdr = hdus[hdu].header if hdu_uncertainty is not None and hdu_uncertainty in hdus: unc_hdu = hdus[hdu_uncertainty] stored_unc_name = unc_hdu.header.get(key_uncertainty_type, 'None') # For compatibility reasons the default is standard deviation # uncertainty because files could have been created before the # uncertainty type was stored in the header. unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty) uncertainty = unc_type(unc_hdu.data) else: uncertainty = None if hdu_mask is not None and hdu_mask in hdus: # Mask is saved as uint but we want it to be boolean. mask = hdus[hdu_mask].data.astype(np.bool_) else: mask = None if hdu_flags is not None and hdu_flags in hdus: raise NotImplementedError('loading flags is currently not ' 'supported.') # search for the first instance with data if # the primary header is empty. if hdu == 0 and hdus[hdu].data is None: for i in range(len(hdus)): if (hdus.info(hdu)[i][3] == 'ImageHDU' and hdus.fileinfo(i)['datSpan'] > 0): hdu = i comb_hdr = hdus[hdu].header.copy() # Add header values from the primary header that aren't # present in the extension header. comb_hdr.extend(hdr, unique=True) hdr = comb_hdr log.info(f"first HDU with data is extension {hdu}.") break if 'bunit' in hdr: fits_unit_string = hdr['bunit'] # patch to handle FITS files using ADU for the unit instead of the # standard version of 'adu' if fits_unit_string.strip().lower() == 'adu': fits_unit_string = fits_unit_string.lower() else: fits_unit_string = None if fits_unit_string: if unit is None: # Convert the BUNIT header keyword to a unit and if that's not # possible raise a meaningful error message. 
                try:
                    kifus = CCDData.known_invalid_fits_unit_strings
                    if fits_unit_string in kifus:
                        fits_unit_string = kifus[fits_unit_string]
                    fits_unit_string = u.Unit(fits_unit_string)
                except ValueError:
                    raise ValueError(
                        'The Header value for the key BUNIT ({}) cannot be '
                        'interpreted as a valid unit. To successfully read '
                        'the file as CCDData you can pass in a valid `unit` '
                        'argument explicitly or change the header of the '
                        'FITS file before reading it.'
                        .format(fits_unit_string))
            else:
                log.info("using the unit {} passed to the FITS reader instead "
                         "of the unit {} in the FITS file."
                         .format(unit, fits_unit_string))

        use_unit = unit or fits_unit_string
        hdr, wcs = _generate_wcs_and_update_header(hdr)
        ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,
                           mask=mask, uncertainty=uncertainty, wcs=wcs)

    return ccd_data


def fits_ccddata_writer(
        ccd_data, filename, hdu_mask='MASK', hdu_uncertainty='UNCERT',
        hdu_flags=None, key_uncertainty_type='UTYPE', **kwd):
    """
    Write CCDData object to FITS file.

    Parameters
    ----------
    filename : str
        Name of file.

    hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
        If it is a string append this attribute to the HDUList as
        `~astropy.io.fits.ImageHDU` with the string as extension name.
        Flags are not supported at this time. If ``None`` this attribute
        is not appended.
        Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
        ``None`` for flags.

    key_uncertainty_type : str, optional
        The header key name for the class name of the uncertainty (if any)
        that is used to store the uncertainty type in the uncertainty hdu.
        Default is ``UTYPE``.

        .. versionadded:: 3.1

    kwd :
        All additional keywords are passed to :py:mod:`astropy.io.fits`

    Raises
    ------
    ValueError
        - If ``self.mask`` is set but not a `numpy.ndarray`.
        - If ``self.uncertainty`` is set but not a
          `~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has another unit than
          ``self.data``.

    NotImplementedError
        Saving flags is not supported.
    """
    hdu = ccd_data.to_hdu(
        hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
        key_uncertainty_type=key_uncertainty_type, hdu_flags=hdu_flags)
    hdu.writeto(filename, **kwd)


with registry.delay_doc_updates(CCDData):
    registry.register_reader('fits', CCDData, fits_ccddata_reader)
    registry.register_writer('fits', CCDData, fits_ccddata_writer)
    registry.register_identifier('fits', CCDData, fits.connect.is_fits)
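# Round-trip sketch (illustrative, not part of the module): with the reader
# and writer registered above, CCDData dispatches through astropy's unified
# I/O interface.  A minimal, self-contained example using a temporary file:
import os.path
import tempfile

import numpy as np
from astropy.nddata import CCDData

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, 'image.fits')
    ccd = CCDData(np.ones((10, 10)), unit='adu')
    ccd.write(path)            # dispatches to fits_ccddata_writer
    ccd2 = CCDData.read(path)  # dispatches to fits_ccddata_reader; the
                               # unit is recovered from the BUNIT keyword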
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing utilities. Not part of the public API!""" from astropy.wcs import WCS from astropy.wcs.wcsapi import BaseHighLevelWCS def assert_wcs_seem_equal(wcs1, wcs2): """Just checks a few attributes to make sure wcs instances seem to be equal. """ if wcs1 is None and wcs2 is None: return assert wcs1 is not None assert wcs2 is not None if isinstance(wcs1, BaseHighLevelWCS): wcs1 = wcs1.low_level_wcs if isinstance(wcs2, BaseHighLevelWCS): wcs2 = wcs2.low_level_wcs assert isinstance(wcs1, WCS) assert isinstance(wcs2, WCS) if wcs1 is wcs2: return assert wcs1.wcs.compare(wcs2.wcs) def _create_wcs_simple(naxis, ctype, crpix, crval, cdelt): wcs = WCS(naxis=naxis) wcs.wcs.crpix = crpix wcs.wcs.crval = crval wcs.wcs.cdelt = cdelt wcs.wcs.ctype = ctype return wcs def create_two_equal_wcs(naxis): return [ _create_wcs_simple( naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis, crval=[10]*naxis, cdelt=[1]*naxis), _create_wcs_simple( naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis, crval=[10]*naxis, cdelt=[1]*naxis) ] def create_two_unequal_wcs(naxis): return [ _create_wcs_simple( naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis, crval=[10]*naxis, cdelt=[1]*naxis), _create_wcs_simple( naxis=naxis, ctype=["m"]*naxis, crpix=[20]*naxis, crval=[20]*naxis, cdelt=[2]*naxis), ]
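# Minimal usage sketch (illustrative): in a test, the factory helpers above
# pair naturally with the assertion helper defined at the top of this module.
wcs_a, wcs_b = create_two_equal_wcs(naxis=2)
assert_wcs_seem_equal(wcs_a, wcs_b)  # passes: same ctype/crpix/crval/cdelt

wcs_c, wcs_d = create_two_unequal_wcs(naxis=2)
try:
    assert_wcs_seem_equal(wcs_c, wcs_d)
except AssertionError:
    pass  # expected: every compared attribute differs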
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""

from copy import deepcopy

import numpy as np

from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.utils import lazyproperty
from astropy.wcs.utils import skycoord_to_pixel, proj_plane_pixel_scales
from astropy.wcs import Sip

__all__ = ['extract_array', 'add_array', 'subpixel_indices',
           'overlap_slices', 'NoOverlapError', 'PartialOverlapError',
           'Cutout2D']


class NoOverlapError(ValueError):
    '''Raised when determining the overlap of non-overlapping arrays.'''
    pass


class PartialOverlapError(ValueError):
    '''Raised when arrays only partially overlap.'''
    pass


def overlap_slices(large_array_shape, small_array_shape, position,
                   mode='partial'):
    """
    Get slices for the overlapping part of a small and a large array.

    Given a certain position of the center of the small array, with
    respect to the large array, tuples of slices are returned which can
    be used to extract, add or subtract the small array at the given
    position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.
    Integer positions are at the pixel centers.

    Parameters
    ----------
    large_array_shape : tuple of int or int
        The shape of the large array (for 1D arrays, this can be an
        `int`).
    small_array_shape : int or tuple thereof
        The shape of the small array (for 1D arrays, this can be an
        `int`).  See the ``mode`` keyword for additional details.
    position : number or tuple thereof
        The position of the small array's center with respect to the
        large array.  The pixel coordinates should be in the same order
        as the array shape.  Integer positions are at the pixel centers.
        For any axis where ``small_array_shape`` is even, the position
        is rounded up, e.g. extracting two elements with a center of
        ``1`` will define the extracted region as ``[0, 1]``.
    mode : {'partial', 'trim', 'strict'}, optional
        In ``'partial'`` mode, a partial overlap of the small and the
        large array is sufficient.  The ``'trim'`` mode is similar to
        the ``'partial'`` mode, but ``slices_small`` will be adjusted to
        return only the overlapping elements.  In the ``'strict'`` mode,
        the small array has to be fully contained in the large array,
        otherwise an `~astropy.nddata.utils.PartialOverlapError` is
        raised.  In all modes, non-overlapping arrays will raise a
        `~astropy.nddata.utils.NoOverlapError`.

    Returns
    -------
    slices_large : tuple of slice
        A tuple of slice objects for each axis of the large array, such
        that ``large_array[slices_large]`` extracts the region of the
        large array that overlaps with the small array.
    slices_small : tuple of slice
        A tuple of slice objects for each axis of the small array, such
        that ``small_array[slices_small]`` extracts the region that is
        inside the large array.
""" if mode not in ['partial', 'trim', 'strict']: raise ValueError('Mode can be only "partial", "trim", or "strict".') if np.isscalar(small_array_shape): small_array_shape = (small_array_shape, ) if np.isscalar(large_array_shape): large_array_shape = (large_array_shape, ) if np.isscalar(position): position = (position, ) if any(~np.isfinite(position)): raise ValueError('Input position contains invalid values (NaNs or ' 'infs).') if len(small_array_shape) != len(large_array_shape): raise ValueError('"large_array_shape" and "small_array_shape" must ' 'have the same number of dimensions.') if len(small_array_shape) != len(position): raise ValueError('"position" must have the same number of dimensions ' 'as "small_array_shape".') # define the min/max pixel indices indices_min = [int(np.ceil(pos - (small_shape / 2.))) for (pos, small_shape) in zip(position, small_array_shape)] indices_max = [int(np.ceil(pos + (small_shape / 2.))) for (pos, small_shape) in zip(position, small_array_shape)] for e_max in indices_max: if e_max < 0: raise NoOverlapError('Arrays do not overlap.') for e_min, large_shape in zip(indices_min, large_array_shape): if e_min >= large_shape: raise NoOverlapError('Arrays do not overlap.') if mode == 'strict': for e_min in indices_min: if e_min < 0: raise PartialOverlapError('Arrays overlap only partially.') for e_max, large_shape in zip(indices_max, large_array_shape): if e_max > large_shape: raise PartialOverlapError('Arrays overlap only partially.') # Set up slices slices_large = tuple(slice(max(0, indices_min), min(large_shape, indices_max)) for (indices_min, indices_max, large_shape) in zip(indices_min, indices_max, large_array_shape)) if mode == 'trim': slices_small = tuple(slice(0, slc.stop - slc.start) for slc in slices_large) else: slices_small = tuple(slice(max(0, -indices_min), min(large_shape - indices_min, indices_max - indices_min)) for (indices_min, indices_max, large_shape) in zip(indices_min, indices_max, large_array_shape)) return slices_large, slices_small def extract_array(array_large, shape, position, mode='partial', fill_value=np.nan, return_position=False): """ Extract a smaller array of the given shape and position from a larger array. Parameters ---------- array_large : ndarray The array from which to extract the small array. shape : int or tuple thereof The shape of the extracted array (for 1D arrays, this can be an `int`). See the ``mode`` keyword for additional details. position : number or tuple thereof The position of the small array's center with respect to the large array. The pixel coordinates should be in the same order as the array shape. Integer positions are at the pixel centers (for 1D arrays, this can be a number). mode : {'partial', 'trim', 'strict'}, optional The mode used for extracting the small array. For the ``'partial'`` and ``'trim'`` modes, a partial overlap of the small array and the large array is sufficient. For the ``'strict'`` mode, the small array has to be fully contained within the large array, otherwise an `~astropy.nddata.utils.PartialOverlapError` is raised. In all modes, non-overlapping arrays will raise a `~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode, positions in the small array that do not overlap with the large array will be filled with ``fill_value``. In ``'trim'`` mode only the overlapping elements are returned, thus the resulting small array may be smaller than the requested ``shape``. 
fill_value : number, optional If ``mode='partial'``, the value to fill pixels in the extracted small array that do not overlap with the input ``array_large``. ``fill_value`` will be changed to have the same ``dtype`` as the ``array_large`` array, with one exception. If ``array_large`` has integer type and ``fill_value`` is ``np.nan``, then a `ValueError` will be raised. return_position : bool, optional If `True`, return the coordinates of ``position`` in the coordinate system of the returned array. Returns ------- array_small : ndarray The extracted array. new_position : tuple If ``return_position`` is true, this tuple will contain the coordinates of the input ``position`` in the coordinate system of ``array_small``. Note that for partially overlapping arrays, ``new_position`` might actually be outside of the ``array_small``; ``array_small[new_position]`` might give wrong results if any element in ``new_position`` is negative. Examples -------- We consider a large array with the shape 11x10, from which we extract a small array of shape 3x5: >>> import numpy as np >>> from astropy.nddata.utils import extract_array >>> large_array = np.arange(110).reshape((11, 10)) >>> extract_array(large_array, (3, 5), (7, 7)) array([[65, 66, 67, 68, 69], [75, 76, 77, 78, 79], [85, 86, 87, 88, 89]]) """ if np.isscalar(shape): shape = (shape, ) if np.isscalar(position): position = (position, ) if mode not in ['partial', 'trim', 'strict']: raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.") large_slices, small_slices = overlap_slices(array_large.shape, shape, position, mode=mode) extracted_array = array_large[large_slices] if return_position: new_position = [i - s.start for i, s in zip(position, large_slices)] # Extracting on the edges is presumably a rare case, so treat special here if (extracted_array.shape != shape) and (mode == 'partial'): extracted_array = np.zeros(shape, dtype=array_large.dtype) try: extracted_array[:] = fill_value except ValueError as exc: exc.args += ('fill_value is inconsistent with the data type of ' 'the input array (e.g., fill_value cannot be set to ' 'np.nan if the input array has integer type). Please ' 'change either the input array dtype or the ' 'fill_value.',) raise exc extracted_array[small_slices] = array_large[large_slices] if return_position: new_position = [i + s.start for i, s in zip(new_position, small_slices)] if return_position: return extracted_array, tuple(new_position) else: return extracted_array def add_array(array_large, array_small, position): """ Add a smaller array at a given position in a larger array. Parameters ---------- array_large : ndarray Large array. array_small : ndarray Small array to add. Can be equal to ``array_large`` in size in a given dimension, but not larger. position : tuple Position of the small array's center, with respect to the large array. Coordinates should be in the same order as the array shape. Returns ------- new_array : ndarray The new array formed from the sum of ``array_large`` and ``array_small``. Notes ----- The addition is done in-place. 
    Examples
    --------
    We consider a large array of zeros with the shape 5x5 and a small
    array of ones with a shape of 3x3:

    >>> import numpy as np
    >>> from astropy.nddata.utils import add_array
    >>> large_array = np.zeros((5, 5))
    >>> small_array = np.ones((3, 3))
    >>> add_array(large_array, small_array, (1, 2))  # doctest: +FLOAT_CMP
    array([[0., 1., 1., 1., 0.],
           [0., 1., 1., 1., 0.],
           [0., 1., 1., 1., 0.],
           [0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0.]])
    """
    # Check that the small array is not larger than the large array
    # along any axis.
    if all(large_shape >= small_shape for (large_shape, small_shape)
           in zip(array_large.shape, array_small.shape)):
        large_slices, small_slices = overlap_slices(array_large.shape,
                                                    array_small.shape,
                                                    position)
        array_large[large_slices] += array_small[small_slices]
        return array_large
    else:
        raise ValueError("Can't add array. Small array too large.")


def subpixel_indices(position, subsampling):
    """
    Convert decimal points to indices, given a subsampling factor.

    This discards the integer part of the position and uses only the
    decimal place, and converts this to a subpixel position depending
    on the subsampling specified. The center of a pixel corresponds to
    an integer position.

    Parameters
    ----------
    position : ndarray or array-like
        Positions in pixels.
    subsampling : int
        Subsampling factor per pixel.

    Returns
    -------
    indices : ndarray
        The integer subpixel indices corresponding to the input positions.

    Examples
    --------

    If no subsampling is used, then the subpixel indices returned are always 0:

    >>> from astropy.nddata.utils import subpixel_indices
    >>> subpixel_indices([1.2, 3.4, 5.6], 1)  # doctest: +FLOAT_CMP
    array([0., 0., 0.])

    If instead we use a subsampling of 2, we see that for the first two
    values (1.2 and 3.4) the subpixel position is 1, while for 5.5 it is
    0.  This is because the pixel centers lie at the integer positions 1,
    3, and 6: the values 1.2 and 3.4 fall in the right half of the pixels
    centered at 1 and 3, while 5.5 falls at the lower edge of the pixel
    centered at 6.

    >>> subpixel_indices([1.2, 3.4, 5.5], 2)  # doctest: +FLOAT_CMP
    array([1., 1., 0.])
    """
    # Get decimal points
    fractions = np.modf(np.asanyarray(position) + 0.5)[0]
    return np.floor(fractions * subsampling)


class Cutout2D:
    """
    Create a cutout object from a 2D array.

    The returned object will contain a 2D cutout array.  If
    ``copy=False`` (default), the cutout array is a view into the
    original ``data`` array, otherwise the cutout array will contain a
    copy of the original data.

    If a `~astropy.wcs.WCS` object is input, then the returned object
    will also contain a copy of the original WCS, but updated for the
    cutout array.

    For example usage, see :ref:`astropy:cutout_images`.

    .. warning::

        The cutout WCS object does not currently handle cases where the
        input WCS object contains distortion lookup tables described in
        the `FITS WCS distortion paper
        <https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.

    Parameters
    ----------
    data : ndarray
        The 2D data array from which to extract the cutout array.
    position : tuple or `~astropy.coordinates.SkyCoord`
        The position of the cutout array's center with respect to
        the ``data`` array.  The position can be specified either as
        a ``(x, y)`` tuple of pixel coordinates or a
        `~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a
        required input.
    size : int, array-like, or `~astropy.units.Quantity`
        The size of the cutout array along each axis.  If ``size``
        is a scalar number or a scalar `~astropy.units.Quantity`,
        then a square cutout of ``size`` will be created.  If
        ``size`` has two elements, they should be in ``(ny, nx)``
        order.
        Scalar numbers in ``size`` are assumed to be in units of
        pixels.  ``size`` can also be a `~astropy.units.Quantity`
        object or contain `~astropy.units.Quantity` objects.  Such
        `~astropy.units.Quantity` objects must be in pixel or angular
        units.  For all cases, ``size`` will be converted to an
        integer number of pixels, rounding to the nearest integer.
        See the ``mode`` keyword for additional details on the final
        cutout size.

        .. note::
            If ``size`` is in angular units, the cutout size is
            converted to pixels using the pixel scales along each
            axis of the image at the ``CRPIX`` location.  Projection
            and other non-linear distortions are not taken into
            account.

    wcs : `~astropy.wcs.WCS`, optional
        A WCS object associated with the input ``data`` array.  If
        ``wcs`` is not `None`, then the returned cutout object will
        contain a copy of the updated WCS for the cutout data array.

    mode : {'trim', 'partial', 'strict'}, optional
        The mode used for creating the cutout data array.  For the
        ``'partial'`` and ``'trim'`` modes, a partial overlap of the
        cutout array and the input ``data`` array is sufficient.  For
        the ``'strict'`` mode, the cutout array has to be fully
        contained within the ``data`` array, otherwise an
        `~astropy.nddata.utils.PartialOverlapError` is raised.  In all
        modes, non-overlapping arrays will raise a
        `~astropy.nddata.utils.NoOverlapError`.  In ``'partial'``
        mode, positions in the cutout array that do not overlap with
        the ``data`` array will be filled with ``fill_value``.  In
        ``'trim'`` mode only the overlapping elements are returned,
        thus the resulting cutout array may be smaller than the
        requested ``shape``.

    fill_value : float or int, optional
        If ``mode='partial'``, the value to fill pixels in the cutout
        array that do not overlap with the input ``data``.
        ``fill_value`` must have the same ``dtype`` as the input
        ``data`` array.

    copy : bool, optional
        If `False` (default), then the cutout data will be a view into
        the original ``data`` array.  If `True`, then the cutout data
        will hold a copy of the original ``data`` array.

    Attributes
    ----------
    data : 2D `~numpy.ndarray`
        The 2D cutout array.

    shape : (2,) tuple
        The ``(ny, nx)`` shape of the cutout array.

    shape_input : (2,) tuple
        The ``(ny, nx)`` shape of the input (original) array.

    input_position_cutout : (2,) tuple
        The (unrounded) ``(x, y)`` position with respect to the cutout
        array.

    input_position_original : (2,) tuple
        The original (unrounded) ``(x, y)`` input position (with respect
        to the original array).

    slices_original : (2,) tuple of slice object
        A tuple of slice objects for the minimal bounding box of the
        cutout with respect to the original array.  For
        ``mode='partial'``, the slices are for the valid (non-filled)
        cutout values.

    slices_cutout : (2,) tuple of slice object
        A tuple of slice objects for the minimal bounding box of the
        cutout with respect to the cutout array.  For
        ``mode='partial'``, the slices are for the valid (non-filled)
        cutout values.

    xmin_original, ymin_original, xmax_original, ymax_original : float
        The minimum and maximum ``x`` and ``y`` indices of the minimal
        rectangular region of the cutout array with respect to the
        original array.  For ``mode='partial'``, the bounding box
        indices are for the valid (non-filled) cutout values.  These
        values are the same as those in `bbox_original`.

    xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float
        The minimum and maximum ``x`` and ``y`` indices of the minimal
        rectangular region of the cutout array with respect to the
        cutout array.
For ``mode='partial'``, the bounding box indices are for the valid (non-filled) cutout values. These values are the same as those in `bbox_cutout`. wcs : `~astropy.wcs.WCS` or None A WCS object associated with the cutout array if a ``wcs`` was input. Examples -------- >>> import numpy as np >>> from astropy.nddata.utils import Cutout2D >>> from astropy import units as u >>> data = np.arange(20.).reshape(5, 4) >>> cutout1 = Cutout2D(data, (2, 2), (3, 3)) >>> print(cutout1.data) # doctest: +FLOAT_CMP [[ 5. 6. 7.] [ 9. 10. 11.] [13. 14. 15.]] >>> print(cutout1.center_original) (2.0, 2.0) >>> print(cutout1.center_cutout) (1.0, 1.0) >>> print(cutout1.origin_original) (1, 1) >>> cutout2 = Cutout2D(data, (2, 2), 3) >>> print(cutout2.data) # doctest: +FLOAT_CMP [[ 5. 6. 7.] [ 9. 10. 11.] [13. 14. 15.]] >>> size = u.Quantity([3, 3], u.pixel) >>> cutout3 = Cutout2D(data, (0, 0), size) >>> print(cutout3.data) # doctest: +FLOAT_CMP [[0. 1.] [4. 5.]] >>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3)) >>> print(cutout4.data) # doctest: +FLOAT_CMP [[0. 1.] [4. 5.]] >>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial') >>> print(cutout5.data) # doctest: +FLOAT_CMP [[nan nan nan] [nan 0. 1.] [nan 4. 5.]] """ def __init__(self, data, position, size, wcs=None, mode='trim', fill_value=np.nan, copy=False): if wcs is None: wcs = getattr(data, 'wcs', None) if isinstance(position, SkyCoord): if wcs is None: raise ValueError('wcs must be input if position is a ' 'SkyCoord') position = skycoord_to_pixel(position, wcs, mode='all') # (x, y) if np.isscalar(size): size = np.repeat(size, 2) # special handling for a scalar Quantity if isinstance(size, u.Quantity): size = np.atleast_1d(size) if len(size) == 1: size = np.repeat(size, 2) if len(size) > 2: raise ValueError('size must have at most two elements') shape = np.zeros(2).astype(int) pixel_scales = None # ``size`` can have a mixture of int and Quantity (and even units), # so evaluate each axis separately for axis, side in enumerate(size): if not isinstance(side, u.Quantity): shape[axis] = int(np.round(size[axis])) # pixels else: if side.unit == u.pixel: shape[axis] = int(np.round(side.value)) elif side.unit.physical_type == 'angle': if wcs is None: raise ValueError('wcs must be input if any element ' 'of size has angular units') if pixel_scales is None: pixel_scales = u.Quantity( proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis]) shape[axis] = int(np.round( (side / pixel_scales[axis]).decompose())) else: raise ValueError('shape can contain Quantities with only ' 'pixel or angular units') data = np.asanyarray(data) # reverse position because extract_array and overlap_slices # use (y, x), but keep the input position pos_yx = position[::-1] cutout_data, input_position_cutout = extract_array( data, tuple(shape), pos_yx, mode=mode, fill_value=fill_value, return_position=True) if copy: cutout_data = np.copy(cutout_data) self.data = cutout_data self.input_position_cutout = input_position_cutout[::-1] # (x, y) slices_original, slices_cutout = overlap_slices( data.shape, shape, pos_yx, mode=mode) self.slices_original = slices_original self.slices_cutout = slices_cutout self.shape = self.data.shape self.input_position_original = position self.shape_input = shape ((self.ymin_original, self.ymax_original), (self.xmin_original, self.xmax_original)) = self.bbox_original ((self.ymin_cutout, self.ymax_cutout), (self.xmin_cutout, self.xmax_cutout)) = self.bbox_cutout # the true origin pixel of the cutout array, including any # filled cutout values 
self._origin_original_true = ( self.origin_original[0] - self.slices_cutout[1].start, self.origin_original[1] - self.slices_cutout[0].start) if wcs is not None: self.wcs = deepcopy(wcs) self.wcs.wcs.crpix -= self._origin_original_true self.wcs.array_shape = self.data.shape if wcs.sip is not None: self.wcs.sip = Sip(wcs.sip.a, wcs.sip.b, wcs.sip.ap, wcs.sip.bp, wcs.sip.crpix - self._origin_original_true) else: self.wcs = None def to_original_position(self, cutout_position): """ Convert an ``(x, y)`` position in the cutout array to the original ``(x, y)`` position in the original large array. Parameters ---------- cutout_position : tuple The ``(x, y)`` pixel position in the cutout array. Returns ------- original_position : tuple The corresponding ``(x, y)`` pixel position in the original large array. """ return tuple(cutout_position[i] + self.origin_original[i] for i in [0, 1]) def to_cutout_position(self, original_position): """ Convert an ``(x, y)`` position in the original large array to the ``(x, y)`` position in the cutout array. Parameters ---------- original_position : tuple The ``(x, y)`` pixel position in the original large array. Returns ------- cutout_position : tuple The corresponding ``(x, y)`` pixel position in the cutout array. """ return tuple(original_position[i] - self.origin_original[i] for i in [0, 1]) def plot_on_original(self, ax=None, fill=False, **kwargs): """ Plot the cutout region on a matplotlib Axes instance. Parameters ---------- ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current `matplotlib.axes.Axes` instance is used. fill : bool, optional Set whether to fill the cutout patch. The default is `False`. kwargs : optional Any keyword arguments accepted by `matplotlib.patches.Patch`. Returns ------- ax : `matplotlib.axes.Axes` instance The matplotlib Axes instance constructed in the method if ``ax=None``. Otherwise the output ``ax`` is the same as the input ``ax``. """ import matplotlib.pyplot as plt import matplotlib.patches as mpatches kwargs['fill'] = fill if ax is None: ax = plt.gca() height, width = self.shape hw, hh = width / 2., height / 2. pos_xy = self.position_original - np.array([hw, hh]) patch = mpatches.Rectangle(pos_xy, width, height, 0., **kwargs) ax.add_patch(patch) return ax @staticmethod def _calc_center(slices): """ Calculate the center position. The center position will be fractional for even-sized arrays. For ``mode='partial'``, the central position is calculated for the valid (non-filled) cutout values. """ return tuple(0.5 * (slices[i].start + slices[i].stop - 1) for i in [1, 0]) @staticmethod def _calc_bbox(slices): """ Calculate a minimal bounding box in the form ``((ymin, ymax), (xmin, xmax))``. Note these are pixel locations, not slice indices. For ``mode='partial'``, the bounding box indices are for the valid (non-filled) cutout values. """ # (stop - 1) to return the max pixel location, not the slice index return ((slices[0].start, slices[0].stop - 1), (slices[1].start, slices[1].stop - 1)) @lazyproperty def origin_original(self): """ The ``(x, y)`` index of the origin pixel of the cutout with respect to the original array. For ``mode='partial'``, the origin pixel is calculated for the valid (non-filled) cutout values. """ return (self.slices_original[1].start, self.slices_original[0].start) @lazyproperty def origin_cutout(self): """ The ``(x, y)`` index of the origin pixel of the cutout with respect to the cutout array. 
For ``mode='partial'``, the origin pixel is calculated for the valid (non-filled) cutout values. """ return (self.slices_cutout[1].start, self.slices_cutout[0].start) @staticmethod def _round(a): """ Round the input to the nearest integer. If two integers are equally close, the value is rounded up. Note that this is different from `np.round`, which rounds to the nearest even number. """ return int(np.floor(a + 0.5)) @lazyproperty def position_original(self): """ The ``(x, y)`` position index (rounded to the nearest pixel) in the original array. """ return (self._round(self.input_position_original[0]), self._round(self.input_position_original[1])) @lazyproperty def position_cutout(self): """ The ``(x, y)`` position index (rounded to the nearest pixel) in the cutout array. """ return (self._round(self.input_position_cutout[0]), self._round(self.input_position_cutout[1])) @lazyproperty def center_original(self): """ The central ``(x, y)`` position of the cutout array with respect to the original array. For ``mode='partial'``, the central position is calculated for the valid (non-filled) cutout values. """ return self._calc_center(self.slices_original) @lazyproperty def center_cutout(self): """ The central ``(x, y)`` position of the cutout array with respect to the cutout array. For ``mode='partial'``, the central position is calculated for the valid (non-filled) cutout values. """ return self._calc_center(self.slices_cutout) @lazyproperty def bbox_original(self): """ The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal rectangular region of the cutout array with respect to the original array. For ``mode='partial'``, the bounding box indices are for the valid (non-filled) cutout values. """ return self._calc_bbox(self.slices_original) @lazyproperty def bbox_cutout(self): """ The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal rectangular region of the cutout array with respect to the cutout array. For ``mode='partial'``, the bounding box indices are for the valid (non-filled) cutout values. """ return self._calc_bbox(self.slices_cutout)
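# Worked sketch (illustrative, not part of the module): a Cutout2D with an
# attached WCS has its reference pixel shifted by the cutout origin, so
# world coordinates are preserved.  For a cutout fully contained in the
# data (the default mode='trim'), that shift equals ``origin_original``.
import numpy as np
from astropy.wcs import WCS
from astropy.nddata.utils import Cutout2D

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [50., 50.]
wcs.wcs.cdelt = [-0.001, 0.001]

data = np.arange(100. * 100.).reshape(100, 100)
cut = Cutout2D(data, position=(60, 40), size=(11, 11), wcs=wcs)

# crpix of the cutout WCS is the original crpix minus the cutout origin.
assert np.allclose(cut.wcs.wcs.crpix,
                   np.asarray(wcs.wcs.crpix) - np.asarray(cut.origin_original))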
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module contains a class equivalent to pre-1.0 NDData.

import numpy as np

from astropy.units import UnitsError, UnitConversionError, Unit
from astropy import log

from .nddata import NDData
from .nduncertainty import NDUncertainty
from .mixins.ndslicing import NDSlicingMixin
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin
from .flag_collection import FlagCollection

__all__ = ['NDDataArray']


class NDDataArray(NDArithmeticMixin, NDSlicingMixin, NDIOMixin, NDData):
    """
    An ``NDData`` object with arithmetic. This class is functionally
    equivalent to ``NDData`` in astropy versions prior to 1.0.

    The key distinction from raw numpy arrays is the presence of
    additional metadata such as uncertainties, a mask, units, flags,
    and/or a coordinate system.

    See also: https://docs.astropy.org/en/stable/nddata/

    Parameters
    ----------
    data : ndarray or `NDData`
        The actual data contained in this `NDData` object. Note that this
        will always be copied by *reference*, so you should make a copy of
        the ``data`` before passing it in if that's the desired behavior.

    uncertainty : `~astropy.nddata.NDUncertainty`, optional
        Uncertainties on the data.

    mask : array-like, optional
        Mask for the data, given as a boolean Numpy array or any object that
        can be converted to a boolean Numpy array with a shape
        matching that of the data. The values must be ``False`` where
        the data is *valid* and ``True`` when it is not (like Numpy
        masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
        ignored.

    flags : array-like or `~astropy.nddata.FlagCollection`, optional
        Flags giving information about each pixel. These can be specified
        either as a Numpy array of any type (or an object which can be
        converted to a Numpy array) with a shape matching that of the
        data, or as a `~astropy.nddata.FlagCollection` instance which has a
        shape matching that of the data.

    wcs : None, optional
        WCS-object containing the world coordinate system for the data.

        .. warning::
            This is not yet defined because the discussion of how best to
            represent this class's WCS system generically is still under
            consideration. For now just leave it as None.

    meta : `dict`-like object, optional
        Metadata for this object. "Metadata" here means all information that
        is included with this object but not part of any other attribute
        of this particular object, e.g., creation date, unique identifier,
        simulation parameters, exposure time, telescope name, etc.

    unit : `~astropy.units.UnitBase` instance or str, optional
        The units of the data.

    Raises
    ------
    ValueError
        If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
        match shape) onto ``data``.
    """

    def __init__(self, data, *args, flags=None, **kwargs):

        # Initialize with the parent...
        super().__init__(data, *args, **kwargs)

        # ...then reset uncertainty to force it to go through the
        # setter logic below. In base NDData all that is done is to
        # set self._uncertainty to whatever uncertainty is passed in.
        self.uncertainty = self._uncertainty

        # Same thing for mask.
        self.mask = self._mask

        # Initialize flags here because they are no longer handled in
        # NDData or NDDataBase.
if isinstance(data, NDDataArray): if flags is None: flags = data.flags else: log.info("Overwriting NDDataArrays's current " "flags with specified flags") self.flags = flags # Implement uncertainty as NDUncertainty to support propagation of # uncertainties in arithmetic operations @property def uncertainty(self): return self._uncertainty @uncertainty.setter def uncertainty(self, value): if value is not None: if isinstance(value, NDUncertainty): class_name = self.__class__.__name__ if not self.unit and value._unit: # Raise an error if uncertainty has unit and data does not raise ValueError("Cannot assign an uncertainty with unit " "to {} without " "a unit".format(class_name)) self._uncertainty = value self._uncertainty.parent_nddata = self else: raise TypeError("Uncertainty must be an instance of " "a NDUncertainty object") else: self._uncertainty = value # Override unit so that we can add a setter. @property def unit(self): return self._unit @unit.setter def unit(self, value): from . import conf try: if self._unit is not None and conf.warn_setting_unit_directly: log.info('Setting the unit directly changes the unit without ' 'updating the data or uncertainty. Use the ' '.convert_unit_to() method to change the unit and ' 'scale values appropriately.') except AttributeError: # raised if self._unit has not been set yet, in which case the # warning is irrelevant pass if value is None: self._unit = None else: self._unit = Unit(value) # Implement mask in a way that converts nicely to a numpy masked array @property def mask(self): if self._mask is np.ma.nomask: return None else: return self._mask @mask.setter def mask(self, value): # Check that value is not either type of null mask. if (value is not None) and (value is not np.ma.nomask): mask = np.array(value, dtype=np.bool_, copy=False) if mask.shape != self.data.shape: raise ValueError("dimensions of mask do not match data") else: self._mask = mask else: # internal representation should be one numpy understands self._mask = np.ma.nomask @property def shape(self): """ shape tuple of this object's data. """ return self.data.shape @property def size(self): """ integer size of this object's data. """ return self.data.size @property def dtype(self): """ `numpy.dtype` of this object's data. """ return self.data.dtype @property def ndim(self): """ integer dimensions of this object's data """ return self.data.ndim @property def flags(self): return self._flags @flags.setter def flags(self, value): if value is not None: if isinstance(value, FlagCollection): if value.shape != self.shape: raise ValueError("dimensions of FlagCollection does not match data") else: self._flags = value else: flags = np.array(value, copy=False) if flags.shape != self.shape: raise ValueError("dimensions of flags do not match data") else: self._flags = flags else: self._flags = value def __array__(self): """ This allows code that requests a Numpy array to use an NDData object as a Numpy array. """ if self.mask is not None: return np.ma.masked_array(self.data, self.mask) else: return np.array(self.data) def __array_prepare__(self, array, context=None): """ This ensures that a masked array is returned if self is masked. """ if self.mask is not None: return np.ma.masked_array(array, self.mask) else: return array def convert_unit_to(self, unit, equivalencies=[]): """ Returns a new `NDData` object whose values have been converted to a new unit. Parameters ---------- unit : `astropy.units.UnitBase` instance or str The unit to convert to. 
equivalencies : list of tuple A list of equivalence pairs to try if the units are not directly convertible. See :ref:`astropy:unit_equivalencies`. Returns ------- result : `~astropy.nddata.NDData` The resulting dataset Raises ------ `~astropy.units.UnitsError` If units are inconsistent. """ if self.unit is None: raise ValueError("No unit specified on source data") data = self.unit.to(unit, self.data, equivalencies=equivalencies) if self.uncertainty is not None: uncertainty_values = self.unit.to(unit, self.uncertainty.array, equivalencies=equivalencies) # should work for any uncertainty class uncertainty = self.uncertainty.__class__(uncertainty_values) else: uncertainty = None if self.mask is not None: new_mask = self.mask.copy() else: new_mask = None # Call __class__ in case we are dealing with an inherited type result = self.__class__(data, uncertainty=uncertainty, mask=new_mask, wcs=self.wcs, meta=self.meta, unit=unit) return result
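# Usage sketch (illustrative, not part of the module): convert_unit_to()
# scales the data and any uncertainty consistently and returns a new object.
import numpy as np
from astropy.nddata import NDDataArray, StdDevUncertainty

ndd = NDDataArray(np.array([1000., 2000.]),
                  uncertainty=StdDevUncertainty([10., 20.]),
                  unit='m')
ndd_km = ndd.convert_unit_to('km')
# ndd_km.data is now [1., 2.] and ndd_km.uncertainty.array is [0.01, 0.02]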
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements a class based on NDData with all Mixins.
"""

from .nddata import NDData
from .mixins.ndslicing import NDSlicingMixin
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin

__all__ = ['NDDataRef']


class NDDataRef(NDArithmeticMixin, NDIOMixin, NDSlicingMixin, NDData):
    """Implements `NDData` with all Mixins.

    This class implements a `NDData`-like container that supports reading
    and writing as implemented in ``astropy.io.registry`` and also slicing
    (indexing) and simple arithmetic (add, subtract, divide and multiply).

    Notes
    -----
    A key distinction from `NDDataArray` is that this class does not attempt
    to provide anything that was not defined in any of the parent classes.

    See also
    --------
    NDData
    NDArithmeticMixin
    NDSlicingMixin
    NDIOMixin

    Examples
    --------
    The mixins allow operations that are not possible with `NDData` or
    `NDDataBase`, i.e. simple arithmetic::

        >>> from astropy.nddata import NDDataRef, StdDevUncertainty
        >>> import numpy as np

        >>> data = np.ones((3,3), dtype=float)
        >>> ndd1 = NDDataRef(data, uncertainty=StdDevUncertainty(data))
        >>> ndd2 = NDDataRef(data, uncertainty=StdDevUncertainty(data))

        >>> ndd3 = ndd1.add(ndd2)
        >>> ndd3.data  # doctest: +FLOAT_CMP
        array([[2., 2., 2.],
               [2., 2., 2.],
               [2., 2., 2.]])
        >>> ndd3.uncertainty.array  # doctest: +FLOAT_CMP
        array([[1.41421356, 1.41421356, 1.41421356],
               [1.41421356, 1.41421356, 1.41421356],
               [1.41421356, 1.41421356, 1.41421356]])

    See `NDArithmeticMixin` for a complete list of all supported arithmetic
    operations.

    Slicing (indexing) is also possible::

        >>> ndd4 = ndd3[1,:]
        >>> ndd4.data  # doctest: +FLOAT_CMP
        array([2., 2., 2.])
        >>> ndd4.uncertainty.array  # doctest: +FLOAT_CMP
        array([1.41421356, 1.41421356, 1.41421356])

    See `NDSlicingMixin` for a description of how slicing works (i.e., which
    attributes are sliced).
    """
    pass
""" This package contains utilities that are only used when developing astropy in a copy of the source repository. These files are not installed, and should not be assumed to exist at runtime. """
# Try to use setuptools_scm to get the current version; this is only used # in development installations from the git repository. import os.path as pth try: from setuptools_scm import get_version version = get_version(root=pth.join('..', '..'), relative_to=__file__) except Exception: raise ImportError('setuptools_scm broken or not installed')
# Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings from erfa import core, helpers, ufunc # noqa from erfa.core import * # noqa from erfa.helpers import leap_seconds # noqa from erfa.ufunc import (dt_dmsf, dt_eraASTROM, dt_eraLDBODY, # noqa dt_eraLEAPSECOND, dt_hmsf, dt_pv, dt_sign, dt_type, dt_ymdf) from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn('The private astropy._erfa module has been made into its ' 'own package, pyerfa, which is a dependency of ' 'astropy and can be imported directly using "import erfa"', AstropyDeprecationWarning)
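# Migration sketch (illustrative): as the warning above says, code that used
# this private module should import pyerfa directly.  One example call using
# the public package (cal2jd returns the MJD zero point and the MJD):
import erfa

djm0, djm = erfa.cal2jd(2020, 1, 1)  # (2400000.5, 58849.0)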
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ .. _wcslib: https://www.atnf.csiro.au/people/mcalabre/WCS/wcslib/index.html .. _distortion paper: https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf .. _SIP: https://irsa.ipac.caltech.edu/data/SPITZER/docs/files/spitzer/shupeADASS.pdf .. _FITS WCS standard: https://fits.gsfc.nasa.gov/fits_wcs.html `astropy.wcs` contains utilities for managing World Coordinate System (WCS) transformations in FITS files. These transformations map the pixel locations in an image to their real-world units, such as their position on the sky sphere. It performs three separate classes of WCS transformations: - Core WCS, as defined in the `FITS WCS standard`_, based on Mark Calabretta's `wcslib`_. See `~astropy.wcs.Wcsprm`. - Simple Imaging Polynomial (`SIP`_) convention. See `~astropy.wcs.Sip`. - table lookup distortions as defined in WCS `distortion paper`_. See `~astropy.wcs.DistortionLookupTable`. Each of these transformations can be used independently or together in a standard pipeline. """ from .wcs import * from .wcs import InvalidTabularParametersError # just for docs from . import utils def get_include(): """ Get the path to astropy.wcs's C header files. """ import os return os.path.join(os.path.dirname(__file__), "include")
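# Usage sketch (illustrative, not part of the module): get_include() is
# intended for third-party packages that compile C extensions against
# astropy.wcs.  The extension name and source file below are hypothetical
# placeholders.
from setuptools import Extension
import astropy.wcs

ext = Extension('mypkg._wcs_helper',
                sources=['helper.c'],
                include_dirs=[astropy.wcs.get_include()])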
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Script support for validating the WCS keywords in a FITS file. """ def main(args=None): from . import wcs import argparse parser = argparse.ArgumentParser( description=("Check the WCS keywords in a FITS file for " "compliance against the standards")) parser.add_argument( 'filename', nargs=1, help='Path to FITS file to check') args = parser.parse_args(args) print(wcs.validate(args.filename[0]))
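# Usage sketch (illustrative): the parser takes exactly one positional
# argument, so the entry point can be exercised programmatically as well as
# from the command line ('example.fits' is a hypothetical file name):
#
#     main(['example.fits'])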
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io import os from os.path import join import os.path import shutil import sys from collections import defaultdict from setuptools import Extension from setuptools.dep_util import newer_group import numpy from extension_helpers import import_file, write_if_different, get_compiler, pkg_config WCSROOT = os.path.relpath(os.path.dirname(__file__)) WCSVERSION = "7.7" def b(s): return s.encode('ascii') def string_escape(s): s = s.decode('ascii').encode('ascii', 'backslashreplace') s = s.replace(b'\n', b'\\n') s = s.replace(b'\0', b'\\0') return s.decode('ascii') def determine_64_bit_int(): """ The only configuration parameter needed at compile-time is how to specify a 64-bit signed integer. Python's ctypes module can get us that information. If we can't be absolutely certain, we default to "long long int", which is correct on most platforms (x86, x86_64). If we find platforms where this heuristic doesn't work, we may need to hardcode for them. """ try: try: import ctypes except ImportError: raise ValueError() if ctypes.sizeof(ctypes.c_longlong) == 8: return "long long int" elif ctypes.sizeof(ctypes.c_long) == 8: return "long int" elif ctypes.sizeof(ctypes.c_int) == 8: return "int" else: raise ValueError() except ValueError: return "long long int" def write_wcsconfig_h(paths): """ Writes out the wcsconfig.h header with local configuration. """ h_file = io.StringIO() h_file.write(""" /* The bundled version has WCSLIB_VERSION */ #define HAVE_WCSLIB_VERSION 1 /* WCSLIB library version number. */ #define WCSLIB_VERSION {} /* 64-bit integer data type. */ #define WCSLIB_INT64 {} /* Windows needs some other defines to prevent inclusion of wcsset() which conflicts with wcslib's wcsset(). These need to be set on code that *uses* astropy.wcs, in addition to astropy.wcs itself. */ #if defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) || defined (__MINGW64__) #ifndef YY_NO_UNISTD_H #define YY_NO_UNISTD_H #endif #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif #ifndef _NO_OLDNAMES #define _NO_OLDNAMES #endif #ifndef NO_OLDNAMES #define NO_OLDNAMES #endif #ifndef __STDC__ #define __STDC__ 1 #endif #endif """.format(WCSVERSION, determine_64_bit_int())) content = h_file.getvalue().encode('ascii') for path in paths: write_if_different(path, content) ###################################################################### # GENERATE DOCSTRINGS IN C def generate_c_docstrings(): docstrings = import_file(os.path.join(WCSROOT, 'docstrings.py')) docstrings = docstrings.__dict__ keys = [ key for key, val in docstrings.items() if not key.startswith('__') and isinstance(val, str)] keys.sort() docs = {} for key in keys: docs[key] = docstrings[key].encode('utf8').lstrip() + b'\0' h_file = io.StringIO() h_file.write("""/* DO NOT EDIT! This file is autogenerated by astropy/wcs/setup_package.py. To edit its contents, edit astropy/wcs/docstrings.py */ #ifndef __DOCSTRINGS_H__ #define __DOCSTRINGS_H__ """) for key in keys: val = docs[key] h_file.write(f'extern char doc_{key}[{len(val)}];\n') h_file.write("\n#endif\n\n") write_if_different( join(WCSROOT, 'include', 'astropy_wcs', 'docstrings.h'), h_file.getvalue().encode('utf-8')) c_file = io.StringIO() c_file.write("""/* DO NOT EDIT! This file is autogenerated by astropy/wcs/setup_package.py. To edit its contents, edit astropy/wcs/docstrings.py The weirdness here with strncpy is because some C compilers, notably MSVC, do not support string literals greater than 256 characters. 
*/ #include <string.h> #include "astropy_wcs/docstrings.h" """) for key in keys: val = docs[key] c_file.write(f'char doc_{key}[{len(val)}] = {{\n') for i in range(0, len(val), 12): section = val[i:i+12] c_file.write(' ') c_file.write(''.join(f'0x{x:02x}, ' for x in section)) c_file.write('\n') c_file.write(" };\n\n") write_if_different( join(WCSROOT, 'src', 'docstrings.c'), c_file.getvalue().encode('utf-8')) def get_wcslib_cfg(cfg, wcslib_files, include_paths): debug = '--debug' in sys.argv cfg['include_dirs'].append(numpy.get_include()) cfg['define_macros'].extend([ ('ECHO', None), ('WCSTRIG_MACRO', None), ('ASTROPY_WCS_BUILD', None), ('_GNU_SOURCE', None)]) if ((int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0)) or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))) and not sys.platform == 'win32'): wcsconfig_h_path = join(WCSROOT, 'include', 'wcsconfig.h') if os.path.exists(wcsconfig_h_path): os.unlink(wcsconfig_h_path) for k, v in pkg_config(['wcslib'], ['wcs']).items(): cfg[k].extend(v) else: write_wcsconfig_h(include_paths) wcslib_path = join("cextern", "wcslib") # Path to wcslib wcslib_cpath = join(wcslib_path, "C") # Path to wcslib source files cfg['sources'].extend(join(wcslib_cpath, x) for x in wcslib_files) cfg['include_dirs'].append(wcslib_cpath) if debug: cfg['define_macros'].append(('DEBUG', None)) cfg['undef_macros'].append('NDEBUG') if (not sys.platform.startswith('sun') and not sys.platform == 'win32'): cfg['extra_compile_args'].extend(["-fno-inline", "-O0", "-g"]) else: # Define ECHO as nothing to prevent spurious newlines from # printing within the libwcs parser cfg['define_macros'].append(('NDEBUG', None)) cfg['undef_macros'].append('DEBUG') if sys.platform == 'win32': # These are written into wcsconfig.h, but that file is not # used by all parts of wcslib. 
cfg['define_macros'].extend([ ('YY_NO_UNISTD_H', None), ('_CRT_SECURE_NO_WARNINGS', None), ('_NO_OLDNAMES', None), # for mingw32 ('NO_OLDNAMES', None), # for mingw64 ('__STDC__', None) # for MSVC ]) if sys.platform.startswith('linux'): cfg['define_macros'].append(('HAVE_SINCOS', None)) # For 4.7+ enable C99 syntax in older compilers (need 'gnu99' std for gcc) if get_compiler() == 'unix': cfg['extra_compile_args'].extend(['-std=gnu99']) # Squelch a few compilation warnings in WCSLIB if get_compiler() in ('unix', 'mingw32'): if not debug: cfg['extra_compile_args'].extend([ '-Wno-strict-prototypes', '-Wno-unused-function', '-Wno-unused-value', '-Wno-uninitialized']) def get_extensions(): generate_c_docstrings() ###################################################################### # DISTUTILS SETUP cfg = defaultdict(list) wcslib_files = [ # List of wcslib files to compile 'flexed/wcsbth.c', 'flexed/wcspih.c', 'flexed/wcsulex.c', 'flexed/wcsutrn.c', 'cel.c', 'dis.c', 'lin.c', 'log.c', 'prj.c', 'spc.c', 'sph.c', 'spx.c', 'tab.c', 'wcs.c', 'wcserr.c', 'wcsfix.c', 'wcshdr.c', 'wcsprintf.c', 'wcsunits.c', 'wcsutil.c' ] wcslib_config_paths = [ join(WCSROOT, 'include', 'astropy_wcs', 'wcsconfig.h'), join(WCSROOT, 'include', 'wcsconfig.h') ] get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths) cfg['include_dirs'].append(join(WCSROOT, "include")) astropy_wcs_files = [ # List of astropy.wcs files to compile 'distortion.c', 'distortion_wrap.c', 'docstrings.c', 'pipeline.c', 'pyutil.c', 'astropy_wcs.c', 'astropy_wcs_api.c', 'sip.c', 'sip_wrap.c', 'str_list_proxy.c', 'unit_list_proxy.c', 'util.c', 'wcslib_wrap.c', 'wcslib_auxprm_wrap.c', 'wcslib_prjprm_wrap.c', 'wcslib_celprm_wrap.c', 'wcslib_tabprm_wrap.c', 'wcslib_wtbarr_wrap.c' ] cfg['sources'].extend(join(WCSROOT, 'src', x) for x in astropy_wcs_files) cfg['sources'] = [str(x) for x in cfg['sources']] cfg = dict((str(key), val) for key, val in cfg.items()) # Copy over header files from WCSLIB into the installed version of Astropy # so that other Python packages can write extensions that link to it. We # do the copying here then include the data in [options.package_data] in # the setup.cfg file wcslib_headers = [ 'cel.h', 'lin.h', 'prj.h', 'spc.h', 'spx.h', 'tab.h', 'wcs.h', 'wcserr.h', 'wcsmath.h', 'wcsprintf.h', ] if not (int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0)) or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))): for header in wcslib_headers: source = join('cextern', 'wcslib', 'C', header) dest = join('astropy', 'wcs', 'include', 'wcslib', header) if newer_group([source], dest, 'newer'): shutil.copy(source, dest) return [Extension('astropy.wcs._wcs', **cfg)]
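# Build-configuration sketch (illustrative): the system-wcslib branch in
# get_wcslib_cfg() above is selected via environment variables before the
# build, e.g. (shell syntax, command shown as one plausible invocation):
#
#     ASTROPY_USE_SYSTEM_WCSLIB=1 pip install astropy --no-binary astropy
#
# in which case ``cfg`` is filled from pkg_config(['wcslib'], ['wcs'])
# instead of compiling the bundled cextern/wcslib sources.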
# Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import numpy as np import astropy.units as u from astropy.coordinates import CartesianRepresentation, SphericalRepresentation, ITRS from astropy.utils import unbroadcast from .wcs import WCS, WCSSUB_LATITUDE, WCSSUB_LONGITUDE __doctest_skip__ = ['wcs_to_celestial_frame', 'celestial_frame_to_wcs'] __all__ = ['obsgeo_to_frame', 'add_stokes_axis_to_wcs', 'celestial_frame_to_wcs', 'wcs_to_celestial_frame', 'proj_plane_pixel_scales', 'proj_plane_pixel_area', 'is_proj_plane_distorted', 'non_celestial_pixel_scales', 'skycoord_to_pixel', 'pixel_to_skycoord', 'custom_wcs_to_frame_mappings', 'custom_frame_to_wcs_mappings', 'pixel_to_pixel', 'local_partial_pixel_derivatives', 'fit_wcs_from_points'] def add_stokes_axis_to_wcs(wcs, add_before_ind): """ Add a new Stokes axis that is uncorrelated with any other axes. Parameters ---------- wcs : `~astropy.wcs.WCS` The WCS to add to add_before_ind : int Index of the WCS to insert the new Stokes axis in front of. To add at the end, do add_before_ind = wcs.wcs.naxis The beginning is at position 0. Returns ------- `~astropy.wcs.WCS` A new `~astropy.wcs.WCS` instance with an additional axis """ inds = [i + 1 for i in range(wcs.wcs.naxis)] inds.insert(add_before_ind, 0) newwcs = wcs.sub(inds) newwcs.wcs.ctype[add_before_ind] = 'STOKES' newwcs.wcs.cname[add_before_ind] = 'STOKES' return newwcs def _wcs_to_celestial_frame_builtin(wcs): # Import astropy.coordinates here to avoid circular imports from astropy.coordinates import (FK4, FK5, ICRS, ITRS, FK4NoETerms, Galactic, SphericalRepresentation) # Import astropy.time here otherwise setup.py fails before extensions are compiled from astropy.time import Time if wcs.wcs.lng == -1 or wcs.wcs.lat == -1: return None radesys = wcs.wcs.radesys if np.isnan(wcs.wcs.equinox): equinox = None else: equinox = wcs.wcs.equinox xcoord = wcs.wcs.ctype[wcs.wcs.lng][:4] ycoord = wcs.wcs.ctype[wcs.wcs.lat][:4] # Apply logic from FITS standard to determine the default radesys if radesys == '' and xcoord == 'RA--' and ycoord == 'DEC-': if equinox is None: radesys = "ICRS" elif equinox < 1984.: radesys = "FK4" else: radesys = "FK5" if radesys == 'FK4': if equinox is not None: equinox = Time(equinox, format='byear') frame = FK4(equinox=equinox) elif radesys == 'FK4-NO-E': if equinox is not None: equinox = Time(equinox, format='byear') frame = FK4NoETerms(equinox=equinox) elif radesys == 'FK5': if equinox is not None: equinox = Time(equinox, format='jyear') frame = FK5(equinox=equinox) elif radesys == 'ICRS': frame = ICRS() else: if xcoord == 'GLON' and ycoord == 'GLAT': frame = Galactic() elif xcoord == 'TLON' and ycoord == 'TLAT': # The default representation for ITRS is cartesian, but for WCS # purposes, we need the spherical representation. 
frame = ITRS(representation_type=SphericalRepresentation, obstime=wcs.wcs.dateobs or None) else: frame = None return frame def _celestial_frame_to_wcs_builtin(frame, projection='TAN'): # Import astropy.coordinates here to avoid circular imports from astropy.coordinates import FK4, FK5, ICRS, ITRS, BaseRADecFrame, FK4NoETerms, Galactic # Create a 2-dimensional WCS wcs = WCS(naxis=2) if isinstance(frame, BaseRADecFrame): xcoord = 'RA--' ycoord = 'DEC-' if isinstance(frame, ICRS): wcs.wcs.radesys = 'ICRS' elif isinstance(frame, FK4NoETerms): wcs.wcs.radesys = 'FK4-NO-E' wcs.wcs.equinox = frame.equinox.byear elif isinstance(frame, FK4): wcs.wcs.radesys = 'FK4' wcs.wcs.equinox = frame.equinox.byear elif isinstance(frame, FK5): wcs.wcs.radesys = 'FK5' wcs.wcs.equinox = frame.equinox.jyear else: return None elif isinstance(frame, Galactic): xcoord = 'GLON' ycoord = 'GLAT' elif isinstance(frame, ITRS): xcoord = 'TLON' ycoord = 'TLAT' wcs.wcs.radesys = 'ITRS' wcs.wcs.dateobs = frame.obstime.utc.isot else: return None wcs.wcs.ctype = [xcoord + '-' + projection, ycoord + '-' + projection] return wcs WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]] FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]] class custom_wcs_to_frame_mappings: def __init__(self, mappings=[]): if hasattr(mappings, '__call__'): mappings = [mappings] WCS_FRAME_MAPPINGS.append(mappings) def __enter__(self): pass def __exit__(self, type, value, tb): WCS_FRAME_MAPPINGS.pop() # Backward-compatibility custom_frame_mappings = custom_wcs_to_frame_mappings class custom_frame_to_wcs_mappings: def __init__(self, mappings=[]): if hasattr(mappings, '__call__'): mappings = [mappings] FRAME_WCS_MAPPINGS.append(mappings) def __enter__(self): pass def __exit__(self, type, value, tb): FRAME_WCS_MAPPINGS.pop() def wcs_to_celestial_frame(wcs): """ For a given WCS, return the coordinate frame that matches the celestial component of the WCS. Parameters ---------- wcs : :class:`~astropy.wcs.WCS` instance The WCS to find the frame for Returns ------- frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance that best matches the specified WCS. Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.wcs.WCS` instance and should return either an instance of a frame, or `None` if no matching frame was found. You can register this function temporarily with:: >>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings >>> with custom_wcs_to_frame_mappings(my_function): ... wcs_to_celestial_frame(...) """ for mapping_set in WCS_FRAME_MAPPINGS: for func in mapping_set: frame = func(wcs) if frame is not None: return frame raise ValueError("Could not determine celestial frame corresponding to " "the specified WCS object") def celestial_frame_to_wcs(frame, projection='TAN'): """ For a given coordinate frame, return the corresponding WCS object. Note that the returned WCS object has only the elements corresponding to coordinate frames set (e.g. ctype, equinox, radesys). 
Parameters ---------- frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance for which to find the WCS projection : str Projection code to use in ctype, if applicable Returns ------- wcs : :class:`~astropy.wcs.WCS` instance The corresponding WCS object Examples -------- :: >>> from astropy.wcs.utils import celestial_frame_to_wcs >>> from astropy.coordinates import FK5 >>> frame = FK5(equinox='J2010') >>> wcs = celestial_frame_to_wcs(frame) >>> wcs.to_header() WCSAXES = 2 / Number of coordinate axes CRPIX1 = 0.0 / Pixel coordinate of reference point CRPIX2 = 0.0 / Pixel coordinate of reference point CDELT1 = 1.0 / [deg] Coordinate increment at reference point CDELT2 = 1.0 / [deg] Coordinate increment at reference point CUNIT1 = 'deg' / Units of coordinate increment and value CUNIT2 = 'deg' / Units of coordinate increment and value CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection CRVAL1 = 0.0 / [deg] Coordinate value at reference point CRVAL2 = 0.0 / [deg] Coordinate value at reference point LONPOLE = 180.0 / [deg] Native longitude of celestial pole LATPOLE = 0.0 / [deg] Native latitude of celestial pole RADESYS = 'FK5' / Equatorial coordinate system EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance and a projection (given as a string) and should return either a WCS instance, or `None` if the WCS could not be determined. You can register this function temporarily with:: >>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings >>> with custom_frame_to_wcs_mappings(my_function): ... celestial_frame_to_wcs(...) """ for mapping_set in FRAME_WCS_MAPPINGS: for func in mapping_set: wcs = func(frame, projection=projection) if wcs is not None: return wcs raise ValueError("Could not determine WCS corresponding to the specified " "coordinate frame.") def proj_plane_pixel_scales(wcs): """ For a WCS returns pixel scales along each axis of the image pixel at the ``CRPIX`` location once it is projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. .. note:: This function is concerned **only** about the transformation "image plane"->"projection plane" and **not** about the transformation "celestial sphere"->"projection plane"->"image plane". Therefore, this function ignores distortions arising due to non-linear nature of most projections. .. note:: In order to compute the scales corresponding to celestial axes only, make sure that the input `~astropy.wcs.WCS` object contains celestial axes only, e.g., by passing in the `~astropy.wcs.WCS.celestial` WCS object. Parameters ---------- wcs : `~astropy.wcs.WCS` A world coordinate system object. Returns ------- scale : ndarray A vector (`~numpy.ndarray`) of projection plane increments corresponding to each pixel side (axis). 
The units of the returned results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit` property of the input `~astropy.wcs.WCS` WCS object. See Also -------- astropy.wcs.utils.proj_plane_pixel_area """ return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float)) def proj_plane_pixel_area(wcs): """ For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel area of the image pixel at the ``CRPIX`` location once it is projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. .. note:: This function is concerned **only** about the transformation "image plane"->"projection plane" and **not** about the transformation "celestial sphere"->"projection plane"->"image plane". Therefore, this function ignores distortions arising due to non-linear nature of most projections. .. note:: In order to compute the area of pixels corresponding to celestial axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS object of the input ``wcs``. This is different from the `~astropy.wcs.utils.proj_plane_pixel_scales` function that computes the scales for the axes of the input WCS itself. Parameters ---------- wcs : `~astropy.wcs.WCS` A world coordinate system object. Returns ------- area : float Area (in the projection plane) of the pixel at ``CRPIX`` location. The units of the returned result are the same as the units of the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit` property of the `~astropy.wcs.WCS.celestial` WCS object. Raises ------ ValueError Pixel area is defined only for 2D pixels. Most likely the `~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial` WCS is not a square matrix of second order. Notes ----- Depending on the application, square root of the pixel area can be used to represent a single pixel scale of an equivalent square pixel whose area is equal to the area of a generally non-square pixel. See Also -------- astropy.wcs.utils.proj_plane_pixel_scales """ psm = wcs.celestial.pixel_scale_matrix if psm.shape != (2, 2): raise ValueError("Pixel area is defined only for 2D pixels.") return np.abs(np.linalg.det(psm)) def is_proj_plane_distorted(wcs, maxerr=1.0e-5): r""" For a WCS returns `False` if square image (detector) pixels stay square when projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. It will return `True` if transformation from image (detector) coordinates to the focal plane coordinates is non-orthogonal or if WCS contains non-linear (e.g., SIP) distortions. .. note:: Since this function is concerned **only** about the transformation "image plane"->"focal plane" and **not** about the transformation "celestial sphere"->"focal plane"->"image plane", this function ignores distortions arising due to non-linear nature of most projections. Let's denote by *C* either the original or the reconstructed (from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted` verifies that the transformation from image (detector) coordinates to the focal plane coordinates is orthogonal using the following check: .. 
math::
        \left \| \frac{C \cdot C^{\mathrm{T}}}{|\det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        World coordinate system object

    maxerr : float, optional
        Accuracy to which the CD matrix, **normalized** such
        that :math:`|\det(CD)| = 1`, should be close to being an
        orthogonal matrix as described in the above equation
        (see :math:`\epsilon`).

    Returns
    -------
    distorted : bool
        Returns `True` if the focal (projection) plane is distorted and
        `False` otherwise.

    """
    cwcs = wcs.celestial
    return (not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or
            _has_distortion(cwcs))


def _is_cd_orthogonal(cd, maxerr):
    shape = cd.shape
    if not (len(shape) == 2 and shape[0] == shape[1]):
        raise ValueError("CD (or PC) matrix must be a 2D square matrix.")

    pixarea = np.abs(np.linalg.det(cd))
    if (pixarea == 0.0):
        raise ValueError("CD (or PC) matrix is singular.")

    # NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
    # However, I am not aware of complex CD/PC matrices...
    I = np.dot(cd, cd.T) / pixarea
    cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))

    return (cd_unitary_err < maxerr)


def non_celestial_pixel_scales(inwcs):
    """
    Calculate the pixel scale along each axis of a non-celestial WCS,
    for example one with mixed spectral and spatial axes.

    Parameters
    ----------
    inwcs : `~astropy.wcs.WCS`
        The world coordinate system object.

    Returns
    -------
    scale : `numpy.ndarray`
        The pixel scale along each axis.
    """
    if inwcs.is_celestial:
        raise ValueError("WCS is celestial, use proj_plane_pixel_scales instead")

    pccd = inwcs.pixel_scale_matrix

    if np.allclose(np.extract(1 - np.eye(*pccd.shape), pccd), 0):
        return np.abs(np.diagonal(pccd)) * u.deg
    else:
        raise ValueError("WCS is rotated, cannot determine consistent pixel scales")


def _has_distortion(wcs):
    """
    `True` if the WCS contains any SIP or image distortion components.
    """
    return any(getattr(wcs, dist_attr) is not None
               for dist_attr in ['cpdis1', 'cpdis2', 'det2im1', 'det2im2', 'sip'])


# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.

def skycoord_to_pixel(coords, wcs, origin=0, mode='all'):
    """
    Convert a set of SkyCoord coordinates into pixels.

    Parameters
    ----------
    coords : `~astropy.coordinates.SkyCoord`
        The coordinates to convert.
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to use.
    origin : int
        Whether to return 0 or 1-based pixel coordinates.
    mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        including only the core WCS transformation (``'wcs'``).

    Returns
    -------
    xp, yp : `numpy.ndarray`
        The pixel coordinates

    See Also
    --------
    astropy.coordinates.SkyCoord.from_pixel
    """

    if _has_distortion(wcs) and wcs.naxis != 2:
        raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")

    # Keep only the celestial part of the axes, also re-orders lon/lat
    wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])

    if wcs.naxis != 2:
        raise ValueError("WCS should contain celestial component")

    # Check which frame the WCS uses
    frame = wcs_to_celestial_frame(wcs)

    # Check what unit the WCS needs
    xw_unit = u.Unit(wcs.wcs.cunit[0])
    yw_unit = u.Unit(wcs.wcs.cunit[1])

    # Convert positions to frame
    coords = coords.transform_to(frame)

    # Extract longitude and latitude. We first try and use lon/lat directly,
    # but if the representation is not spherical or unit spherical this will
    # fail. We should then force the use of the unit spherical
    # representation.
We don't do that directly to make sure that we preserve
    # custom lon/lat representations if available.
    try:
        lon = coords.data.lon.to(xw_unit)
        lat = coords.data.lat.to(yw_unit)
    except AttributeError:
        lon = coords.spherical.lon.to(xw_unit)
        lat = coords.spherical.lat.to(yw_unit)

    # Convert to pixel coordinates
    if mode == 'all':
        xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
    elif mode == 'wcs':
        xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
    else:
        raise ValueError("mode should be either 'all' or 'wcs'")

    return xp, yp


def pixel_to_skycoord(xp, yp, wcs, origin=0, mode='all', cls=None):
    """
    Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
    coordinate.

    Parameters
    ----------
    xp, yp : float or ndarray
        The coordinates to convert.
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to use.
    origin : int
        Whether the input pixel coordinates are 0- or 1-based.
    mode : 'all' or 'wcs'
        Whether to do the transformation including distortions (``'all'``) or
        including only the core WCS transformation (``'wcs'``).
    cls : class or None
        The class of object to create.  Should be a
        `~astropy.coordinates.SkyCoord` subclass.  If None, defaults to
        `~astropy.coordinates.SkyCoord`.

    Returns
    -------
    coords : `~astropy.coordinates.SkyCoord` subclass
        The celestial coordinates, returned as an instance of ``cls``.

    See Also
    --------
    astropy.coordinates.SkyCoord.from_pixel
    """

    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates import SkyCoord, UnitSphericalRepresentation

    # we have to do this instead of actually setting the default to SkyCoord
    # because importing SkyCoord at the module-level leads to circular
    # dependencies.
    if cls is None:
        cls = SkyCoord

    if _has_distortion(wcs) and wcs.naxis != 2:
        raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")

    # Keep only the celestial part of the axes, also re-orders lon/lat
    wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])

    if wcs.naxis != 2:
        raise ValueError("WCS should contain celestial component")

    # Check which frame the WCS uses
    frame = wcs_to_celestial_frame(wcs)

    # Check what unit the WCS gives
    lon_unit = u.Unit(wcs.wcs.cunit[0])
    lat_unit = u.Unit(wcs.wcs.cunit[1])

    # Convert pixel coordinates to celestial coordinates
    if mode == 'all':
        lon, lat = wcs.all_pix2world(xp, yp, origin)
    elif mode == 'wcs':
        lon, lat = wcs.wcs_pix2world(xp, yp, origin)
    else:
        raise ValueError("mode should be either 'all' or 'wcs'")

    # Add units to longitude/latitude
    lon = lon * lon_unit
    lat = lat * lat_unit

    # Create a SkyCoord-like object
    data = UnitSphericalRepresentation(lon=lon, lat=lat)
    coords = cls(frame.realize_frame(data))

    return coords


def _unique_with_order_preserved(items):
    """
    Return a list of unique items in the list provided, preserving the order
    in which they are found.
    """
    new_items = []
    for item in items:
        if item not in new_items:
            new_items.append(item)
    return new_items


def _pixel_to_world_correlation_matrix(wcs):
    """
    Return a correlation matrix between the pixel coordinates and the high
    level world coordinates, along with the list of high level world
    coordinate classes.

    The shape of the matrix is ``(n_world, n_pix)``, where ``n_world`` is the
    number of high level world coordinates.
    """

    # We basically want to collapse the world dimensions together that are
    # combined into the same high-level objects.
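    # As a concrete illustration (hypothetical, not tied to any WCS used in
    # this module): for a FITS WCS with CTYPE = (RA, DEC, WAVE), RA and DEC
    # are served by a single SkyCoord object, so the 3 x n_pix axis
    # correlation matrix collapses to a 2 x n_pix matrix below.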
# Get the following in advance as getting these properties can be expensive all_components = wcs.low_level_wcs.world_axis_object_components all_classes = wcs.low_level_wcs.world_axis_object_classes axis_correlation_matrix = wcs.low_level_wcs.axis_correlation_matrix components = _unique_with_order_preserved([c[0] for c in all_components]) matrix = np.zeros((len(components), wcs.pixel_n_dim), dtype=bool) for iworld in range(wcs.world_n_dim): iworld_unique = components.index(all_components[iworld][0]) matrix[iworld_unique] |= axis_correlation_matrix[iworld] classes = [all_classes[component][0] for component in components] return matrix, classes def _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out): """ Correlation matrix between the input and output pixel coordinates for a pixel -> world -> pixel transformation specified by two WCS instances. The first WCS specified is the one used for the pixel -> world transformation and the second WCS specified is the one used for the world -> pixel transformation. The shape of the matrix is ``(n_pixel_out, n_pixel_in)``. """ matrix1, classes1 = _pixel_to_world_correlation_matrix(wcs_in) matrix2, classes2 = _pixel_to_world_correlation_matrix(wcs_out) if len(classes1) != len(classes2): raise ValueError("The two WCS return a different number of world coordinates") # Check if classes match uniquely unique_match = True mapping = [] for class1 in classes1: matches = classes2.count(class1) if matches == 0: raise ValueError("The world coordinate types of the two WCS do not match") elif matches > 1: unique_match = False break else: mapping.append(classes2.index(class1)) if unique_match: # Classes are unique, so we need to re-order matrix2 along the world # axis using the mapping we found above. matrix2 = matrix2[mapping] elif classes1 != classes2: raise ValueError("World coordinate order doesn't match and automatic matching is ambiguous") matrix = np.matmul(matrix2.T, matrix1) return matrix def _split_matrix(matrix): """ Given an axis correlation matrix from a WCS object, return information about the individual WCS that can be split out. The output is a list of tuples, where each tuple contains a list of pixel dimensions and a list of world dimensions that can be extracted to form a new WCS. For example, in the case of a spectral cube with the first two world coordinates being the celestial coordinates and the third coordinate being an uncorrelated spectral axis, the matrix would look like:: array([[ True, True, False], [ True, True, False], [False, False, True]]) and this function will return ``[([0, 1], [0, 1]), ([2], [2])]``. """ pixel_used = [] split_info = [] for ipix in range(matrix.shape[1]): if ipix in pixel_used: continue pixel_include = np.zeros(matrix.shape[1], dtype=bool) pixel_include[ipix] = True n_pix_prev, n_pix = 0, 1 while n_pix > n_pix_prev: world_include = matrix[:, pixel_include].any(axis=1) pixel_include = matrix[world_include, :].any(axis=0) n_pix_prev, n_pix = n_pix, np.sum(pixel_include) pixel_indices = list(np.nonzero(pixel_include)[0]) world_indices = list(np.nonzero(world_include)[0]) pixel_used.extend(pixel_indices) split_info.append((pixel_indices, world_indices)) return split_info def pixel_to_pixel(wcs_in, wcs_out, *inputs): """ Transform pixel coordinates in a dataset with a WCS to pixel coordinates in another dataset with a different WCS. This function is designed to efficiently deal with input pixel arrays that are broadcasted views of smaller arrays, and is compatible with any APE14-compliant WCS. 
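    As a minimal usage sketch (``wcs1`` and ``wcs2`` are assumed to be two
    APE 14-compliant WCS objects whose world coordinates match, and
    ``x1``/``y1`` are pixel coordinate arrays)::

        x2, y2 = pixel_to_pixel(wcs1, wcs2, x1, y1)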
Parameters
    ----------
    wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
        A WCS object for the original dataset which complies with the
        high-level shared APE 14 WCS API.
    wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
        A WCS object for the target dataset which complies with the
        high-level shared APE 14 WCS API.
    *inputs :
        Scalars or arrays giving the pixel coordinates to transform.
    """
    # Shortcut for scalars
    if np.isscalar(inputs[0]):
        world_outputs = wcs_in.pixel_to_world(*inputs)
        if not isinstance(world_outputs, (tuple, list)):
            world_outputs = (world_outputs,)
        return wcs_out.world_to_pixel(*world_outputs)

    # Remember original shape
    original_shape = inputs[0].shape

    matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
    split_info = _split_matrix(matrix)

    outputs = [None] * wcs_out.pixel_n_dim

    for (pixel_in_indices, pixel_out_indices) in split_info:

        pixel_inputs = []
        for ipix in range(wcs_in.pixel_n_dim):
            if ipix in pixel_in_indices:
                pixel_inputs.append(unbroadcast(inputs[ipix]))
            else:
                pixel_inputs.append(inputs[ipix].flat[0])

        pixel_inputs = np.broadcast_arrays(*pixel_inputs)

        world_outputs = wcs_in.pixel_to_world(*pixel_inputs)

        if not isinstance(world_outputs, (tuple, list)):
            world_outputs = (world_outputs,)

        pixel_outputs = wcs_out.world_to_pixel(*world_outputs)

        if wcs_out.pixel_n_dim == 1:
            pixel_outputs = (pixel_outputs,)

        for ipix in range(wcs_out.pixel_n_dim):
            if ipix in pixel_out_indices:
                outputs[ipix] = np.broadcast_to(pixel_outputs[ipix], original_shape)

    return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs


def local_partial_pixel_derivatives(wcs, *pixel, normalize_by_world=False):
    """
    Return a matrix of shape ``(world_n_dim, pixel_n_dim)`` where each entry
    ``[i, j]`` is the partial derivative d(world_i)/d(pixel_j) at the
    requested pixel position.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS transformation to evaluate the derivatives for.
    *pixel : float
        The scalar pixel coordinates at which to evaluate the derivatives.
    normalize_by_world : bool
        If `True`, the matrix is normalized so that for each world entry
        the derivatives add up to 1.
    """

    # Find the world coordinates at the requested pixel
    pixel_ref = np.array(pixel)
    world_ref = np.array(wcs.pixel_to_world_values(*pixel_ref))

    # Set up the derivative matrix
    derivatives = np.zeros((wcs.world_n_dim, wcs.pixel_n_dim))

    for i in range(wcs.pixel_n_dim):
        pixel_off = pixel_ref.copy()
        pixel_off[i] += 1
        world_off = np.array(wcs.pixel_to_world_values(*pixel_off))
        derivatives[:, i] = world_off - world_ref

    if normalize_by_world:
        derivatives /= derivatives.sum(axis=0)[:, np.newaxis]

    return derivatives


def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
    """
    Objective function for fitting linear terms.

    Parameters
    ----------
    params : array
        6 element array. First 4 elements are the flattened CD matrix,
        last 2 are CRPIX.
    lon, lat: array
        Sky coordinates.
    x, y: array
        Pixel coordinates
    w_obj: `~astropy.wcs.WCS`
        WCS object
    """
    cd = params[0:4]
    crpix = params[4:6]

    w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))
    w_obj.wcs.crpix = crpix
    lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)

    lat_resids = lat - lat2
    lon_resids = lon - lon2
    # In case the longitude has wrapped around
    lon_resids = np.mod(lon_resids - 180.0, 360.0) - 180.0

    resids = np.concatenate((lon_resids * np.cos(np.radians(lat)),
                             lat_resids))

    return resids


def _sip_fit(params, lon, lat, u, v, w_obj, order, coeff_names):
    """
    Objective function for fitting SIP.

    Parameters
    ----------
    params : array
        Fittable parameters. First 2 elements are CRPIX, the next 4 are the
        flattened CD matrix, and the remaining elements are the SIP ``A``
        and ``B`` coefficients.
    lon, lat: array
        Sky coordinates.
u, v: array
        Pixel coordinates
    w_obj: `~astropy.wcs.WCS`
        WCS object
    """
    from ..modeling.models import SIP  # here to avoid circular import

    # unpack params
    crpix = params[0:2]
    cdx = params[2:6].reshape((2, 2))
    a_params = params[6:6 + len(coeff_names)]
    b_params = params[6 + len(coeff_names):]

    # assign to wcs, used for transformations in this function
    w_obj.wcs.cd = cdx
    w_obj.wcs.crpix = crpix

    a_coeff, b_coeff = {}, {}
    for i in range(len(coeff_names)):
        a_coeff['A_' + coeff_names[i]] = a_params[i]
        b_coeff['B_' + coeff_names[i]] = b_params[i]

    sip = SIP(crpix=crpix, a_order=order, b_order=order,
              a_coeff=a_coeff, b_coeff=b_coeff)
    fuv, guv = sip(u, v)

    xo, yo = np.dot(cdx, np.array([u + fuv - crpix[0], v + guv - crpix[1]]))

    # use all pix2world in case `projection` contains distortion table
    x, y = w_obj.all_world2pix(lon, lat, 0)
    x, y = np.dot(w_obj.wcs.cd, (x - w_obj.wcs.crpix[0], y - w_obj.wcs.crpix[1]))

    resids = np.concatenate((x - xo, y - yo))

    return resids


def fit_wcs_from_points(xy, world_coords, proj_point='center',
                        projection='TAN', sip_degree=None):
    """
    Given two matching sets of coordinates on detector and sky, compute
    the WCS.

    Fits a WCS object to matched set of input detector and sky coordinates.
    Optionally, a SIP can be fit to account for geometric distortion.
    Returns an `~astropy.wcs.WCS` object with the best fit parameters for
    mapping between input pixel and sky coordinates.

    The projection type (default 'TAN') can be passed in as a string, one of
    the valid three-letter projection codes - or as a WCS object with
    projection keywords already set. Note that if an input WCS has any
    non-polynomial distortion, this will be applied and reflected in the fit
    terms and coefficients. Passing in a WCS object in this way essentially
    allows it to be refit based on the matched input coordinates and
    projection point, but take care when using this option as non-projection
    related keywords in the input might cause unexpected behavior.

    Notes
    -----
    - The fiducial point for the spherical projection can be set to 'center'
      to use the mean position of input sky coordinates, or as an
      `~astropy.coordinates.SkyCoord` object.
    - Units in all output WCS objects will always be in degrees.
    - If the coordinate frame differs between `~astropy.coordinates.SkyCoord`
      objects passed in for ``world_coords`` and ``proj_point``, the frame
      for ``world_coords`` will be used as the frame for the output WCS.
    - If a WCS object is passed in to ``projection`` the CD/PC matrix will
      be used as an initial guess for the fit. If this is known to be
      significantly off and may throw off the fit, set to the identity
      matrix (for example, by doing wcs.wcs.pc = [(1., 0.,), (0., 1.)])

    Parameters
    ----------
    xy : (`numpy.ndarray`, `numpy.ndarray`) tuple
        x & y pixel coordinates.
    world_coords : `~astropy.coordinates.SkyCoord`
        SkyCoord object with world coordinates.
    proj_point : 'center' or `~astropy.coordinates.SkyCoord`
        Defaults to 'center', in which case the geometric center of input
        world coordinates will be used as the projection point. To specify
        an exact point for the projection, a SkyCoord object with a
        coordinate pair can be passed in. For consistency, the units and
        frame of these coordinates will be transformed to match
        ``world_coords`` if they don't.
    projection : str or `~astropy.wcs.WCS`
        Three letter projection code, of any of standard projections defined
        in the FITS WCS standard. Optionally, a WCS object with projection
        keywords set may be passed in.
sip_degree : None or int
        If set to a non-zero integer value, will fit SIP of degree
        ``sip_degree`` to model geometric distortion. Defaults to None,
        meaning no distortion corrections will be fit.

    Returns
    -------
    wcs : `~astropy.wcs.WCS`
        The best-fit WCS to the points given.
    """
    from scipy.optimize import least_squares
    import astropy.units as u
    from astropy.coordinates import SkyCoord  # here to avoid circular import
    from .wcs import Sip

    xp, yp = xy
    try:
        lon, lat = world_coords.data.lon.deg, world_coords.data.lat.deg
    except AttributeError:
        unit_sph = world_coords.unit_spherical
        lon, lat = unit_sph.lon.deg, unit_sph.lat.deg

    # verify input
    if (type(proj_point) != type(world_coords)) and (proj_point != 'center'):
        raise ValueError("proj_point must be set to 'center', or an "
                         "`~astropy.coordinates.SkyCoord` object with "
                         "a pair of points.")

    use_center_as_proj_point = (str(proj_point) == 'center')

    if not use_center_as_proj_point:
        assert proj_point.size == 1

    proj_codes = [
        'AZP', 'SZP', 'TAN', 'STG', 'SIN', 'ARC', 'ZEA', 'AIR', 'CYP',
        'CEA', 'CAR', 'MER', 'SFL', 'PAR', 'MOL', 'AIT', 'COP', 'COE',
        'COD', 'COO', 'BON', 'PCO', 'TSC', 'CSC', 'QSC', 'HPX', 'XPH'
    ]
    if type(projection) == str:
        if projection not in proj_codes:
            raise ValueError("Must specify valid projection code from list of "
                             "supported types: " + ', '.join(proj_codes))
        # empty wcs to fill in with fit values
        wcs = celestial_frame_to_wcs(frame=world_coords.frame,
                                     projection=projection)
    else:
        # if projection is not a string, it should be a WCS object; use it
        # as a template.
        wcs = copy.deepcopy(projection)
        wcs.cdelt = (1., 1.)  # make sure cdelt is 1
        wcs.sip = None

        # Change PC to CD, since cdelt will be set to 1
        if wcs.wcs.has_pc():
            wcs.wcs.cd = wcs.wcs.pc
            wcs.wcs.__delattr__('pc')

    if (type(sip_degree) != type(None)) and (type(sip_degree) != int):
        raise ValueError("sip_degree must be None, or integer.")

    # compute bounding box for sources in image coordinates:
    xpmin, xpmax, ypmin, ypmax = xp.min(), xp.max(), yp.min(), yp.max()

    # set pixel_shape to span of input points
    wcs.pixel_shape = (1 if xpmax <= 0.0 else int(np.ceil(xpmax)),
                       1 if ypmax <= 0.0 else int(np.ceil(ypmax)))

    # determine CRVAL from input
    close = lambda l, p: p[np.argmin(np.abs(l))]
    if use_center_as_proj_point:  # use center of input points
        sc1 = SkyCoord(lon.min() * u.deg, lat.max() * u.deg)
        sc2 = SkyCoord(lon.max() * u.deg, lat.min() * u.deg)
        pa = sc1.position_angle(sc2)
        sep = sc1.separation(sc2)
        midpoint_sc = sc1.directional_offset_by(pa, sep / 2)
        wcs.wcs.crval = ((midpoint_sc.data.lon.deg, midpoint_sc.data.lat.deg))
        wcs.wcs.crpix = ((xpmax + xpmin) / 2., (ypmax + ypmin) / 2.)
    else:  # convert units, initial guess for crpix
        proj_point.transform_to(world_coords)
        wcs.wcs.crval = (proj_point.data.lon.deg, proj_point.data.lat.deg)
        # pick the input pixel whose longitude/latitude is closest to the
        # projection point along each axis
        wcs.wcs.crpix = (close(lon - wcs.wcs.crval[0], xp + 1),
                         close(lat - wcs.wcs.crval[1], yp + 1))

    # fit linear terms, assign to wcs
    # use (1, 0, 0, 1) as initial guess, in case input wcs was passed in
    # and cd terms are way off.
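    # The parameter vector handed to least_squares below is laid out as
    # [cd11, cd12, cd21, cd22, crpix1, crpix2], matching _linear_wcs_fit.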
# Use bounds to require that the fit center pixel is on the input image
    if xpmin == xpmax:
        xpmin, xpmax = xpmin - 0.5, xpmax + 0.5
    if ypmin == ypmax:
        ypmin, ypmax = ypmin - 0.5, ypmax + 0.5

    p0 = np.concatenate([wcs.wcs.cd.flatten(), wcs.wcs.crpix.flatten()])
    fit = least_squares(
        _linear_wcs_fit, p0,
        args=(lon, lat, xp, yp, wcs),
        bounds=[[-np.inf, -np.inf, -np.inf, -np.inf, xpmin + 1, ypmin + 1],
                [np.inf, np.inf, np.inf, np.inf, xpmax + 1, ypmax + 1]]
    )
    wcs.wcs.crpix = np.array(fit.x[4:6])
    wcs.wcs.cd = np.array(fit.x[0:4].reshape((2, 2)))

    # fit SIP, if specified. Only fit forward coefficients
    if sip_degree:
        degree = sip_degree
        if '-SIP' not in wcs.wcs.ctype[0]:
            wcs.wcs.ctype = [x + '-SIP' for x in wcs.wcs.ctype]

        coef_names = [f'{i}_{j}' for i in range(degree + 1)
                      for j in range(degree + 1)
                      if (i + j) < (degree + 1) and (i + j) > 1]
        p0 = np.concatenate((np.array(wcs.wcs.crpix), wcs.wcs.cd.flatten(),
                             np.zeros(2 * len(coef_names))))

        fit = least_squares(
            _sip_fit, p0,
            args=(lon, lat, xp, yp, wcs, degree, coef_names),
            bounds=[[xpmin + 1, ypmin + 1] + [-np.inf] * (4 + 2 * len(coef_names)),
                    [xpmax + 1, ypmax + 1] + [np.inf] * (4 + 2 * len(coef_names))]
        )
        coef_fit = (list(fit.x[6:6 + len(coef_names)]),
                    list(fit.x[6 + len(coef_names):]))

        # put fit values in wcs
        wcs.wcs.cd = fit.x[2:6].reshape((2, 2))
        wcs.wcs.crpix = fit.x[0:2]

        a_vals = np.zeros((degree + 1, degree + 1))
        b_vals = np.zeros((degree + 1, degree + 1))

        for coef_name in coef_names:
            # coef_name has the form 'i_j'; recover the two exponents
            i, j = (int(s) for s in coef_name.split('_'))
            a_vals[i][j] = coef_fit[0].pop(0)
            b_vals[i][j] = coef_fit[1].pop(0)

        wcs.sip = Sip(a_vals, b_vals, np.zeros((degree + 1, degree + 1)),
                      np.zeros((degree + 1, degree + 1)), wcs.wcs.crpix)

    return wcs


def obsgeo_to_frame(obsgeo, obstime):
    """
    Convert a WCS obsgeo property into an `~.builtin_frames.ITRS` coordinate frame.

    Parameters
    ----------
    obsgeo : array-like
        A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
        returned by ``WCS.wcs.obsgeo``.

    obstime : time-like
        The time associated with the coordinate, will be passed to
        `~.builtin_frames.ITRS` as the obstime keyword.

    Returns
    -------
    `~.builtin_frames.ITRS`
        An `~.builtin_frames.ITRS` coordinate frame
        representing the coordinates.

    Notes
    -----
    The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
    where the first three elements are the coordinate in a cartesian
    representation and the last three are the coordinate in a spherical
    representation.

    This function prioritizes reading the cartesian coordinates, and will
    only read the spherical coordinates if the cartesian coordinates are
    either all zero or any of the cartesian coordinates are non-finite.

    In the case where both the spherical and cartesian coordinates have some
    non-finite values the spherical coordinates will be returned with the
    non-finite values included.
    """
    if (obsgeo is None
            or len(obsgeo) != 6
            or np.all(np.array(obsgeo) == 0)
            or np.all(~np.isfinite(obsgeo))
    ):
        raise ValueError(f"Cannot parse the 'obsgeo' location ({obsgeo}). "
                         "obsgeo should be a length-6, non-zero, finite numpy array")

    # If the cartesian coords are zero or have NaNs in them use the spherical ones
    if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
        data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))

    # Otherwise we assume the cartesian ones are valid
    else:
        data = CartesianRepresentation(*obsgeo[:3] * u.m)

    return ITRS(data, obstime=obstime)
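
# A minimal usage sketch for obsgeo_to_frame, kept as a comment so nothing
# runs at import time.  The file name and DATE-OBS handling below are
# hypothetical, for illustration only:
#
#     from astropy.io import fits
#     from astropy.time import Time
#     from astropy.wcs import WCS
#
#     header = fits.getheader('example.fits')            # hypothetical file
#     wcs = WCS(header)
#     itrs = obsgeo_to_frame(wcs.wcs.obsgeo, Time(header['DATE-OBS']))
#     location = itrs.earth_location                     # ITRS -> EarthLocation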
c07bbb5b8c24bc9546d1c45e5b700fa9d0777e9f68a9cc95c821bc712d9204ed
# Licensed under a 3-clause BSD style license - see LICENSE.rst # Under the hood, there are 3 separate classes that perform different # parts of the transformation: # # - `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS # functionality in `wcslib`_. (This includes TPV and TPD # polynomial distortion, but not SIP distortion). # # - `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the # `SIP`_ convention. # # - `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_ # lookup tables. # # Additionally, the class `WCS` aggregates all of these transformations # together in a pipeline: # # - Detector to image plane correction (by a pair of # `~astropy.wcs.DistortionLookupTable` objects). # # - `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip` # object) # # - `distortion paper`_ table-lookup correction (by a pair of # `~astropy.wcs.DistortionLookupTable` objects). # # - `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object) # STDLIB import copy import uuid import io import itertools import os import re import textwrap import warnings import builtins # THIRD-PARTY import numpy as np # LOCAL from astropy import log from astropy.io import fits from . import docstrings from . import _wcs from astropy import units as u from astropy.utils.compat import possible_filename from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning, AstropyDeprecationWarning from astropy.utils.decorators import deprecated_renamed_argument # Mix-in class that provides the APE 14 API from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS __all__ = ['FITSFixedWarning', 'WCS', 'find_all_wcs', 'DistortionLookupTable', 'Sip', 'Tabprm', 'Wcsprm', 'Auxprm', 'Celprm', 'Prjprm', 'Wtbarr', 'WCSBase', 'validate', 'WcsError', 'SingularMatrixError', 'InconsistentAxisTypesError', 'InvalidTransformError', 'InvalidCoordinateError', 'InvalidPrjParametersError', 'NoSolutionError', 'InvalidSubimageSpecificationError', 'NoConvergence', 'NonseparableSubimageCoordinateSystemError', 'NoWcsKeywordsFoundError', 'InvalidTabularParametersError'] __doctest_skip__ = ['WCS.all_world2pix'] if _wcs is not None: _parsed_version = _wcs.__version__.split('.') if int(_parsed_version[0]) == 5 and int(_parsed_version[1]) < 8: raise ImportError( "astropy.wcs is built with wcslib {0}, but only versions 5.8 and " "later on the 5.x series are known to work. 
The version of wcslib "
            "that ships with astropy may be used.")

    if not _wcs._sanity_check():
        raise RuntimeError(
            "astropy.wcs did not pass its sanity check for your build "
            "on your platform.")

    WCSBase = _wcs._Wcs
    DistortionLookupTable = _wcs.DistortionLookupTable
    Sip = _wcs.Sip
    Wcsprm = _wcs.Wcsprm
    Auxprm = _wcs.Auxprm
    Celprm = _wcs.Celprm
    Prjprm = _wcs.Prjprm
    Tabprm = _wcs.Tabprm
    Wtbarr = _wcs.Wtbarr
    WcsError = _wcs.WcsError
    SingularMatrixError = _wcs.SingularMatrixError
    InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
    InvalidTransformError = _wcs.InvalidTransformError
    InvalidCoordinateError = _wcs.InvalidCoordinateError
    NoSolutionError = _wcs.NoSolutionError
    InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
    NonseparableSubimageCoordinateSystemError = _wcs.NonseparableSubimageCoordinateSystemError
    NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
    InvalidTabularParametersError = _wcs.InvalidTabularParametersError
    InvalidPrjParametersError = _wcs.InvalidPrjParametersError

    # Copy all the constants from the C extension into this module's namespace
    for key, val in _wcs.__dict__.items():
        if key.startswith(('WCSSUB_', 'WCSHDR_', 'WCSHDO_', 'WCSCOMPARE_', 'PRJ_')):
            locals()[key] = val
            __all__.append(key)

    # Set coordinate extraction callback for WCS-TAB:
    def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row, ndim):
        arr = hdulist[(extnam, extver)].data[ttype][row - 1]

        if arr.ndim != ndim:
            if kind == 'c' and ndim == 2:
                arr = arr.reshape((arr.size, 1))
            else:
                raise ValueError("Bad TDIM")

        return np.ascontiguousarray(arr, dtype=np.double)

    _wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)

else:
    WCSBase = object
    Wcsprm = object
    DistortionLookupTable = object
    Sip = object
    Tabprm = object
    Wtbarr = object
    WcsError = None
    SingularMatrixError = None
    InconsistentAxisTypesError = None
    InvalidTransformError = None
    InvalidCoordinateError = None
    NoSolutionError = None
    InvalidSubimageSpecificationError = None
    NonseparableSubimageCoordinateSystemError = None
    NoWcsKeywordsFoundError = None
    InvalidTabularParametersError = None


# Additional relax bit flags
WCSHDO_SIP = 0x80000

# Regular expression defining SIP keywords.  It matches keywords that start
# with A or B, optionally followed by P, followed by an underscore, then a
# number in the range 0-19, followed by an underscore and another number in
# the range 0-19.  The keyword may optionally end with a capital letter.
SIP_KW = re.compile('''^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$''')


def _parse_keysel(keysel):
    keysel_flags = 0
    if keysel is not None:
        for element in keysel:
            if element.lower() == 'image':
                keysel_flags |= _wcs.WCSHDR_IMGHEAD
            elif element.lower() == 'binary':
                keysel_flags |= _wcs.WCSHDR_BIMGARR
            elif element.lower() == 'pixel':
                keysel_flags |= _wcs.WCSHDR_PIXLIST
            else:
                raise ValueError(
                    "keysel must be a list of 'image', 'binary' " +
                    "and/or 'pixel'")
    else:
        keysel_flags = -1

    return keysel_flags


class NoConvergence(Exception):
    """
    An error class used to report non-convergence and/or divergence of
    numerical methods. It is used to report errors in the iterative solution
    used by :py:meth:`~astropy.wcs.WCS.all_world2pix`.

    Attributes
    ----------

    best_solution : `numpy.ndarray`
        Best solution achieved by the numerical method.

    accuracy : `numpy.ndarray`
        Accuracy of the ``best_solution``.

    niter : `int`
        Number of iterations performed by the numerical method
        to compute ``best_solution``.

    divergent : None, `numpy.ndarray`
        Indices of the points in ``best_solution`` array
        for which the solution appears to be divergent.
If the solution does not diverge,
        ``divergent`` will be set to `None`.

    slow_conv : None, `numpy.ndarray`
        Indices of the solutions in ``best_solution`` array
        for which the solution failed to converge within the
        specified maximum number of iterations. If there are no
        non-converging solutions (i.e., if the required accuracy
        has been achieved for all input data points) then
        ``slow_conv`` will be set to `None`.

    """

    def __init__(self, *args, best_solution=None, accuracy=None, niter=None,
                 divergent=None, slow_conv=None, **kwargs):
        super().__init__(*args)

        self.best_solution = best_solution
        self.accuracy = accuracy
        self.niter = niter
        self.divergent = divergent
        self.slow_conv = slow_conv

        if kwargs:
            warnings.warn("Function received unexpected arguments ({}); "
                          "these are ignored but will raise an exception in "
                          "the future.".format(list(kwargs)),
                          AstropyDeprecationWarning)


class FITSFixedWarning(AstropyWarning):
    """
    The warning raised when the contents of the FITS header have been
    modified to be standards-compliant.
    """
    pass


class WCS(FITSWCSAPIMixin, WCSBase):
    """WCS objects perform standard WCS transformations, and correct for
    `SIP`_ and `distortion paper`_ table-lookup transformations, based
    on the WCS keywords and supplementary data read from a FITS file.

    See also: https://docs.astropy.org/en/stable/wcs/

    Parameters
    ----------
    header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional
        If *header* is not provided or None, the object will be initialized
        to default values.

    fobj : `~astropy.io.fits.HDUList`, optional
        It is needed when header keywords point to a `distortion
        paper`_ lookup table stored in a different extension.

    key : str, optional
        The name of a particular WCS transform to use.  This may be
        either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``\"a\"``
        part of the ``CTYPEia`` cards.  *key* may only be provided if
        *header* is also provided.

    minerr : float, optional
        The minimum value a distortion correction must have in order
        to be applied. If the value of ``CQERRja`` is smaller than
        *minerr*, the corresponding distortion is not applied.

    relax : bool or int, optional
        Degree of permissiveness:

        - `True` (default): Admit all recognized informal extensions
          of the WCS standard.

        - `False`: Recognize only FITS keywords defined by the
          published WCS standard.

        - `int`: a bit field selecting specific extensions to accept.
          See :ref:`astropy:relaxread` for details.

    naxis : int or sequence, optional
        Extracts specific coordinate axes using
        :meth:`~astropy.wcs.Wcsprm.sub`.  If a header is provided, and
        *naxis* is not ``None``, *naxis* will be passed to
        :meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
        axes from the header.  See :meth:`~astropy.wcs.Wcsprm.sub` for
        more details about this parameter.

    keysel : sequence of str, optional
        A sequence of flags used to select the keyword types
        considered by wcslib.  When ``None``, only the standard image
        header keywords are considered (and the underlying wcspih() C
        function is called).  To use binary table image array or pixel
        list keywords, *keysel* must be set.

        Each element in the list should be one of the following
        strings:

        - 'image': Image header keywords

        - 'binary': Binary table image array keywords

        - 'pixel': Pixel list keywords

        Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
        binary table image arrays and pixel lists (including
        ``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
        'pixel'.
colsel : sequence of int, optional A sequence of table column numbers used to restrict the WCS transformations considered to only those pertaining to the specified columns. If `None`, there is no restriction. fix : bool, optional When `True` (default), call `~astropy.wcs.Wcsprm.fix` on the resulting object to fix any non-standard uses in the header. `FITSFixedWarning` Warnings will be emitted if any changes were made. translate_units : str, optional Specify which potentially unsafe translations of non-standard unit strings to perform. By default, performs none. See `WCS.fix` for more information about this parameter. Only effective when ``fix`` is `True`. Raises ------ MemoryError Memory allocation failed. ValueError Invalid key. KeyError Key not found in FITS header. ValueError Lookup table distortion present in the header but *fobj* was not provided. Notes ----- 1. astropy.wcs supports arbitrary *n* dimensions for the core WCS (the transformations handled by WCSLIB). However, the `distortion paper`_ lookup table and `SIP`_ distortions must be two dimensional. Therefore, if you try to create a WCS object where the core WCS has a different number of dimensions than 2 and that object also contains a `distortion paper`_ lookup table or `SIP`_ distortion, a `ValueError` exception will be raised. To avoid this, consider using the *naxis* kwarg to select two dimensions from the core WCS. 2. The number of coordinate axes in the transformation is not determined directly from the ``NAXIS`` keyword but instead from the highest of: - ``NAXIS`` keyword - ``WCSAXESa`` keyword - The highest axis number in any parameterized WCS keyword. The keyvalue, as well as the keyword, must be syntactically valid otherwise it will not be considered. If none of these keyword types is present, i.e. if the header only contains auxiliary WCS keywords for a particular coordinate representation, then no coordinate description is constructed for it. The number of axes, which is set as the ``naxis`` member, may differ for different coordinate representations of the same image. 3. When the header includes duplicate keywords, in most cases the last encountered is used. 4. `~astropy.wcs.Wcsprm.set` is called immediately after construction, so any invalid keywords or transformations will be raised by the constructor, not when subsequently calling a transformation method. """ # noqa: E501 def __init__(self, header=None, fobj=None, key=' ', minerr=0.0, relax=True, naxis=None, keysel=None, colsel=None, fix=True, translate_units='', _do_set=True): close_fds = [] # these parameters are stored to be used when unpickling a WCS object: self._init_kwargs = { 'keysel': copy.copy(keysel), 'colsel': copy.copy(colsel), } if header is None: if naxis is None: naxis = 2 wcsprm = _wcs.Wcsprm(header=None, key=key, relax=relax, naxis=naxis) self.naxis = wcsprm.naxis # Set some reasonable defaults. 
det2im = (None, None) cpdis = (None, None) sip = None else: keysel_flags = _parse_keysel(keysel) if isinstance(header, (str, bytes)): try: is_path = (possible_filename(header) and os.path.exists(header)) except (OSError, ValueError): is_path = False if is_path: if fobj is not None: raise ValueError( "Can not provide both a FITS filename to " "argument 1 and a FITS file object to argument 2") fobj = fits.open(header) close_fds.append(fobj) header = fobj[0].header elif isinstance(header, fits.hdu.image._ImageBaseHDU): header = header.header elif not isinstance(header, fits.Header): try: # Accept any dict-like object orig_header = header header = fits.Header() for dict_key in orig_header.keys(): header[dict_key] = orig_header[dict_key] except TypeError: raise TypeError( "header must be a string, an astropy.io.fits.Header " "object, or a dict-like object") if isinstance(header, fits.Header): header_string = header.tostring().rstrip() else: header_string = header # Importantly, header is a *copy* of the passed-in header # because we will be modifying it if isinstance(header_string, str): header_bytes = header_string.encode('ascii') header_string = header_string else: header_bytes = header_string header_string = header_string.decode('ascii') if not (fobj is None or isinstance(fobj, fits.HDUList)): raise AssertionError("'fobj' must be either None or an " "astropy.io.fits.HDUList object.") est_naxis = 2 try: tmp_header = fits.Header.fromstring(header_string) self._remove_sip_kw(tmp_header) tmp_header_bytes = tmp_header.tostring().rstrip() if isinstance(tmp_header_bytes, str): tmp_header_bytes = tmp_header_bytes.encode('ascii') tmp_wcsprm = _wcs.Wcsprm(header=tmp_header_bytes, key=key, relax=relax, keysel=keysel_flags, colsel=colsel, warnings=False, hdulist=fobj) if naxis is not None: try: tmp_wcsprm = tmp_wcsprm.sub(naxis) except ValueError: pass est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2 except _wcs.NoWcsKeywordsFoundError: pass self.naxis = est_naxis header = fits.Header.fromstring(header_string) det2im = self._read_det2im_kw(header, fobj, err=minerr) cpdis = self._read_distortion_kw( header, fobj, dist='CPDIS', err=minerr) sip = self._read_sip_kw(header, wcskey=key) self._remove_sip_kw(header) header_string = header.tostring() header_string = header_string.replace('END' + ' ' * 77, '') if isinstance(header_string, str): header_bytes = header_string.encode('ascii') header_string = header_string else: header_bytes = header_string header_string = header_string.decode('ascii') try: wcsprm = _wcs.Wcsprm(header=header_bytes, key=key, relax=relax, keysel=keysel_flags, colsel=colsel, hdulist=fobj) except _wcs.NoWcsKeywordsFoundError: # The header may have SIP or distortions, but no core # WCS. That isn't an error -- we want a "default" # (identity) core Wcs transformation in that case. if colsel is None: wcsprm = _wcs.Wcsprm(header=None, key=key, relax=relax, keysel=keysel_flags, colsel=colsel, hdulist=fobj) else: raise if naxis is not None: wcsprm = wcsprm.sub(naxis) self.naxis = wcsprm.naxis if (wcsprm.naxis != 2 and (det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip)): raise ValueError( """ FITS WCS distortion paper lookup tables and SIP distortions only work in 2 dimensions. However, WCSLIB has detected {} dimensions in the core WCS keywords. To use core WCS in conjunction with FITS WCS distortion paper lookup tables or SIP distortion, you must select or reduce these to 2 dimensions using the naxis kwarg. 
""".format(wcsprm.naxis)) header_naxis = header.get('NAXIS', None) if header_naxis is not None and header_naxis < wcsprm.naxis: warnings.warn( "The WCS transformation has more axes ({:d}) than the " "image it is associated with ({:d})".format( wcsprm.naxis, header_naxis), FITSFixedWarning) self._get_naxis(header) WCSBase.__init__(self, sip, cpdis, wcsprm, det2im) if fix: if header is None: with warnings.catch_warnings(): warnings.simplefilter('ignore', FITSFixedWarning) self.fix(translate_units=translate_units) else: self.fix(translate_units=translate_units) if _do_set: self.wcs.set() for fd in close_fds: fd.close() self._pixel_bounds = None def __copy__(self): new_copy = self.__class__() WCSBase.__init__(new_copy, self.sip, (self.cpdis1, self.cpdis2), self.wcs, (self.det2im1, self.det2im2)) new_copy.__dict__.update(self.__dict__) return new_copy def __deepcopy__(self, memo): from copy import deepcopy new_copy = self.__class__() new_copy.naxis = deepcopy(self.naxis, memo) WCSBase.__init__(new_copy, deepcopy(self.sip, memo), (deepcopy(self.cpdis1, memo), deepcopy(self.cpdis2, memo)), deepcopy(self.wcs, memo), (deepcopy(self.det2im1, memo), deepcopy(self.det2im2, memo))) for key, val in self.__dict__.items(): new_copy.__dict__[key] = deepcopy(val, memo) return new_copy def copy(self): """ Return a shallow copy of the object. Convenience method so user doesn't have to import the :mod:`copy` stdlib module. .. warning:: Use `deepcopy` instead of `copy` unless you know why you need a shallow copy. """ return copy.copy(self) def deepcopy(self): """ Return a deep copy of the object. Convenience method so user doesn't have to import the :mod:`copy` stdlib module. """ return copy.deepcopy(self) def sub(self, axes=None): copy = self.deepcopy() # We need to know which axes have been dropped, but there is no easy # way to do this with the .sub function, so instead we assign UUIDs to # the CNAME parameters in copy.wcs. We can later access the original # CNAME properties from self.wcs. cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)] copy.wcs.cname = cname_uuid # Subset the WCS copy.wcs = copy.wcs.sub(axes) copy.naxis = copy.wcs.naxis # Construct a list of dimensions from the original WCS in the order # in which they appear in the final WCS. keep = [cname_uuid.index(cname) if cname in cname_uuid else None for cname in copy.wcs.cname] # Restore the original CNAMEs copy.wcs.cname = ['' if i is None else self.wcs.cname[i] for i in keep] # Subset pixel_shape and pixel_bounds if self.pixel_shape: copy.pixel_shape = tuple([None if i is None else self.pixel_shape[i] for i in keep]) if self.pixel_bounds: copy.pixel_bounds = [None if i is None else self.pixel_bounds[i] for i in keep] return copy if _wcs is not None: sub.__doc__ = _wcs.Wcsprm.sub.__doc__ def _fix_scamp(self): """ Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters are also present. Some projects (e.g., Palomar Transient Factory) convert SCAMP's distortion parameters (which abuse the PVi_m cards) to SIP. However, wcslib gets confused by the presence of both SCAMP and SIP distortion parameters. See https://github.com/astropy/astropy/issues/299. """ # Nothing to be done if no WCS attached if self.wcs is None: return # Nothing to be done if no PV parameters attached pv = self.wcs.get_pv() if not pv: return # Nothing to be done if axes don't use SIP distortion parameters if self.sip is None: return # Nothing to be done if any radial terms are present... # Loop over list to find any radial terms. 
# Certain values of the `j' index are used for storing
        # radial terms; refer to Equation (1) in
        # <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
        pv = np.asarray(pv)
        # Loop over distinct values of `i' index
        for i in set(pv[:, 0]):
            # Get all values of `j' index for this value of `i' index
            js = set(pv[:, 1][pv[:, 0] == i])
            # Find max value of `j' index
            max_j = max(js)
            for j in (3, 11, 23, 39):
                if j < max_j and j in js:
                    return

        self.wcs.set_pv([])
        warnings.warn("Removed redundant SCAMP distortion parameters " +
                      "because SIP parameters are also present", FITSFixedWarning)

    def fix(self, translate_units='', naxis=None):
        """
        Perform the fix operations from wcslib, and warn about any
        changes it has made.

        Parameters
        ----------
        translate_units : str, optional
            Specify which potentially unsafe translations of
            non-standard unit strings to perform.  By default,
            performs none.

            Although ``"S"`` is commonly used to represent seconds,
            its translation to ``"s"`` is potentially unsafe since the
            standard recognizes ``"S"`` formally as Siemens, however
            rarely that may be used.  The same applies to ``"H"`` for
            hours (Henry), and ``"D"`` for days (Debye).

            This string controls what to do in such cases, and is
            case-insensitive.

            - If the string contains ``"s"``, translate ``"S"`` to
              ``"s"``.

            - If the string contains ``"h"``, translate ``"H"`` to
              ``"h"``.

            - If the string contains ``"d"``, translate ``"D"`` to
              ``"d"``.

            Thus ``''`` doesn't do any unsafe translations, whereas
            ``'shd'`` does all of them.

        naxis : int array, optional
            Image axis lengths.  If this array is set to zero or
            ``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
            invoked.
        """
        if self.wcs is not None:
            self._fix_scamp()
            fixes = self.wcs.fix(translate_units, naxis)
            for key, val in fixes.items():
                if val != "No change":
                    if (key == 'datfix' and '1858-11-17' in val and
                            not np.count_nonzero(self.wcs.mjdref)):
                        continue
                    warnings.warn(
                        ("'{0}' made the change '{1}'.").
                        format(key, val),
                        FITSFixedWarning)

    def calc_footprint(self, header=None, undistort=True, axes=None,
                       center=True):
        """
        Calculates the footprint of the image on the sky.

        A footprint is defined as the positions of the corners of the
        image on the sky after all available distortions have been
        applied.

        Parameters
        ----------
        header : `~astropy.io.fits.Header` object, optional
            Used to get ``NAXIS1`` and ``NAXIS2``.  header and axes are
            mutually exclusive, alternative ways to provide the same
            information.

        undistort : bool, optional
            If `True`, take SIP and distortion lookup table into
            account

        axes : (int, int), optional
            If provided, use the given sequence as the shape of the
            image.  Otherwise, use the ``NAXIS1`` and ``NAXIS2``
            keywords from the header that was used to create this
            `WCS` object.

        center : bool, optional
            If `True` use the center of the pixel, otherwise use the corner.

        Returns
        -------
        coord : (4, 2) array of (*x*, *y*) coordinates.
            The order is clockwise starting with the bottom left corner.
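
        For example, for a 2-dimensional celestial WCS, a minimal sketch
        (``hdu`` is assumed to be an image HDU read from a FITS file)::

            w = WCS(hdu.header)
            footprint = w.calc_footprint(hdu.header)   # (4, 2) world coords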
""" if axes is not None: naxis1, naxis2 = axes else: if header is None: try: # classes that inherit from WCS and define naxis1/2 # do not require a header parameter naxis1, naxis2 = self.pixel_shape except (AttributeError, TypeError): warnings.warn( "Need a valid header in order to calculate footprint\n", AstropyUserWarning) return None else: naxis1 = header.get('NAXIS1', None) naxis2 = header.get('NAXIS2', None) if naxis1 is None or naxis2 is None: raise ValueError( "Image size could not be determined.") if center: corners = np.array([[1, 1], [1, naxis2], [naxis1, naxis2], [naxis1, 1]], dtype=np.float64) else: corners = np.array([[0.5, 0.5], [0.5, naxis2 + 0.5], [naxis1 + 0.5, naxis2 + 0.5], [naxis1 + 0.5, 0.5]], dtype=np.float64) if undistort: return self.all_pix2world(corners, 1) else: return self.wcs_pix2world(corners, 1) def _read_det2im_kw(self, header, fobj, err=0.0): """ Create a `distortion paper`_ type lookup table for detector to image plane correction. """ if fobj is None: return (None, None) if not isinstance(fobj, fits.HDUList): return (None, None) try: axiscorr = header['AXISCORR'] d2imdis = self._read_d2im_old_format(header, fobj, axiscorr) return d2imdis except KeyError: pass dist = 'D2IMDIS' d_kw = 'D2IM' err_kw = 'D2IMERR' tables = {} for i in range(1, self.naxis + 1): d_error = header.get(err_kw + str(i), 0.0) if d_error < err: tables[i] = None continue distortion = dist + str(i) if distortion in header: dis = header[distortion].lower() if dis == 'lookup': del header[distortion] assert isinstance(fobj, fits.HDUList), ( 'An astropy.io.fits.HDUList' 'is required for Lookup table distortion.') dp = (d_kw + str(i)).strip() dp_extver_key = dp + '.EXTVER' if dp_extver_key in header: d_extver = header[dp_extver_key] del header[dp_extver_key] else: d_extver = 1 dp_axis_key = dp + f'.AXIS.{i:d}' if i == header[dp_axis_key]: d_data = fobj['D2IMARR', d_extver].data else: d_data = (fobj['D2IMARR', d_extver].data).transpose() del header[dp_axis_key] d_header = fobj['D2IMARR', d_extver].header d_crpix = (d_header.get('CRPIX1', 0.0), d_header.get('CRPIX2', 0.0)) d_crval = (d_header.get('CRVAL1', 0.0), d_header.get('CRVAL2', 0.0)) d_cdelt = (d_header.get('CDELT1', 1.0), d_header.get('CDELT2', 1.0)) d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt) tables[i] = d_lookup else: warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning) for key in set(header): if key.startswith(dp + '.'): del header[key] else: tables[i] = None if not tables: return (None, None) else: return (tables.get(1), tables.get(2)) def _read_d2im_old_format(self, header, fobj, axiscorr): warnings.warn( "The use of ``AXISCORR`` for D2IM correction has been deprecated." "`~astropy.wcs` will read in files with ``AXISCORR`` but ``to_fits()`` will write " "out files without it.", AstropyDeprecationWarning) cpdis = [None, None] crpix = [0., 0.] crval = [0., 0.] cdelt = [1., 1.] 
try: d2im_data = fobj[('D2IMARR', 1)].data except KeyError: return (None, None) except AttributeError: return (None, None) d2im_data = np.array([d2im_data]) d2im_hdr = fobj[('D2IMARR', 1)].header naxis = d2im_hdr['NAXIS'] for i in range(1, naxis + 1): crpix[i - 1] = d2im_hdr.get('CRPIX' + str(i), 0.0) crval[i - 1] = d2im_hdr.get('CRVAL' + str(i), 0.0) cdelt[i - 1] = d2im_hdr.get('CDELT' + str(i), 1.0) cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt) if axiscorr == 1: return (cpdis, None) elif axiscorr == 2: return (None, cpdis) else: warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning) return (None, None) def _write_det2im(self, hdulist): """ Writes a `distortion paper`_ type lookup table to the given `~astropy.io.fits.HDUList`. """ if self.det2im1 is None and self.det2im2 is None: return dist = 'D2IMDIS' d_kw = 'D2IM' def write_d2i(num, det2im): if det2im is None: return hdulist[0].header[f'{dist}{num:d}'] = ( 'LOOKUP', 'Detector to image correction type') hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = ( num, 'Version number of WCSDVARR extension') hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = ( len(det2im.data.shape), 'Number of independent variables in D2IM function') for i in range(det2im.data.ndim): jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th') hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = ( i + 1, f'Axis number of the {jth} variable in a D2IM function') image = fits.ImageHDU(det2im.data, name='D2IMARR') header = image.header header['CRPIX1'] = (det2im.crpix[0], 'Coordinate system reference pixel') header['CRPIX2'] = (det2im.crpix[1], 'Coordinate system reference pixel') header['CRVAL1'] = (det2im.crval[0], 'Coordinate system value at reference pixel') header['CRVAL2'] = (det2im.crval[1], 'Coordinate system value at reference pixel') header['CDELT1'] = (det2im.cdelt[0], 'Coordinate increment along axis') header['CDELT2'] = (det2im.cdelt[1], 'Coordinate increment along axis') image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER']) hdulist.append(image) write_d2i(1, self.det2im1) write_d2i(2, self.det2im2) def _read_distortion_kw(self, header, fobj, dist='CPDIS', err=0.0): """ Reads `distortion paper`_ table-lookup keywords and data, and returns a 2-tuple of `~astropy.wcs.DistortionLookupTable` objects. If no `distortion paper`_ keywords are found, ``(None, None)`` is returned. 
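
        For ``dist='CPDIS'`` this reads the ``DPja`` record-valued keywords
        and ``CPERRja`` error keywords; otherwise the ``DQia`` and
        ``CQERRia`` keywords are used.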
""" if isinstance(header, (str, bytes)): return (None, None) if dist == 'CPDIS': d_kw = 'DP' err_kw = 'CPERR' else: d_kw = 'DQ' err_kw = 'CQERR' tables = {} for i in range(1, self.naxis + 1): d_error_key = err_kw + str(i) if d_error_key in header: d_error = header[d_error_key] del header[d_error_key] else: d_error = 0.0 if d_error < err: tables[i] = None continue distortion = dist + str(i) if distortion in header: dis = header[distortion].lower() del header[distortion] if dis == 'lookup': if not isinstance(fobj, fits.HDUList): raise ValueError('an astropy.io.fits.HDUList is ' 'required for Lookup table distortion.') dp = (d_kw + str(i)).strip() dp_extver_key = dp + '.EXTVER' if dp_extver_key in header: d_extver = header[dp_extver_key] del header[dp_extver_key] else: d_extver = 1 dp_axis_key = dp + f'.AXIS.{i:d}' if i == header[dp_axis_key]: d_data = fobj['WCSDVARR', d_extver].data else: d_data = (fobj['WCSDVARR', d_extver].data).transpose() del header[dp_axis_key] d_header = fobj['WCSDVARR', d_extver].header d_crpix = (d_header.get('CRPIX1', 0.0), d_header.get('CRPIX2', 0.0)) d_crval = (d_header.get('CRVAL1', 0.0), d_header.get('CRVAL2', 0.0)) d_cdelt = (d_header.get('CDELT1', 1.0), d_header.get('CDELT2', 1.0)) d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt) tables[i] = d_lookup for key in set(header): if key.startswith(dp + '.'): del header[key] else: warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning) else: tables[i] = None if not tables: return (None, None) else: return (tables.get(1), tables.get(2)) def _write_distortion_kw(self, hdulist, dist='CPDIS'): """ Write out `distortion paper`_ keywords to the given `~astropy.io.fits.HDUList`. """ if self.cpdis1 is None and self.cpdis2 is None: return if dist == 'CPDIS': d_kw = 'DP' else: d_kw = 'DQ' def write_dist(num, cpdis): if cpdis is None: return hdulist[0].header[f'{dist}{num:d}'] = ( 'LOOKUP', 'Prior distortion function type') hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = ( num, 'Version number of WCSDVARR extension') hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = ( len(cpdis.data.shape), f'Number of independent variables in {dist} function') for i in range(cpdis.data.ndim): jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th') hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = ( i + 1, f'Axis number of the {jth} variable in a {dist} function') image = fits.ImageHDU(cpdis.data, name='WCSDVARR') header = image.header header['CRPIX1'] = (cpdis.crpix[0], 'Coordinate system reference pixel') header['CRPIX2'] = (cpdis.crpix[1], 'Coordinate system reference pixel') header['CRVAL1'] = (cpdis.crval[0], 'Coordinate system value at reference pixel') header['CRVAL2'] = (cpdis.crval[1], 'Coordinate system value at reference pixel') header['CDELT1'] = (cpdis.cdelt[0], 'Coordinate increment along axis') header['CDELT2'] = (cpdis.cdelt[1], 'Coordinate increment along axis') image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER']) hdulist.append(image) write_dist(1, self.cpdis1) write_dist(2, self.cpdis2) def _remove_sip_kw(self, header): """ Remove SIP information from a header. """ # Never pass SIP coefficients to wcslib # CTYPE must be passed with -SIP to wcslib for key in set(m.group() for m in map(SIP_KW.match, list(header)) if m is not None): del header[key] def _read_sip_kw(self, header, wcskey=""): """ Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip` object. If no `SIP`_ header keywords are found, ``None`` is returned. 
""" if isinstance(header, (str, bytes)): # TODO: Parse SIP from a string without pyfits around return None if "A_ORDER" in header and header['A_ORDER'] > 1: if "B_ORDER" not in header: raise ValueError( "A_ORDER provided without corresponding B_ORDER " "keyword for SIP distortion") m = int(header["A_ORDER"]) a = np.zeros((m + 1, m + 1), np.double) for i in range(m + 1): for j in range(m - i + 1): key = f"A_{i}_{j}" if key in header: a[i, j] = header[key] del header[key] m = int(header["B_ORDER"]) if m > 1: b = np.zeros((m + 1, m + 1), np.double) for i in range(m + 1): for j in range(m - i + 1): key = f"B_{i}_{j}" if key in header: b[i, j] = header[key] del header[key] else: a = None b = None del header['A_ORDER'] del header['B_ORDER'] ctype = [header[f'CTYPE{nax}{wcskey}'] for nax in range(1, self.naxis + 1)] if any(not ctyp.endswith('-SIP') for ctyp in ctype): message = """ Inconsistent SIP distortion information is present in the FITS header and the WCS object: SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix. astropy.wcs is using the SIP distortion coefficients, therefore the coordinates calculated here might be incorrect. If you do not want to apply the SIP distortion coefficients, please remove the SIP coefficients from the FITS header or the WCS object. As an example, if the image is already distortion-corrected (e.g., drizzled) then distortion components should not apply and the SIP coefficients should be removed. While the SIP distortion coefficients are being applied here, if that was indeed the intent, for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object. """ # noqa: E501 log.info(message) elif "B_ORDER" in header and header['B_ORDER'] > 1: raise ValueError( "B_ORDER provided without corresponding A_ORDER " + "keyword for SIP distortion") else: a = None b = None if "AP_ORDER" in header and header['AP_ORDER'] > 1: if "BP_ORDER" not in header: raise ValueError( "AP_ORDER provided without corresponding BP_ORDER " "keyword for SIP distortion") m = int(header["AP_ORDER"]) ap = np.zeros((m + 1, m + 1), np.double) for i in range(m + 1): for j in range(m - i + 1): key = f"AP_{i}_{j}" if key in header: ap[i, j] = header[key] del header[key] m = int(header["BP_ORDER"]) if m > 1: bp = np.zeros((m + 1, m + 1), np.double) for i in range(m + 1): for j in range(m - i + 1): key = f"BP_{i}_{j}" if key in header: bp[i, j] = header[key] del header[key] else: ap = None bp = None del header['AP_ORDER'] del header['BP_ORDER'] elif "BP_ORDER" in header and header['BP_ORDER'] > 1: raise ValueError( "BP_ORDER provided without corresponding AP_ORDER " "keyword for SIP distortion") else: ap = None bp = None if a is None and b is None and ap is None and bp is None: return None if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header: raise ValueError( "Header has SIP keywords without CRPIX keywords") crpix1 = header.get(f"CRPIX1{wcskey}") crpix2 = header.get(f"CRPIX2{wcskey}") return Sip(a, b, ap, bp, (crpix1, crpix2)) def _write_sip_kw(self): """ Write out SIP keywords. Returns a dictionary of key-value pairs. 
""" if self.sip is None: return {} keywords = {} def write_array(name, a): if a is None: return size = a.shape[0] trdir = 'sky to detector' if name[-1] == 'P' else 'detector to sky' comment = ('SIP polynomial order, axis {:d}, {:s}' .format(ord(name[0]) - ord('A'), trdir)) keywords[f'{name}_ORDER'] = size - 1, comment comment = 'SIP distortion coefficient' for i in range(size): for j in range(size - i): if a[i, j] != 0.0: keywords[ f'{name}_{i:d}_{j:d}'] = a[i, j], comment write_array('A', self.sip.a) write_array('B', self.sip.b) write_array('AP', self.sip.ap) write_array('BP', self.sip.bp) return keywords def _denormalize_sky(self, sky): if self.wcs.lngtyp != 'RA': raise ValueError( "WCS does not have longitude type of 'RA', therefore " + "(ra, dec) data can not be used as input") if self.wcs.lattyp != 'DEC': raise ValueError( "WCS does not have longitude type of 'DEC', therefore " + "(ra, dec) data can not be used as input") if self.wcs.naxis == 2: if self.wcs.lng == 0 and self.wcs.lat == 1: return sky elif self.wcs.lng == 1 and self.wcs.lat == 0: # Reverse the order of the columns return sky[:, ::-1] else: raise ValueError( "WCS does not have longitude and latitude celestial " + "axes, therefore (ra, dec) data can not be used as input") else: if self.wcs.lng < 0 or self.wcs.lat < 0: raise ValueError( "WCS does not have both longitude and latitude " "celestial axes, therefore (ra, dec) data can not be " + "used as input") out = np.zeros((sky.shape[0], self.wcs.naxis)) out[:, self.wcs.lng] = sky[:, 0] out[:, self.wcs.lat] = sky[:, 1] return out def _normalize_sky(self, sky): if self.wcs.lngtyp != 'RA': raise ValueError( "WCS does not have longitude type of 'RA', therefore " + "(ra, dec) data can not be returned") if self.wcs.lattyp != 'DEC': raise ValueError( "WCS does not have longitude type of 'DEC', therefore " + "(ra, dec) data can not be returned") if self.wcs.naxis == 2: if self.wcs.lng == 0 and self.wcs.lat == 1: return sky elif self.wcs.lng == 1 and self.wcs.lat == 0: # Reverse the order of the columns return sky[:, ::-1] else: raise ValueError( "WCS does not have longitude and latitude celestial " "axes, therefore (ra, dec) data can not be returned") else: if self.wcs.lng < 0 or self.wcs.lat < 0: raise ValueError( "WCS does not have both longitude and latitude celestial " "axes, therefore (ra, dec) data can not be returned") out = np.empty((sky.shape[0], 2)) out[:, 0] = sky[:, self.wcs.lng] out[:, 1] = sky[:, self.wcs.lat] return out def _array_converter(self, func, sky, *args, ra_dec_order=False): """ A helper function to support reading either a pair of arrays or a single Nx2 array. 
""" def _return_list_of_arrays(axes, origin): if any([x.size == 0 for x in axes]): return axes try: axes = np.broadcast_arrays(*axes) except ValueError: raise ValueError( "Coordinate arrays are not broadcastable to each other") xy = np.hstack([x.reshape((x.size, 1)) for x in axes]) if ra_dec_order and sky == 'input': xy = self._denormalize_sky(xy) output = func(xy, origin) if ra_dec_order and sky == 'output': output = self._normalize_sky(output) return (output[:, 0].reshape(axes[0].shape), output[:, 1].reshape(axes[0].shape)) return [output[:, i].reshape(axes[0].shape) for i in range(output.shape[1])] def _return_single_array(xy, origin): if xy.shape[-1] != self.naxis: raise ValueError( "When providing two arguments, the array must be " "of shape (N, {})".format(self.naxis)) if 0 in xy.shape: return xy if ra_dec_order and sky == 'input': xy = self._denormalize_sky(xy) result = func(xy, origin) if ra_dec_order and sky == 'output': result = self._normalize_sky(result) return result if len(args) == 2: try: xy, origin = args xy = np.asarray(xy) origin = int(origin) except Exception: raise TypeError( "When providing two arguments, they must be " "(coords[N][{}], origin)".format(self.naxis)) if xy.shape == () or len(xy.shape) == 1: return _return_list_of_arrays([xy], origin) return _return_single_array(xy, origin) elif len(args) == self.naxis + 1: axes = args[:-1] origin = args[-1] try: axes = [np.asarray(x) for x in axes] origin = int(origin) except Exception: raise TypeError( "When providing more than two arguments, they must be " + "a 1-D array for each axis, followed by an origin.") return _return_list_of_arrays(axes, origin) raise TypeError( "WCS projection has {0} dimensions, so expected 2 (an Nx{0} array " "and the origin argument) or {1} arguments (the position in each " "dimension, and the origin argument). Instead, {2} arguments were " "given.".format( self.naxis, self.naxis + 1, len(args))) def all_pix2world(self, *args, **kwargs): return self._array_converter( self._all_pix2world, 'output', *args, **kwargs) all_pix2world.__doc__ = """ Transforms pixel coordinates to world coordinates. Performs all of the following in series: - Detector to image plane correction (if present in the FITS file) - `SIP`_ distortion correction (if present in the FITS file) - `distortion paper`_ table-lookup correction (if present in the FITS file) - `wcslib`_ "core" WCS transformation Parameters ---------- {} For a transformation that is not two-dimensional, the two-argument form must be used. {} Returns ------- {} Notes ----- The order of the axes for the result is determined by the ``CTYPEia`` keywords in the FITS header, therefore it may not always be of the form (*ra*, *dec*). The `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`, `~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp` members can be used to determine the order of the axes. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError Invalid coordinate transformation parameters. ValueError x- and y-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. 
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8), docstrings.RA_DEC_ORDER(8), docstrings.RETURNS('sky coordinates, in degrees', 8)) def wcs_pix2world(self, *args, **kwargs): if self.wcs is None: raise ValueError("No basic WCS settings were created.") return self._array_converter( lambda xy, o: self.wcs.p2s(xy, o)['world'], 'output', *args, **kwargs) wcs_pix2world.__doc__ = """ Transforms pixel coordinates to world coordinates by doing only the basic `wcslib`_ transformation. No `SIP`_ or `distortion paper`_ table lookup correction is applied. To perform distortion correction, see `~astropy.wcs.WCS.all_pix2world`, `~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`, or `~astropy.wcs.WCS.pix2foc`. Parameters ---------- {} For a transformation that is not two-dimensional, the two-argument form must be used. {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError Invalid coordinate transformation parameters. ValueError x- and y-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. Notes ----- The order of the axes for the result is determined by the ``CTYPEia`` keywords in the FITS header, therefore it may not always be of the form (*ra*, *dec*). The `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`, `~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp` members can be used to determine the order of the axes. """.format(docstrings.TWO_OR_MORE_ARGS('naxis', 8), docstrings.RA_DEC_ORDER(8), docstrings.RETURNS('world coordinates, in degrees', 8)) def _all_world2pix(self, world, origin, tolerance, maxiter, adaptive, detect_divergence, quiet): # ############################################################ # # DESCRIPTION OF THE NUMERICAL METHOD ## # ############################################################ # In this section I will outline the method of solving # the inverse problem of converting world coordinates to # pixel coordinates (*inverse* of the direct transformation # `all_pix2world`) and I will summarize some of the aspects # of the method proposed here and some of the issues of the # original `all_world2pix` (in relation to this method) # discussed in https://github.com/astropy/astropy/issues/1977 # A more detailed discussion can be found here: # https://github.com/astropy/astropy/pull/2373 # # # ### Background ### # # # I will refer here to the [SIP Paper] # (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf). # According to this paper, the effect of distortions as # described in *their* equation (1) is: # # (1) x = CD*(u+f(u)), # # where `x` is a *vector* of "intermediate spherical # coordinates" (equivalent to (x,y) in the paper) and `u` # is a *vector* of "pixel coordinates", and `f` is a vector # function describing geometrical distortions # (see equations 2 and 3 in SIP Paper. # However, I prefer to use `w` for "intermediate world # coordinates", `x` for pixel coordinates, and assume that # transformation `W` performs the **linear** # (CD matrix + projection onto celestial sphere) part of the # conversion from pixel coordinates to world coordinates. 
        # Then we can re-write (1) as:
        #
        # (2)   w = W*(x+f(x)) = T(x)
        #
        # In `astropy.wcs.WCS` transformation `W` is represented by
        # the `wcs_pix2world` member, while the combined ("total")
        # transformation (linear part + distortions) is performed by
        # `all_pix2world`. Below I summarize the notations and their
        # equivalents in `astropy.wcs.WCS`:
        #
        # | Equation term | astropy.WCS/meaning          |
        # | ------------- | ---------------------------- |
        # | `x`           | pixel coordinates            |
        # | `w`           | world coordinates            |
        # | `W`           | `wcs_pix2world()`            |
        # | `W^{-1}`      | `wcs_world2pix()`            |
        # | `T`           | `all_pix2world()`            |
        # | `x+f(x)`      | `pix2foc()`                  |
        #
        #
        #      ### Direct Solving of Equation (2)  ###
        #
        #
        # In order to find the pixel coordinates that correspond to
        # given world coordinates `w`, it is necessary to invert
        # equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
        # for `x`. However, this approach has the following
        # disadvantages:
        #    1. It requires unnecessary transformations (see next
        #       section).
        #    2. It is prone to "RA wrapping" issues as described in
        #       https://github.com/astropy/astropy/issues/1977
        #       (essentially because `all_pix2world` may return points
        #       with a different phase than user's input `w`).
        #
        #
        #      ### Description of the Method Used here ###
        #
        #
        # By applying inverse linear WCS transformation (`W^{-1}`)
        # to both sides of equation (2) and introducing notation `x'`
        # (prime) for the pixel coordinates obtained from the world
        # coordinates by applying inverse *linear* WCS transformation
        # ("focal plane coordinates"):
        #
        # (3)   x' = W^{-1}(w)
        #
        # we obtain the following equation:
        #
        # (4)   x' = x+f(x),
        #
        # or,
        #
        # (5)   x = x'-f(x)
        #
        # This equation is well suited for solving using the method
        # of fixed-point iterations
        # (http://en.wikipedia.org/wiki/Fixed-point_iteration):
        #
        # (6)   x_{i+1} = x'-f(x_i)
        #
        # As an initial value of the pixel coordinate `x_0` we take
        # "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
        # We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
        # consider the process to be diverging if
        # `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
        # **when** `|x_{i+1}-x_i|>=tolerance` (when current
        # approximation is close to the true solution,
        # `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
        # and we ignore such "divergences" when
        # `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
        # `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
        # unnecessary since the iterative process should stop anyway,
        # however, the proposed implementation of this iterative
        # process is completely vectorized and, therefore, we may
        # continue iterating over *some* points even though they have
        # converged to within a specified tolerance (while iterating
        # over other points that have not yet converged to
        # a solution).
        #
        # In order to efficiently implement iterative process (6)
        # using available methods in `astropy.wcs.WCS`, we add and
        # subtract `x_i` from the right side of equation (6):
        #
        # (7)   x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
        #
        # where `x'=wcs_world2pix(w)` and it is computed only *once*
        # before the beginning of the iterative process (and we also
        # set `x_0=x'`). By using `pix2foc` at each iteration instead
        # of `all_pix2world` we get about a 25% increase in performance
        # (by not performing the linear `W` transformation at each
        # step) and we also avoid the "RA wrapping" issue described
        # above (by working in focal plane coordinates and avoiding
        # pix->world transformations).
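        #
        # To make iteration (6) concrete, here is a toy 1-D
        # illustration (not part of the algorithm; `f` below is a
        # made-up distortion):
        #
        #     f = lambda x: 1e-4 * x**2   # hypothetical distortion
        #     xprime = 100.0              # x' = W^{-1}(w)
        #     x = xprime                  # x_0 = x'
        #     for _ in range(5):
        #         x = xprime - f(x)       # x_{i+1} = x' - f(x_i)
        #
        # After a few iterations `x` settles near 99.0195..., the
        # solution of x + f(x) = x'.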
        #
        # As an added benefit, the process converges to the correct
        # solution in just one iteration when distortions are not
        # present (compare to
        # https://github.com/astropy/astropy/issues/1977 and
        # https://github.com/astropy/astropy/pull/2294): in this case
        # `pix2foc` is the identity transformation
        # `x_i=pix2foc(x_i)` and from equation (7) we get:
        #
        #       x' = x_0 = wcs_world2pix(w)
        #       x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
        #           = wcs_world2pix(w) = x_0
        #       =>
        #       |x_1-x_0| = 0 < tolerance (with tolerance > 0)
        #
        # However, for performance reasons, it is still better to
        # avoid iterations altogether and return the exact linear
        # solution (`wcs_world2pix`) right away when non-linear
        # distortions are not present by checking that attributes
        # `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
        # *all* `None`.
        #
        #
        #         ### Outline of the Algorithm ###
        #
        #
        # While the proposed code is relatively long (considering
        # the simplicity of the algorithm), this is due to: 1)
        # checking if an iterative solution is necessary at all; 2)
        # checking for divergence; 3) re-implementation of the
        # completely vectorized algorithm as an "adaptive" vectorized
        # algorithm (for cases when some points diverge for which we
        # want to stop iterations). In my tests, the adaptive version
        # of the algorithm is about 50% slower than the non-adaptive
        # version for all HST images.
        #
        # The essential part of the vectorized non-adaptive algorithm
        # (without divergence and other checks) can be described
        # as follows:
        #
        #     pix0 = self.wcs_world2pix(world, origin)
        #     pix  = pix0.copy()  # 0-order solution
        #
        #     for k in range(maxiter):
        #         # find correction to the previous solution:
        #         dpix = self.pix2foc(pix, origin) - pix0
        #
        #         # compute norm (L2) of the correction:
        #         dn = np.linalg.norm(dpix, axis=1)
        #
        #         # apply correction:
        #         pix -= dpix
        #
        #         # check convergence:
        #         if np.max(dn) < tolerance:
        #             break
        #
        #     return pix
        #
        # Here, the input parameter `world` can be an `NxM` array
        # where `N` is the number of points to be converted
        # simultaneously to image coordinates and `M` is the number
        # of coordinate axes in WCS.
        #
        #
        # ###  IMPORTANT NOTE:  ###
        #
        # If, in future releases of `~astropy.wcs`, `pix2foc` no
        # longer applies all the required distortion corrections,
        # then, in the code below, calls to `pix2foc` will have to be
        # replaced with
        # wcs_world2pix(all_pix2world(pix_list, origin), origin)
        #

        # ############################################################
        # #            INITIALIZE ITERATIVE PROCESS:                ##
        # ############################################################

        # initial approximation (linear WCS based only)
        pix0 = self.wcs_world2pix(world, origin)

        # Check that an iterative solution is required at all
        # (when any of the non-CD-matrix-based corrections are
        # present). If not required, return the initial
        # approximation (pix0).
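        # (``has_distortion`` is `True` when any of the ``sip``,
        # ``cpdis1``, ``cpdis2``, ``det2im1`` or ``det2im2`` attributes
        # is set -- see the ``has_distortion`` property below.)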
if not self.has_distortion: # No non-WCS corrections detected so # simply return initial approximation: return pix0 pix = pix0.copy() # 0-order solution # initial correction: dpix = self.pix2foc(pix, origin) - pix0 # Update initial solution: pix -= dpix # Norm (L2) squared of the correction: dn = np.sum(dpix*dpix, axis=1) dnprev = dn.copy() # if adaptive else dn tol2 = tolerance**2 # Prepare for iterative process k = 1 ind = None inddiv = None # Turn off numpy runtime warnings for 'invalid' and 'over': old_invalid = np.geterr()['invalid'] old_over = np.geterr()['over'] np.seterr(invalid='ignore', over='ignore') # ############################################################ # # NON-ADAPTIVE ITERATIONS: ## # ############################################################ if not adaptive: # Fixed-point iterations: while (np.nanmax(dn) >= tol2 and k < maxiter): # Find correction to the previous solution: dpix = self.pix2foc(pix, origin) - pix0 # Compute norm (L2) squared of the correction: dn = np.sum(dpix*dpix, axis=1) # Check for divergence (we do this in two stages # to optimize performance for the most common # scenario when successive approximations converge): if detect_divergence: divergent = (dn >= dnprev) if np.any(divergent): # Find solutions that have not yet converged: slowconv = (dn >= tol2) inddiv, = np.where(divergent & slowconv) if inddiv.shape[0] > 0: # Update indices of elements that # still need correction: conv = (dn < dnprev) iconv = np.where(conv) # Apply correction: dpixgood = dpix[iconv] pix[iconv] -= dpixgood dpix[iconv] = dpixgood # For the next iteration choose # non-divergent points that have not yet # converged to the requested accuracy: ind, = np.where(slowconv & conv) pix0 = pix0[ind] dnprev[ind] = dn[ind] k += 1 # Switch to adaptive iterations: adaptive = True break # Save current correction magnitudes for later: dnprev = dn # Apply correction: pix -= dpix k += 1 # ############################################################ # # ADAPTIVE ITERATIONS: ## # ############################################################ if adaptive: if ind is None: ind, = np.where(np.isfinite(pix).all(axis=1)) pix0 = pix0[ind] # "Adaptive" fixed-point iterations: while (ind.shape[0] > 0 and k < maxiter): # Find correction to the previous solution: dpixnew = self.pix2foc(pix[ind], origin) - pix0 # Compute norm (L2) of the correction: dnnew = np.sum(np.square(dpixnew), axis=1) # Bookkeeping of corrections: dnprev[ind] = dn[ind].copy() dn[ind] = dnnew if detect_divergence: # Find indices of pixels that are converging: conv = (dnnew < dnprev[ind]) iconv = np.where(conv) iiconv = ind[iconv] # Apply correction: dpixgood = dpixnew[iconv] pix[iiconv] -= dpixgood dpix[iiconv] = dpixgood # Find indices of solutions that have not yet # converged to the requested accuracy # AND that do not diverge: subind, = np.where((dnnew >= tol2) & conv) else: # Apply correction: pix[ind] -= dpixnew dpix[ind] = dpixnew # Find indices of solutions that have not yet # converged to the requested accuracy: subind, = np.where(dnnew >= tol2) # Choose solutions that need more iterations: ind = ind[subind] pix0 = pix0[subind] k += 1 # ############################################################ # # FINAL DETECTION OF INVALID, DIVERGING, ## # # AND FAILED-TO-CONVERGE POINTS ## # ############################################################ # Identify diverging and/or invalid points: invalid = ((~np.all(np.isfinite(pix), axis=1)) & (np.all(np.isfinite(world), axis=1))) # When detect_divergence==False, dnprev is outdated # 
(it is the norm of the very first correction).
        # Still better than nothing...
        inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
        if inddiv.shape[0] == 0:
            inddiv = None

        # Identify points that did not converge within 'maxiter'
        # iterations:
        if k >= maxiter:
            ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
            if ind.shape[0] == 0:
                ind = None
        else:
            ind = None

        # Restore previous numpy error settings:
        np.seterr(invalid=old_invalid, over=old_over)

        # ############################################################
        # #  RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING  ##
        # #                 DATA POINTS HAVE BEEN DETECTED:         ##
        # ############################################################
        if (ind is not None or inddiv is not None) and not quiet:
            if inddiv is None:
                raise NoConvergence(
                    "'WCS.all_world2pix' failed to "
                    "converge to the requested accuracy after {:d} "
                    "iterations.".format(k), best_solution=pix,
                    accuracy=np.abs(dpix), niter=k,
                    slow_conv=ind, divergent=None)
            else:
                raise NoConvergence(
                    "'WCS.all_world2pix' failed to "
                    "converge to the requested accuracy.\n"
                    "After {:d} iterations, the solution is diverging "
                    "at least for one input point."
                    .format(k), best_solution=pix,
                    accuracy=np.abs(dpix), niter=k,
                    slow_conv=ind, divergent=inddiv)

        return pix

    @deprecated_renamed_argument('accuracy', 'tolerance', '4.3')
    def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False,
                      detect_divergence=True, quiet=False, **kwargs):
        if self.wcs is None:
            raise ValueError("No basic WCS settings were created.")

        return self._array_converter(
            lambda *args, **kwargs:
            self._all_world2pix(
                *args, tolerance=tolerance, maxiter=maxiter,
                adaptive=adaptive, detect_divergence=detect_divergence,
                quiet=quiet),
            'input', *args, **kwargs
        )
    all_world2pix.__doc__ = """
        all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
        adaptive=False, detect_divergence=True, quiet=False)

        Transforms world coordinates to pixel coordinates, using
        numerical iteration to invert the full forward transformation
        `~astropy.wcs.WCS.all_pix2world` with complete distortion model.

        Parameters
        ----------
        {0}

            For a transformation that is not two-dimensional, the
            two-argument form must be used.

        {1}

        tolerance : float, optional (default = 1.0e-4)
            Tolerance of solution. Iteration terminates when the
            iterative solver estimates that the "true solution" is
            within this many pixels of the current estimate, more
            specifically, when the correction to the solution found
            during the previous iteration is smaller (in the sense of
            the L2 norm) than ``tolerance``.

        maxiter : int, optional (default = 20)
            Maximum number of iterations allowed to reach a solution.

        quiet : bool, optional (default = False)
            Do not throw :py:class:`NoConvergence` exceptions when
            the method does not converge to a solution with the
            required accuracy within a specified number of maximum
            iterations set by the ``maxiter`` parameter. Instead,
            simply return the found solution.

        Other Parameters
        ----------------
        adaptive : bool, optional (default = False)
            Specifies whether to adaptively select only points that
            did not converge to a solution within the required
            accuracy for the next iteration. Default is recommended
            for HST as well as most other instruments.

            .. note::
               The :py:meth:`all_world2pix` uses a vectorized
               implementation of the method of consecutive
               approximations (see ``Notes`` section below) in which it
               iterates over *all* input points *regardless* until
               the required accuracy has been reached for *all* input
               points.

               In some cases it may be possible that *almost all* points
               have reached the required accuracy but there are only a
               few input data points for which additional iterations may
               be needed (this depends mostly on the characteristics of
               the geometric distortions for a given instrument). In
               this situation it may be advantageous to set
               ``adaptive`` = `True` in which case
               :py:meth:`all_world2pix` will continue iterating *only*
               over the points that have not yet converged to the
               required accuracy. However, for the HST's ACS/WFC
               detector, which has the strongest distortions of all HST
               instruments, testing has shown that enabling this option
               would lead to about a 50-100% penalty in computational
               time (depending on specifics of the image, geometric
               distortions, and number of input points to be converted).
               Therefore, for HST and possibly other instruments, it is
               recommended to set ``adaptive`` = `False`. The only
               danger in getting this setting wrong will be a
               performance penalty.

            .. note::
               When ``detect_divergence`` is `True`,
               :py:meth:`all_world2pix` will automatically switch to
               the adaptive algorithm once divergence has been detected.

        detect_divergence : bool, optional (default = True)
            Specifies whether to perform a more detailed analysis of
            the convergence to a solution. Normally
            :py:meth:`all_world2pix` may not achieve the required
            accuracy if either the ``tolerance`` or ``maxiter``
            arguments are too low. However, it may happen that for some
            geometric distortions the conditions of convergence for the
            method of consecutive approximations used by
            :py:meth:`all_world2pix` may not be satisfied, in which
            case consecutive approximations to the solution will
            diverge regardless of the ``tolerance`` or ``maxiter``
            settings.

            When ``detect_divergence`` is `False`, these divergent
            points will be detected as not having achieved the required
            accuracy (without further details). In addition, if
            ``adaptive`` is `False` then the algorithm will not know
            that the solution (for specific points) is diverging and
            will continue iterating and trying to "improve" diverging
            solutions. This may result in ``NaN`` or ``Inf`` values in
            the return results (in addition to performance penalties).
            Even when ``detect_divergence`` is `False`,
            :py:meth:`all_world2pix`, at the end of the iterative
            process, will identify invalid results (``NaN`` or ``Inf``)
            as "diverging" solutions and will raise
            :py:class:`NoConvergence` unless the ``quiet`` parameter is
            set to `True`.

            When ``detect_divergence`` is `True`,
            :py:meth:`all_world2pix` will detect points for which
            current correction to the coordinates is larger than the
            correction applied during the previous iteration **if** the
            requested accuracy **has not yet been achieved**. In this
            case, if ``adaptive`` is `True`, these points will be
            excluded from further iterations and if ``adaptive`` is
            `False`, :py:meth:`all_world2pix` will automatically switch
            to the adaptive algorithm. Thus, the reported divergent
            solution will be the latest converging solution computed
            immediately *before* divergence has been detected.

            .. note::
               When accuracy has been achieved, small increases in
               current corrections may be possible due to rounding
               errors (when ``adaptive`` is `False`) and such increases
               will be ignored.

            .. note::
               Based on our testing using HST ACS/WFC images, setting
               ``detect_divergence`` to `True` will incur about 5-20%
               performance penalty with the larger penalty
               corresponding to ``adaptive`` set to `True`.
               Because the benefits of enabling this feature outweigh
               the small performance penalty, especially when
               ``adaptive`` = `False`, it is recommended to set
               ``detect_divergence`` to `True`, unless extensive
               testing of the distortion models for images from
               specific instruments shows good stability of the
               numerical method for a wide range of coordinates (even
               outside the image itself).

            .. note::
               Indices of the diverging inverse solutions will be
               reported in the ``divergent`` attribute of the raised
               :py:class:`NoConvergence` exception object.

        Returns
        -------
        {2}

        Notes
        -----
        The order of the axes for the input world array is determined
        by the ``CTYPEia`` keywords in the FITS header, therefore it
        may not always be of the form (*ra*, *dec*).  The
        `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
        `~astropy.wcs.Wcsprm.lattyp`, and `~astropy.wcs.Wcsprm.lngtyp`
        members can be used to determine the order of the axes.

        Using the method of fixed-point iterations, we iterate starting
        with the initial approximation, which is computed using the
        non-distortion-aware :py:meth:`wcs_world2pix` (or equivalent).

        The :py:meth:`all_world2pix` function uses a vectorized
        implementation of the method of consecutive approximations and
        therefore it is highly efficient (>30x) when *all* data points
        that need to be converted from sky coordinates to image
        coordinates are passed at *once*. Therefore, it is advisable,
        whenever possible, to pass as input a long array of all points
        that need to be converted to :py:meth:`all_world2pix` instead
        of calling :py:meth:`all_world2pix` for each data point. Also
        see the note to the ``adaptive`` parameter.

        Raises
        ------
        NoConvergence
            The method did not converge to a solution to the required
            accuracy within a specified number of maximum iterations
            set by the ``maxiter`` parameter. To turn off this
            exception, set ``quiet`` to `True`. Indices of the points
            for which the requested accuracy was not achieved (if any)
            will be listed in the ``slow_conv`` attribute of the raised
            :py:class:`NoConvergence` exception object.

            See :py:class:`NoConvergence` documentation for more
            details.

        MemoryError
            Memory allocation failed.

        SingularMatrixError
            Linear transformation matrix is singular.

        InconsistentAxisTypesError
            Inconsistent or unrecognized coordinate axis types.

        ValueError
            Invalid parameter value.

        ValueError
            Invalid coordinate transformation parameters.

        ValueError
            x- and y-coordinate arrays are not the same size.

        InvalidTransformError
            Invalid coordinate transformation parameters.

        InvalidTransformError
            Ill-conditioned coordinate transformation parameters.
Examples -------- >>> import astropy.io.fits as fits >>> import astropy.wcs as wcs >>> import numpy as np >>> import os >>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits') >>> hdulist = fits.open(filename) >>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist) >>> hdulist.close() >>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1) >>> print(ra) # doctest: +FLOAT_CMP [ 5.52645627 5.52649663 5.52653698] >>> print(dec) # doctest: +FLOAT_CMP [-72.05171757 -72.05171276 -72.05170795] >>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1) >>> print(radec) # doctest: +FLOAT_CMP [[ 5.52645627 -72.05171757] [ 5.52649663 -72.05171276] [ 5.52653698 -72.05170795]] >>> x, y = w.all_world2pix(ra, dec, 1) >>> print(x) # doctest: +FLOAT_CMP [ 1.00000238 2.00000237 3.00000236] >>> print(y) # doctest: +FLOAT_CMP [ 0.99999996 0.99999997 0.99999997] >>> xy = w.all_world2pix(radec, 1) >>> print(xy) # doctest: +FLOAT_CMP [[ 1.00000238 0.99999996] [ 2.00000237 0.99999997] [ 3.00000236 0.99999997]] >>> xy = w.all_world2pix(radec, 1, maxiter=3, ... tolerance=1.0e-10, quiet=False) Traceback (most recent call last): ... NoConvergence: 'WCS.all_world2pix' failed to converge to the requested accuracy. After 3 iterations, the solution is diverging at least for one input point. >>> # Now try to use some diverging data: >>> divradec = w.all_pix2world([[1.0, 1.0], ... [10000.0, 50000.0], ... [3.0, 1.0]], 1) >>> print(divradec) # doctest: +FLOAT_CMP [[ 5.52645627 -72.05171757] [ 7.15976932 -70.8140779 ] [ 5.52653698 -72.05170795]] >>> # First, turn detect_divergence on: >>> try: # doctest: +FLOAT_CMP ... xy = w.all_world2pix(divradec, 1, maxiter=20, ... tolerance=1.0e-4, adaptive=False, ... detect_divergence=True, ... quiet=False) ... except wcs.wcs.NoConvergence as e: ... print("Indices of diverging points: {{0}}" ... .format(e.divergent)) ... print("Indices of poorly converging points: {{0}}" ... .format(e.slow_conv)) ... print("Best solution:\\n{{0}}".format(e.best_solution)) ... print("Achieved accuracy:\\n{{0}}".format(e.accuracy)) Indices of diverging points: [1] Indices of poorly converging points: None Best solution: [[ 1.00000238e+00 9.99999965e-01] [ -1.99441636e+06 1.44309097e+06] [ 3.00000236e+00 9.99999966e-01]] Achieved accuracy: [[ 6.13968380e-05 8.59638593e-07] [ 8.59526812e+11 6.61713548e+11] [ 6.09398446e-05 8.38759724e-07]] >>> raise e Traceback (most recent call last): ... NoConvergence: 'WCS.all_world2pix' failed to converge to the requested accuracy. After 5 iterations, the solution is diverging at least for one input point. >>> # This time turn detect_divergence off: >>> try: # doctest: +FLOAT_CMP ... xy = w.all_world2pix(divradec, 1, maxiter=20, ... tolerance=1.0e-4, adaptive=False, ... detect_divergence=False, ... quiet=False) ... except wcs.wcs.NoConvergence as e: ... print("Indices of diverging points: {{0}}" ... .format(e.divergent)) ... print("Indices of poorly converging points: {{0}}" ... .format(e.slow_conv)) ... print("Best solution:\\n{{0}}".format(e.best_solution)) ... print("Achieved accuracy:\\n{{0}}".format(e.accuracy)) Indices of diverging points: [1] Indices of poorly converging points: None Best solution: [[ 1.00000009 1. ] [ nan nan] [ 3.00000009 1. ]] Achieved accuracy: [[ 2.29417358e-06 3.21222995e-08] [ nan nan] [ 2.27407877e-06 3.13005639e-08]] >>> raise e Traceback (most recent call last): ... NoConvergence: 'WCS.all_world2pix' failed to converge to the requested accuracy. After 6 iterations, the solution is diverging at least for one input point. 
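
        >>> # With ``quiet=True`` the exception is suppressed and the
        >>> # best solution found is returned instead:
        >>> xy = w.all_world2pix(divradec, 1, maxiter=20,
        ...                      tolerance=1.0e-4, adaptive=False,
        ...                      detect_divergence=True, quiet=True)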
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8), docstrings.RA_DEC_ORDER(8), docstrings.RETURNS('pixel coordinates', 8)) def wcs_world2pix(self, *args, **kwargs): if self.wcs is None: raise ValueError("No basic WCS settings were created.") return self._array_converter( lambda xy, o: self.wcs.s2p(xy, o)['pixcrd'], 'input', *args, **kwargs) wcs_world2pix.__doc__ = """ Transforms world coordinates to pixel coordinates, using only the basic `wcslib`_ WCS transformation. No `SIP`_ or `distortion paper`_ table lookup transformation is applied. Parameters ---------- {} For a transformation that is not two-dimensional, the two-argument form must be used. {} Returns ------- {} Notes ----- The order of the axes for the input world array is determined by the ``CTYPEia`` keywords in the FITS header, therefore it may not always be of the form (*ra*, *dec*). The `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`, `~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp` members can be used to determine the order of the axes. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError Invalid coordinate transformation parameters. ValueError x- and y-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('naxis', 8), docstrings.RA_DEC_ORDER(8), docstrings.RETURNS('pixel coordinates', 8)) def pix2foc(self, *args): return self._array_converter(self._pix2foc, None, *args) pix2foc.__doc__ = """ Convert pixel coordinates to focal plane coordinates using the `SIP`_ polynomial distortion convention and `distortion paper`_ table-lookup correction. The output is in absolute pixel coordinates, not relative to ``CRPIX``. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('focal coordinates', 8)) def p4_pix2foc(self, *args): return self._array_converter(self._p4_pix2foc, None, *args) p4_pix2foc.__doc__ = """ Convert pixel coordinates to focal plane coordinates using `distortion paper`_ table-lookup correction. The output is in absolute pixel coordinates, not relative to ``CRPIX``. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('focal coordinates', 8)) def det2im(self, *args): return self._array_converter(self._det2im, None, *args) det2im.__doc__ = """ Convert detector coordinates to image plane coordinates using `distortion paper`_ table-lookup correction. The output is in absolute pixel coordinates, not relative to ``CRPIX``. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. 
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('pixel coordinates', 8)) def sip_pix2foc(self, *args): if self.sip is None: if len(args) == 2: return args[0] elif len(args) == 3: return args[:2] else: raise TypeError("Wrong number of arguments") return self._array_converter(self.sip.pix2foc, None, *args) sip_pix2foc.__doc__ = """ Convert pixel coordinates to focal plane coordinates using the `SIP`_ polynomial distortion convention. The output is in pixel coordinates, relative to ``CRPIX``. FITS WCS `distortion paper`_ table lookup correction is not applied, even if that information existed in the FITS file that initialized this :class:`~astropy.wcs.WCS` object. To correct for that, use `~astropy.wcs.WCS.pix2foc` or `~astropy.wcs.WCS.p4_pix2foc`. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('focal coordinates', 8)) def sip_foc2pix(self, *args): if self.sip is None: if len(args) == 2: return args[0] elif len(args) == 3: return args[:2] else: raise TypeError("Wrong number of arguments") return self._array_converter(self.sip.foc2pix, None, *args) sip_foc2pix.__doc__ = """ Convert focal plane coordinates to pixel coordinates using the `SIP`_ polynomial distortion convention. FITS WCS `distortion paper`_ table lookup distortion correction is not applied, even if that information existed in the FITS file that initialized this `~astropy.wcs.WCS` object. Parameters ---------- {} Returns ------- {} Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(docstrings.TWO_OR_MORE_ARGS('2', 8), docstrings.RETURNS('pixel coordinates', 8)) def proj_plane_pixel_scales(self): """ Calculate pixel scales along each axis of the image pixel at the ``CRPIX`` location once it is projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. .. note:: This method is concerned **only** about the transformation "image plane"->"projection plane" and **not** about the transformation "celestial sphere"->"projection plane"->"image plane". Therefore, this function ignores distortions arising due to non-linear nature of most projections. .. note:: This method only returns sensible answers if the WCS contains celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object. Returns ------- scale : list of `~astropy.units.Quantity` A vector of projection plane increments corresponding to each pixel side (axis). See Also -------- astropy.wcs.utils.proj_plane_pixel_scales """ # noqa: E501 from astropy.wcs.utils import proj_plane_pixel_scales # Avoid circular import values = proj_plane_pixel_scales(self) units = [u.Unit(x) for x in self.wcs.cunit] return [value * unit for (value, unit) in zip(values, units)] # Can have different units def proj_plane_pixel_area(self): """ For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), returns pixel area of the image pixel at the ``CRPIX`` location once it is projected onto the "plane of intermediate world coordinates" as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_. .. 
note:: This function is concerned **only** about the transformation "image plane"->"projection plane" and **not** about the transformation "celestial sphere"->"projection plane"->"image plane". Therefore, this function ignores distortions arising due to non-linear nature of most projections. .. note:: This method only returns sensible answers if the WCS contains celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object. Returns ------- area : `~astropy.units.Quantity` Area (in the projection plane) of the pixel at ``CRPIX`` location. Raises ------ ValueError Pixel area is defined only for 2D pixels. Most likely the `~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial` WCS is not a square matrix of second order. Notes ----- Depending on the application, square root of the pixel area can be used to represent a single pixel scale of an equivalent square pixel whose area is equal to the area of a generally non-square pixel. See Also -------- astropy.wcs.utils.proj_plane_pixel_area """ # noqa: E501 from astropy.wcs.utils import proj_plane_pixel_area # Avoid circular import value = proj_plane_pixel_area(self) unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1]) # 2D only return value * unit def to_fits(self, relax=False, key=None): """ Generate an `~astropy.io.fits.HDUList` object with all of the information stored in this object. This should be logically identical to the input FITS file, but it will be normalized in a number of ways. See `to_header` for some warnings about the output produced. Parameters ---------- relax : bool or int, optional Degree of permissiveness: - `False` (default): Write all extensions that are considered to be safe and recommended. - `True`: Write all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to write. See :ref:`astropy:relaxwrite` for details. key : str The name of a particular WCS transform to use. This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"`` part of the ``CTYPEia`` cards. Returns ------- hdulist : `~astropy.io.fits.HDUList` """ header = self.to_header(relax=relax, key=key) hdu = fits.PrimaryHDU(header=header) hdulist = fits.HDUList(hdu) self._write_det2im(hdulist) self._write_distortion_kw(hdulist) return hdulist def to_header(self, relax=None, key=None): """Generate an `astropy.io.fits.Header` object with the basic WCS and SIP information stored in this object. This should be logically identical to the input FITS file, but it will be normalized in a number of ways. .. warning:: This function does not write out FITS WCS `distortion paper`_ information, since that requires multiple FITS header data units. To get a full representation of everything in this object, use `to_fits`. Parameters ---------- relax : bool or int, optional Degree of permissiveness: - `False` (default): Write all extensions that are considered to be safe and recommended. - `True`: Write all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to write. See :ref:`astropy:relaxwrite` for details. If the ``relax`` keyword argument is not given and any keywords were omitted from the output, an `~astropy.utils.exceptions.AstropyWarning` is displayed. To override this, explicitly pass a value to ``relax``. key : str The name of a particular WCS transform to use. This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"`` part of the ``CTYPEia`` cards. 
Returns ------- header : `astropy.io.fits.Header` Notes ----- The output header will almost certainly differ from the input in a number of respects: 1. The output header only contains WCS-related keywords. In particular, it does not contain syntactically-required keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or ``END``. 2. Deprecated (e.g. ``CROTAn``) or non-standard usage will be translated to standard (this is partially dependent on whether ``fix`` was applied). 3. Quantities will be converted to the units used internally, basically SI with the addition of degrees. 4. Floating-point quantities may be given to a different decimal precision. 5. Elements of the ``PCi_j`` matrix will be written if and only if they differ from the unit matrix. Thus, if the matrix is unity then no elements will be written. 6. Additional keywords such as ``WCSAXES``, ``CUNITia``, ``LONPOLEa`` and ``LATPOLEa`` may appear. 7. The original keycomments will be lost, although `to_header` tries hard to write meaningful comments. 8. Keyword order may be changed. """ # default precision for numerical WCS keywords precision = WCSHDO_P14 # Defined by C-ext # noqa: F821 display_warning = False if relax is None: display_warning = True relax = False if relax not in (True, False): do_sip = relax & WCSHDO_SIP relax &= ~WCSHDO_SIP else: do_sip = relax relax = WCSHDO_all if relax is True else WCSHDO_safe # Defined by C-ext # noqa: F821 relax = precision | relax if self.wcs is not None: if key is not None: orig_key = self.wcs.alt self.wcs.alt = key header_string = self.wcs.to_header(relax) header = fits.Header.fromstring(header_string) keys_to_remove = ["", " ", "COMMENT"] for kw in keys_to_remove: if kw in header: del header[kw] # Check if we can handle TPD distortion correctly if int(_parsed_version[0]) * 10 + int(_parsed_version[1]) < 71: for kw, val in header.items(): if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD': warnings.warn( f"WCS contains a TPD distortion model in {kw}. WCSLIB " f"{_wcs.__version__} is writing this in a format incompatible with " f"current versions - please update to 7.4 or use the bundled WCSLIB.", AstropyWarning) elif int(_parsed_version[0]) * 10 + int(_parsed_version[1]) < 74: for kw, val in header.items(): if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD': warnings.warn( f"WCS contains a TPD distortion model in {kw}, which requires WCSLIB " f"7.4 or later to store in a FITS header (having {_wcs.__version__}).", AstropyWarning) else: header = fits.Header() if do_sip and self.sip is not None: if self.wcs is not None and any(not ctyp.endswith('-SIP') for ctyp in self.wcs.ctype): self._fix_ctype(header, add_sip=True) for kw, val in self._write_sip_kw().items(): header[kw] = val if not do_sip and self.wcs is not None and any(self.wcs.ctype) and self.sip is not None: # This is called when relax is not False or WCSHDO_SIP # The default case of ``relax=None`` is handled further in the code. header = self._fix_ctype(header, add_sip=False) if display_warning: full_header = self.to_header(relax=True, key=key) missing_keys = [] for kw, val in full_header.items(): if kw not in header: missing_keys.append(kw) if len(missing_keys): warnings.warn( "Some non-standard WCS keywords were excluded: {} " "Use the ``relax`` kwarg to control this.".format( ', '.join(missing_keys)), AstropyWarning) # called when ``relax=None`` # This is different from the case of ``relax=False``. 
        if any(self.wcs.ctype) and self.sip is not None:
            header = self._fix_ctype(header, add_sip=False, log_message=False)
        # Finally reset the key. This must be called after ``_fix_ctype``.
        if key is not None:
            self.wcs.alt = orig_key
        return header

    def _fix_ctype(self, header, add_sip=True, log_message=True):
        """
        Parameters
        ----------
        header : `~astropy.io.fits.Header`
            FITS header.
        add_sip : bool
            Flag indicating whether "-SIP" should be added or removed
            from CTYPE keywords.

            Remove "-SIP" from CTYPE when writing out a header with
            relax=False. This needs to be done outside ``to_header``
            because ``to_header`` runs twice when ``relax=False`` and
            the second time ``relax`` is set to ``True`` to display the
            missing keywords.

            If the user requested SIP distortion to be written out add
            "-SIP" to CTYPE if it is missing.
        log_message : bool
            Flag controlling whether to log a message about the SIP
            inconsistency when ``add_sip`` is `True`.
        """
        _add_sip_to_ctype = """
        Inconsistent SIP distortion information is present in the current WCS:
        SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
        therefore the current WCS is internally inconsistent.

        Because relax has been set to True, the resulting output WCS will have
        "-SIP" appended to CTYPE in order to make the header internally consistent.

        However, this may produce incorrect astrometry in the output WCS, if
        in fact the current WCS is already distortion-corrected.

        Therefore, if current WCS is already distortion-corrected (e.g., drizzled)
        then SIP distortion components should not apply. In that case, for a WCS
        that is already distortion-corrected, please remove the SIP coefficients
        from the header.

        """
        if log_message:
            if add_sip:
                log.info(_add_sip_to_ctype)
        for i in range(1, self.naxis+1):
            # strip() must be called here to cover the case of alt key= " "
            kw = f'CTYPE{i}{self.wcs.alt}'.strip()
            if kw in header:
                # Use an explicit suffix check instead of
                # ``header[kw].strip("-SIP")``: ``str.strip`` removes any of
                # the characters '-', 'S', 'I', 'P' from both ends of the
                # value rather than the literal "-SIP" suffix.
                val = header[kw]
                if val.endswith("-SIP"):
                    val = val[:-4]
                if add_sip:
                    val += "-SIP"
                header[kw] = val
        return header

    def to_header_string(self, relax=None):
        """
        Identical to `to_header`, but returns a string containing the
        header cards.
        """
        return str(self.to_header(relax))

    def footprint_to_file(self, filename='footprint.reg', color='green',
                          width=2, coordsys=None):
        """
        Writes out a `ds9`_ style regions file. It can be loaded
        directly by `ds9`_.

        Parameters
        ----------
        filename : str, optional
            Output file name - default is ``'footprint.reg'``

        color : str, optional
            Color to use when plotting the line.

        width : int, optional
            Width of the region line.

        coordsys : str, optional
            Coordinate system. If not specified (default), the ``radesys``
            value is used. For all possible values, see
            http://ds9.si.edu/doc/ref/region.html#RegionFileFormat

        """
        comments = ('# Region file format: DS9 version 4.0 \n'
                    '# global color=green font="helvetica 12 bold" '
                    'select=1 highlite=1 edit=1 move=1 delete=1 '
                    'include=1 fixed=0 source\n')

        coordsys = coordsys or self.wcs.radesys

        if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
                            'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR',
                            'AMPLIFIER', 'DETECTOR'):
            raise ValueError("Coordinate system '{}' is not supported. A valid"
                             " one can be given with the 'coordsys' argument."
.format(coordsys)) with open(filename, mode='w') as f: f.write(comments) f.write(f'{coordsys}\n') f.write('polygon(') ftpr = self.calc_footprint() if ftpr is not None: ftpr.tofile(f, sep=',') f.write(f') # color={color}, width={width:d} \n') def _get_naxis(self, header=None): _naxis = [] if (header is not None and not isinstance(header, (str, bytes))): for naxis in itertools.count(1): try: _naxis.append(header[f'NAXIS{naxis}']) except KeyError: break if len(_naxis) == 0: _naxis = [0, 0] elif len(_naxis) == 1: _naxis.append(0) self._naxis = _naxis def printwcs(self): print(repr(self)) def __repr__(self): ''' Return a short description. Simply porting the behavior from the `printwcs()` method. ''' description = ["WCS Keywords\n", f"Number of WCS axes: {self.naxis!r}"] sfmt = ' : ' + "".join(["{"+f"{i}"+"!r} " for i in range(self.naxis)]) keywords = ['CTYPE', 'CRVAL', 'CRPIX'] values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix] for keyword, value in zip(keywords, values): description.append(keyword+sfmt.format(*value)) if hasattr(self.wcs, 'pc'): for i in range(self.naxis): s = '' for j in range(self.naxis): s += ''.join(['PC', str(i+1), '_', str(j+1), ' ']) s += sfmt description.append(s.format(*self.wcs.pc[i])) s = 'CDELT' + sfmt description.append(s.format(*self.wcs.cdelt)) elif hasattr(self.wcs, 'cd'): for i in range(self.naxis): s = '' for j in range(self.naxis): s += "".join(['CD', str(i+1), '_', str(j+1), ' ']) s += sfmt description.append(s.format(*self.wcs.cd[i])) description.append(f"NAXIS : {' '.join(map(str, self._naxis))}") return '\n'.join(description) def get_axis_types(self): """ Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>` but provides the information in a more Python-friendly format. Returns ------- result : list of dict Returns a list of dictionaries, one for each axis, each containing attributes about the type of that axis. Each dictionary has the following keys: - 'coordinate_type': - None: Non-specific coordinate type. - 'stokes': Stokes coordinate. - 'celestial': Celestial coordinate (including ``CUBEFACE``). - 'spectral': Spectral coordinate. - 'scale': - 'linear': Linear axis. - 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``). - 'non-linear celestial': Non-linear celestial axis. - 'non-linear spectral': Non-linear spectral axis. - 'logarithmic': Logarithmic axis. - 'tabular': Tabular axis. - 'group' - Group number, e.g. lookup table number - 'number' - For celestial axes: - 0: Longitude coordinate. - 1: Latitude coordinate. - 2: ``CUBEFACE`` number. - For lookup tables: - the axis number in a multidimensional table. ``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will generate an error. """ if self.wcs is None: raise AttributeError( "This WCS object does not have a wcsprm object.") coordinate_type_map = { 0: None, 1: 'stokes', 2: 'celestial', 3: 'spectral'} scale_map = { 0: 'linear', 1: 'quantized', 2: 'non-linear celestial', 3: 'non-linear spectral', 4: 'logarithmic', 5: 'tabular'} result = [] for axis_type in self.wcs.axis_types: subresult = {} coordinate_type = (axis_type // 1000) % 10 subresult['coordinate_type'] = coordinate_type_map[coordinate_type] scale = (axis_type // 100) % 10 subresult['scale'] = scale_map[scale] group = (axis_type // 10) % 10 subresult['group'] = group number = axis_type % 10 subresult['number'] = number result.append(subresult) return result def __reduce__(self): """ Support pickling of WCS objects. This is done by serializing to an in-memory FITS file and dumping that as a string. 
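
        A rough usage sketch (the object is reconstructed by re-parsing
        the embedded FITS representation)::

            >>> import pickle
            >>> w2 = pickle.loads(pickle.dumps(w))  # doctest: +SKIP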
""" hdulist = self.to_fits(relax=True) buffer = io.BytesIO() hdulist.writeto(buffer) dct = self.__dict__.copy() dct['_alt_wcskey'] = self.wcs.alt return (__WCS_unpickle__, (self.__class__, dct, buffer.getvalue(),)) def dropaxis(self, dropax): """ Remove an axis from the WCS. Parameters ---------- wcs : `~astropy.wcs.WCS` The WCS with naxis to be chopped to naxis-1 dropax : int The index of the WCS to drop, counting from 0 (i.e., python convention, not FITS convention) Returns ------- `~astropy.wcs.WCS` A new `~astropy.wcs.WCS` instance with one axis fewer """ inds = list(range(self.wcs.naxis)) inds.pop(dropax) # axis 0 has special meaning to sub # if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want # wcs.sub([1,2]) to get 'RA','DEC' back return self.sub([i+1 for i in inds]) def swapaxes(self, ax0, ax1): """ Swap axes in a WCS. Parameters ---------- wcs : `~astropy.wcs.WCS` The WCS to have its axes swapped ax0 : int ax1 : int The indices of the WCS to be swapped, counting from 0 (i.e., python convention, not FITS convention) Returns ------- `~astropy.wcs.WCS` A new `~astropy.wcs.WCS` instance with the same number of axes, but two swapped """ inds = list(range(self.wcs.naxis)) inds[ax0], inds[ax1] = inds[ax1], inds[ax0] return self.sub([i+1 for i in inds]) def reorient_celestial_first(self): """ Reorient the WCS such that the celestial axes are first, followed by the spectral axis, followed by any others. Assumes at least celestial axes are present. """ return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES]) # Defined by C-ext # noqa: F821 E501 def slice(self, view, numpy_order=True): """ Slice a WCS instance using a Numpy slice. The order of the slice should be reversed (as for the data) compared to the natural WCS order. Parameters ---------- view : tuple A tuple containing the same number of slices as the WCS system. The ``step`` method, the third argument to a slice, is not presently supported. numpy_order : bool Use numpy order, i.e. slice the WCS so that an identical slice applied to a numpy array will slice the array and WCS in the same way. If set to `False`, the WCS will be sliced in FITS order, meaning the first slice will be applied to the *last* numpy index but the *first* WCS axis. Returns ------- wcs_new : `~astropy.wcs.WCS` A new resampled WCS axis """ if hasattr(view, '__len__') and len(view) > self.wcs.naxis: raise ValueError("Must have # of slices <= # of WCS axes") elif not hasattr(view, '__len__'): # view MUST be an iterable view = [view] if not all(isinstance(x, slice) for x in view): # We need to drop some dimensions, but this may not always be # possible with .sub due to correlated axes, so instead we use the # generalized slicing infrastructure from astropy.wcs.wcsapi. return SlicedFITSWCS(self, view) # NOTE: we could in principle use SlicedFITSWCS as above for all slicing, # but in the simple case where there are no axes dropped, we can just # create a full WCS object with updated WCS parameters which is faster # for this specific case and also backward-compatible. 
wcs_new = self.deepcopy() if wcs_new.sip is not None: sip_crpix = wcs_new.sip.crpix.tolist() for i, iview in enumerate(view): if iview.step is not None and iview.step < 0: raise NotImplementedError("Reversing an axis is not " "implemented.") if numpy_order: wcs_index = self.wcs.naxis - 1 - i else: wcs_index = i if iview.step is not None and iview.start is None: # Slice from "None" is equivalent to slice from 0 (but one # might want to downsample, so allow slices with # None,None,step or None,stop,step) iview = slice(0, iview.stop, iview.step) if iview.start is not None: if iview.step not in (None, 1): crpix = self.wcs.crpix[wcs_index] cdelt = self.wcs.cdelt[wcs_index] # equivalently (keep this comment so you can compare eqns): # wcs_new.wcs.crpix[wcs_index] = # (crpix - iview.start)*iview.step + 0.5 - iview.step/2. crp = ((crpix - iview.start - 1.)/iview.step + 0.5 + 1./iview.step/2.) wcs_new.wcs.crpix[wcs_index] = crp if wcs_new.sip is not None: sip_crpix[wcs_index] = crp wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step else: wcs_new.wcs.crpix[wcs_index] -= iview.start if wcs_new.sip is not None: sip_crpix[wcs_index] -= iview.start try: # range requires integers but the other attributes can also # handle arbitrary values, so this needs to be in a try/except. nitems = len(builtins.range(self._naxis[wcs_index])[iview]) except TypeError as exc: if 'indices must be integers' not in str(exc): raise warnings.warn("NAXIS{} attribute is not updated because at " "least one index ('{}') is not an integer." "".format(wcs_index, iview), AstropyUserWarning) else: wcs_new._naxis[wcs_index] = nitems if wcs_new.sip is not None: wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp, sip_crpix) return wcs_new def __getitem__(self, item): # "getitem" is a shortcut for self.slice; it is very limited # there is no obvious and unambiguous interpretation of wcs[1,2,3] # We COULD allow wcs[1] to link to wcs.sub([2]) # (wcs[i] -> wcs.sub([i+1])) return self.slice(item) def __iter__(self): # Having __getitem__ makes Python think WCS is iterable. However, # Python first checks whether __iter__ is present, so we can raise an # exception here. raise TypeError(f"'{self.__class__.__name__}' object is not iterable") @property def axis_type_names(self): """ World names for each coordinate axis. Returns ------- list of str A list of names along each axis. """ names = list(self.wcs.cname) types = self.wcs.ctype for i in range(len(names)): if len(names[i]) > 0: continue names[i] = types[i].split('-')[0] return names @property def celestial(self): """ A copy of the current WCS with only the celestial axes included """ return self.sub([WCSSUB_CELESTIAL]) # Defined by C-ext # noqa: F821 @property def is_celestial(self): return self.has_celestial and self.naxis == 2 @property def has_celestial(self): try: return self.wcs.lng >= 0 and self.wcs.lat >= 0 except InconsistentAxisTypesError: return False @property def spectral(self): """ A copy of the current WCS with only the spectral axes included """ return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext # noqa: F821 @property def is_spectral(self): return self.has_spectral and self.naxis == 1 @property def has_spectral(self): try: return self.wcs.spec >= 0 except InconsistentAxisTypesError: return False @property def has_distortion(self): """ Returns `True` if any distortion terms are present.
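Examples
--------
A bare WCS carries no SIP, lookup-table, or detector-to-image
correction terms (illustrative sketch)::

    >>> from astropy.wcs import WCS
    >>> WCS(naxis=2).has_distortion
    False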
""" return (self.sip is not None or self.cpdis1 is not None or self.cpdis2 is not None or self.det2im1 is not None and self.det2im2 is not None) @property def pixel_scale_matrix(self): try: cdelt = np.diag(self.wcs.get_cdelt()) pc = self.wcs.get_pc() except InconsistentAxisTypesError: try: # for non-celestial axes, get_cdelt doesn't work with warnings.catch_warnings(): warnings.filterwarnings( 'ignore', 'cdelt will be ignored since cd is present', RuntimeWarning) cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt)) except AttributeError: cdelt = np.diag(self.wcs.cdelt) try: pc = self.wcs.pc except AttributeError: pc = 1 pccd = np.dot(cdelt, pc) return pccd def footprint_contains(self, coord, **kwargs): """ Determines if a given SkyCoord is contained in the wcs footprint. Parameters ---------- coord : `~astropy.coordinates.SkyCoord` The coordinate to check if it is within the wcs coordinate. **kwargs : Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel` Returns ------- response : bool True means the WCS footprint contains the coordinate, False means it does not. """ return coord.contained_by(self, **kwargs) def __WCS_unpickle__(cls, dct, fits_data): """ Unpickles a WCS object from a serialized FITS string. """ self = cls.__new__(cls) buffer = io.BytesIO(fits_data) hdulist = fits.open(buffer) naxis = dct.pop('naxis', None) if naxis: hdulist[0].header['naxis'] = naxis naxes = dct.pop('_naxis', []) for k, na in enumerate(naxes): hdulist[0].header[f'naxis{k + 1:d}'] = na kwargs = dct.pop('_init_kwargs', {}) self.__dict__.update(dct) wcskey = dct.pop('_alt_wcskey', ' ') WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs) self.pixel_bounds = dct.get('_pixel_bounds', None) return self def find_all_wcs(header, relax=True, keysel=None, fix=True, translate_units='', _do_set=True): """ Find all the WCS transformations in the given header. Parameters ---------- header : str or `~astropy.io.fits.Header` object. relax : bool or int, optional Degree of permissiveness: - `True` (default): Admit all recognized informal extensions of the WCS standard. - `False`: Recognize only FITS keywords defined by the published WCS standard. - `int`: a bit field selecting specific extensions to accept. See :ref:`astropy:relaxread` for details. keysel : sequence of str, optional A list of flags used to select the keyword types considered by wcslib. When ``None``, only the standard image header keywords are considered (and the underlying wcspih() C function is called). To use binary table image array or pixel list keywords, *keysel* must be set. Each element in the list should be one of the following strings: - 'image': Image header keywords - 'binary': Binary table image array keywords - 'pixel': Pixel list keywords Keywords such as ``EQUIna`` or ``RFRQna`` that are common to binary table image arrays and pixel lists (including ``WCSNna`` and ``TWCSna``) are selected by both 'binary' and 'pixel'. fix : bool, optional When `True` (default), call `~astropy.wcs.Wcsprm.fix` on the resulting objects to fix any non-standard uses in the header. `FITSFixedWarning` warnings will be emitted if any changes were made. translate_units : str, optional Specify which potentially unsafe translations of non-standard unit strings to perform. By default, performs none. See `WCS.fix` for more information about this parameter. Only effective when ``fix`` is `True`. 
Returns ------- wcses : list of `WCS` """ if isinstance(header, (str, bytes)): header_string = header elif isinstance(header, fits.Header): header_string = header.tostring() else: raise TypeError( "header must be a string or astropy.io.fits.Header object") keysel_flags = _parse_keysel(keysel) if isinstance(header_string, str): header_bytes = header_string.encode('ascii') else: header_bytes = header_string wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags) result = [] for wcsprm in wcsprms: subresult = WCS(fix=False, _do_set=False) subresult.wcs = wcsprm result.append(subresult) if fix: subresult.fix(translate_units) if _do_set: subresult.wcs.set() return result def validate(source): """ Prints a WCS validation report for the given FITS file. Parameters ---------- source : str or file-like or `~astropy.io.fits.HDUList` The FITS file to validate. Returns ------- results : list subclass instance The result is returned as nested lists. The first level corresponds to the HDUs in the given file. The next level has an entry for each WCS found in that header. The special subclass of list will pretty-print the results as a table when printed. """ class _WcsValidateWcsResult(list): def __init__(self, key): self._key = key def __repr__(self): result = [f" WCS key '{self._key or ' '}':"] if len(self): for entry in self: for i, line in enumerate(entry.splitlines()): if i == 0: initial_indent = ' - ' else: initial_indent = ' ' result.extend( textwrap.wrap( line, initial_indent=initial_indent, subsequent_indent=' ')) else: result.append(" No issues.") return '\n'.join(result) class _WcsValidateHduResult(list): def __init__(self, hdu_index, hdu_name): self._hdu_index = hdu_index self._hdu_name = hdu_name list.__init__(self) def __repr__(self): if len(self): if self._hdu_name: hdu_name = f' ({self._hdu_name})' else: hdu_name = '' result = [f'HDU {self._hdu_index}{hdu_name}:'] for wcs in self: result.append(repr(wcs)) return '\n'.join(result) return '' class _WcsValidateResults(list): def __repr__(self): result = [] for hdu in self: content = repr(hdu) if len(content): result.append(content) return '\n\n'.join(result) global __warningregistry__ if isinstance(source, fits.HDUList): hdulist = source else: hdulist = fits.open(source) results = _WcsValidateResults() for i, hdu in enumerate(hdulist): hdu_results = _WcsValidateHduResult(i, hdu.name) results.append(hdu_results) with warnings.catch_warnings(record=True) as warning_lines: wcses = find_all_wcs( hdu.header, relax=_wcs.WCSHDR_reject, fix=False, _do_set=False) for wcs in wcses: wcs_results = _WcsValidateWcsResult(wcs.wcs.alt) hdu_results.append(wcs_results) try: del __warningregistry__ except NameError: pass with warnings.catch_warnings(record=True) as warning_lines: warnings.resetwarnings() warnings.simplefilter( "always", FITSFixedWarning, append=True) try: WCS(hdu.header, key=wcs.wcs.alt or ' ', relax=_wcs.WCSHDR_reject, fix=True, _do_set=False) except WcsError as e: wcs_results.append(str(e)) wcs_results.extend([str(x.message) for x in warning_lines]) return results
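# Example usage of `validate` (an editorial sketch; 'image.fits' is a
# hypothetical path, not a file shipped with this module):
#
#     from astropy.wcs import validate
#     results = validate('image.fits')  # nested report, one entry per HDU
#     print(results)                    # pretty-prints issues per WCS key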
# Licensed under a 3-clause BSD style license - see LICENSE.rst # It gets to be really tedious to type long docstrings in ANSI C # syntax (since multi-line string literals are not valid). # Therefore, the docstrings are written here in doc/docstrings.py, # which are then converted by setup.py into docstrings.h, which is # included by pywcs.c __all__ = ['TWO_OR_MORE_ARGS', 'RETURNS', 'ORIGIN', 'RA_DEC_ORDER'] def _fix(content, indent=0): lines = content.split('\n') indent = '\n' + ' ' * indent return indent.join(lines) def TWO_OR_MORE_ARGS(naxis, indent=0): return _fix( f"""*args There are two accepted forms for the positional arguments: - 2 arguments: An *N* x *{naxis}* array of coordinates, and an *origin*. - more than 2 arguments: An array for each axis, followed by an *origin*. These arrays must be broadcastable to one another. Here, *origin* is the coordinate in the upper left corner of the image. In FITS and Fortran standards, this is 1. In Numpy and C standards this is 0. """, indent) def RETURNS(out_type, indent=0): return _fix(f"""result : array Returns the {out_type}. If the input was a single array and origin, a single array is returned, otherwise a tuple of arrays is returned.""", indent) def ORIGIN(indent=0): return _fix( """ origin : int Specifies the origin of pixel values. The Fortran and FITS standards use an origin of 1. Numpy and C use array indexing with origin at 0. """, indent) def RA_DEC_ORDER(indent=0): return _fix( """ ra_dec_order : bool, optional When `True`, ensures that world coordinates are always given and returned as (*ra*, *dec*) pairs, regardless of the order of the axes specified in the ``CTYPE`` keywords. Default is `False`. """, indent) a = """ ``double array[a_order+1][a_order+1]`` Focal plane transformation matrix. The `SIP`_ ``A_i_j`` matrix used for pixel to focal plane transformation. Its values may be changed in place, but it may not be resized, without creating a new `~astropy.wcs.Sip` object. """ a_order = """ ``int`` (read-only) Order of the polynomial (``A_ORDER``). """ all_pix2world = """ all_pix2world(pixcrd, origin) -> ``double array[ncoord][nelem]`` Transforms pixel coordinates to world coordinates. Does the following: - Detector to image plane correction (if present) - SIP distortion correction (if present) - FITS WCS distortion correction (if present) - wcslib "core" WCS transformation The first three (the distortion corrections) are done in parallel. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- world : ndarray Returns an array of world coordinates as ``double array[ncoord][nelem]``. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError Invalid coordinate transformation parameters. ValueError x- and y-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation. InvalidTransformError Ill-conditioned coordinate transformation parameters. """.format(ORIGIN()) alt = """ ``str`` Character code for alternate coordinate descriptions. For example, the ``"a"`` in keyword names such as ``CTYPEia``. This is a space character for the primary coordinate description, or one of the 26 upper-case letters, A-Z. """ ap = """ ``double array[ap_order+1][ap_order+1]`` Focal plane to pixel transformation matrix.
The `SIP`_ ``AP_i_j`` matrix used for focal plane to pixel transformation. Its values may be changed in place, but it may not be resized, without creating a new `~astropy.wcs.Sip` object. """ ap_order = """ ``int`` (read-only) Order of the polynomial (``AP_ORDER``). """ cel = """ `~astropy.wcs.Celprm` Information required to transform celestial coordinates. """ Celprm = """ Class that contains information required to transform celestial coordinates. It consists of certain members that must be set by the user (given) and others that are set by the WCSLIB routines (returned). Some of the latter are supplied for informational purposes and others are for internal use only. """ Prjprm = """ Class that contains information needed to project or deproject native spherical coordinates. It consists of certain members that must be set by the user (given) and others that are set by the WCSLIB routines (returned). Some of the latter are supplied for informational purposes and others are for internal use only. """ aux = """ `~astropy.wcs.Auxprm` Auxiliary coordinate system information of a specialist nature. """ Auxprm = """ Class that contains auxiliary coordinate system information of a specialist nature. This class can not be constructed directly from Python, but instead is returned from `~astropy.wcs.Wcsprm.aux`. """ axis_types = """ ``int array[naxis]`` An array of four-digit type codes for each axis. - First digit (i.e. 1000s): - 0: Non-specific coordinate type. - 1: Stokes coordinate. - 2: Celestial coordinate (including ``CUBEFACE``). - 3: Spectral coordinate. - Second digit (i.e. 100s): - 0: Linear axis. - 1: Quantized axis (``STOKES``, ``CUBEFACE``). - 2: Non-linear celestial axis. - 3: Non-linear spectral axis. - 4: Logarithmic axis. - 5: Tabular axis. - Third digit (i.e. 10s): - 0: Group number, e.g. lookup table number - The fourth digit is used as a qualifier depending on the axis type. - For celestial axes: - 0: Longitude coordinate. - 1: Latitude coordinate. - 2: ``CUBEFACE`` number. - For lookup tables: the axis number in a multidimensional table. ``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will have its type set to -1 and generate an error. """ b = """ ``double array[b_order+1][b_order+1]`` Pixel to focal plane transformation matrix. The `SIP`_ ``B_i_j`` matrix used for pixel to focal plane transformation. Its values may be changed in place, but it may not be resized, without creating a new `~astropy.wcs.Sip` object. """ b_order = """ ``int`` (read-only) Order of the polynomial (``B_ORDER``). """ bounds_check = """ bounds_check(pix2world, world2pix) Enable/disable bounds checking. Parameters ---------- pix2world : bool, optional When `True`, enable bounds checking for the pixel-to-world (p2x) transformations. Default is `True`. world2pix : bool, optional When `True`, enable bounds checking for the world-to-pixel (s2x) transformations. Default is `True`. Notes ----- Note that by default (without calling `bounds_check`) strict bounds checking is enabled. """ bp = """ ``double array[bp_order+1][bp_order+1]`` Focal plane to pixel transformation matrix. The `SIP`_ ``BP_i_j`` matrix used for focal plane to pixel transformation. Its values may be changed in place, but it may not be resized, without creating a new `~astropy.wcs.Sip` object. """ bp_order = """ ``int`` (read-only) Order of the polynomial (``BP_ORDER``). """ cd = """ ``double array[naxis][naxis]`` The ``CDi_ja`` linear transformation matrix. 
For historical compatibility, three alternate specifications of the linear transformations are available in wcslib. The canonical ``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated ``CROTAia`` keywords. Although the latter may not formally co-exist with ``PCi_ja``, the approach here is simply to ignore them if given in conjunction with ``PCi_ja``. `~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and `~astropy.wcs.Wcsprm.has_crota` can be used to determine which of these alternatives are present in the header. These alternate specifications of the linear transformation matrix are translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and are nowhere visible to the lower-level routines. In particular, `~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts to a unity ``PCi_ja`` matrix. """ cdelt = """ ``double array[naxis]`` Coordinate increments (``CDELTia``) for each coord axis. If a ``CDi_ja`` linear transformation matrix is present, a warning is raised and `~astropy.wcs.Wcsprm.cdelt` is ignored. The ``CDi_ja`` matrix may be deleted by:: del wcs.wcs.cd An undefined value is represented by NaN. """ cdfix = """ cdfix() Fix erroneously omitted ``CDi_ja`` keywords. Sets the diagonal element of the ``CDi_ja`` matrix to unity if all ``CDi_ja`` keywords associated with a given axis were omitted. According to Paper I, if any ``CDi_ja`` keywords at all are given in a FITS header then those not given default to zero. This results in a singular matrix with an intersecting row and column of zeros. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ cel_offset = """ ``boolean`` Is there an offset? If `True`, an offset will be applied to ``(x, y)`` to force ``(x, y) = (0, 0)`` at the fiducial point, (phi_0, theta_0). Default is `False`. """ celprm_phi0 = r""" `float`, `None`. The native longitude, :math:`\phi_0`, in degrees of the fiducial point, i.e., the point whose celestial coordinates are given in ''Celprm.ref[0:1]''. If `None` or ``nan``, the initialization routine, ``celset()``, will set this to a projection-specific default. """ celprm_theta0 = r""" `float`, `None`. The native latitude, :math:`\theta_0`, in degrees of the fiducial point, i.e. the point whose celestial coordinates are given in ``Celprm:ref[0:1]``. If `None` or ``nan``, the initialization routine, ``celset()``, will set this to a projection-specific default. """ celprm_ref = """ ``numpy.ndarray`` with 4 elements. (Given) The first pair of values should be set to the celestial longitude and latitude of the fiducial point in degrees - typically right ascension and declination. These are given by the ``CRVALia`` keywords in ``FITS``. (Given and returned) The second pair of values are the native longitude, ``phi_p`` (in degrees), and latitude, ``theta_p`` (in degrees), of the celestial pole (the latter is the same as the celestial latitude of the native pole, ``delta_p``) and these are given by the ``FITS`` keywords ``LONPOLEa`` and ``LATPOLEa`` (or by ``PVi_2a`` and ``PVi_3a`` attached to the longitude axis which take precedence if defined). ``LONPOLEa`` defaults to ``phi0`` if the celestial latitude of the fiducial point of the projection is greater than or equal to the native latitude, otherwise ``phi0 + 180`` (degrees). 
(This is the condition for the celestial latitude to increase in the same direction as the native latitude at the fiducial point.) ``ref[2]`` may be set to `None` or ``numpy.nan`` or 999.0 to indicate that the correct default should be substituted. ``theta_p``, the native latitude of the celestial pole (or equally the celestial latitude of the native pole, ``delta_p``) is often determined uniquely by ``CRVALia`` and ``LONPOLEa``, in which case ``LATPOLEa`` is ignored. However, in some circumstances there are two valid solutions for ``theta_p`` and ``LATPOLEa`` is used to choose between them. ``LATPOLEa`` is set in ``ref[3]`` and the solution closest to this value is used to reset ``ref[3]``. It is therefore legitimate, for example, to set ``ref[3]`` to ``+90.0`` to choose the more northerly solution - the default if the ``LATPOLEa`` keyword is omitted from the ``FITS`` header. For the special case where the fiducial point of the projection is at native latitude zero, its celestial latitude is zero, and ``LONPOLEa`` = ``+/- 90.0``, the celestial latitude of the native pole is not determined by the first three reference values and ``LATPOLEa`` specifies it completely. The returned value, ``celprm.latpreq``, specifies how ``LATPOLEa`` was actually used.""" celprm_euler = """ *Read-only* ``numpy.ndarray`` with 5 elements. Euler angles and associated intermediaries derived from the coordinate reference values. The first three values are the ``Z-``, ``X-``, and ``Z``-Euler angles in degrees, and the remaining two are the cosine and sine of the ``X``-Euler angle. """ celprm_latpreq = """ ``int``, *read-only*. For informational purposes, this indicates how the ``LATPOLEa`` keyword was used: - 0: Not required, ``theta_p == delta_p`` was determined uniquely by the ``CRVALia`` and ``LONPOLEa`` keywords. - 1: Required to select between two valid solutions of ``theta_p``. - 2: ``theta_p`` was specified solely by ``LATPOLEa``. """ celprm_isolat = """ ``bool``, *read-only*. True if the spherical rotation preserves the magnitude of the latitude, which occurs if the axes of the native and celestial coordinates are coincident. It signals an opportunity to cache intermediate calculations common to all elements in a vector computation. """ celprm_prj = """ *Read-only* Celestial transformation parameters. Some members of `Prjprm` are read-write, i.e., can be set by the user. For more details, see documentation for `Prjprm`. """ prjprm_r0 = r""" The radius of the generating sphere for the projection, a linear scaling parameter. If this is zero, it will be reset to its default value of :math:`180^\circ/\pi` (the value for FITS WCS). """ prjprm_code = """ Three-letter projection code defined by the FITS standard. """ prjprm_pv = """ Projection parameters. These correspond to the ``PVi_ma`` keywords in FITS, so ``pv[0]`` is ``PVi_0a``, ``pv[1]`` is ``PVi_1a``, etc., where ``i`` denotes the latitude-like axis. Many projections use ``pv[1]`` (``PVi_1a``), some also use ``pv[2]`` (``PVi_2a``) and ``SZP`` uses ``pv[3]`` (``PVi_3a``). ``ZPN`` is currently the only projection that uses any of the others. When setting ``pv`` values using lists or ``numpy.ndarray``, elements set to `None` will be left unchanged while those set to ``numpy.nan`` will be set to ``WCSLIB``'s ``UNDEFINED`` special value. For efficiency purposes, if the supplied list or ``numpy.ndarray`` is shorter than the length of the ``pv`` member, then the remaining values in ``pv`` will be left unchanged. .. note:: When retrieving ``pv``, a copy of the ``prjprm.pv`` array is returned. Modifying this array's values will not modify the underlying ``WCSLIB`` ``prjprm.pv`` data. """ prjprm_pvi = """ Set/Get projection parameters for specific index. These correspond to the ``PVi_ma`` keywords in FITS, so ``pv[0]`` is ``PVi_0a``, ``pv[1]`` is ``PVi_1a``, etc., where ``i`` denotes the latitude-like axis. Many projections use ``pv[1]`` (``PVi_1a``), some also use ``pv[2]`` (``PVi_2a``) and ``SZP`` uses ``pv[3]`` (``PVi_3a``). ``ZPN`` is currently the only projection that uses any of the others. Setting a ``pvi`` value to `None` will reset the corresponding ``WCSLIB``'s ``prjprm.pv`` element to the default value as set by ``WCSLIB``'s ``prjini()``. Setting a ``pvi`` value to ``numpy.nan`` will set the corresponding ``WCSLIB``'s ``prjprm.pv`` element to ``WCSLIB``'s ``UNDEFINED`` special value. """ prjprm_phi0 = r""" The native longitude, :math:`\phi_0` (in degrees) of the reference point, i.e. the point ``(x,y) = (0,0)``. If undefined, the initialization routine will set this to a projection-specific default. """ prjprm_theta0 = r""" The native latitude, :math:`\theta_0` (in degrees) of the reference point, i.e. the point ``(x,y) = (0,0)``. If undefined, the initialization routine will set this to a projection-specific default. """ prjprm_bounds = """ Controls bounds checking. If ``bounds&1`` then enable strict bounds checking for the spherical-to-Cartesian (``s2x``) transformation for the ``AZP``, ``SZP``, ``TAN``, ``SIN``, ``ZPN``, and ``COP`` projections. If ``bounds&2`` then enable strict bounds checking for the Cartesian-to-spherical transformation (``x2s``) for the ``HPX`` and ``XPH`` projections. If ``bounds&4`` then the Cartesian-to-spherical transformations (``x2s``) will invoke WCSLIB's ``prjbchk()`` to perform bounds checking on the computed native coordinates, with a tolerance set to suit each projection. ``bounds`` is set to 7 during initialization by default, which enables all checks. Zero it to disable all checking. It is not necessary to reset the ``Prjprm`` struct (via ``Prjprm.set()``) when ``bounds`` is changed. """ prjprm_name = """ *Read-only.* Long name of the projection. """ prjprm_category = """ *Read-only.* Projection category matching the value of the relevant ``wcs`` module constants: PRJ_ZENITHAL, PRJ_CYLINDRICAL, PRJ_PSEUDOCYLINDRICAL, PRJ_CONVENTIONAL, PRJ_CONIC, PRJ_POLYCONIC, PRJ_QUADCUBE, and PRJ_HEALPIX. """ prjprm_w = """ *Read-only.* Intermediate floating-point values derived from the projection parameters, cached here to save recomputation. .. note:: When retrieving ``w``, a copy of the ``prjprm.w`` array is returned. Modifying this array's values will not modify the underlying ``WCSLIB`` ``prjprm.w`` data. """ prjprm_pvrange = """ *Read-only.* Range of projection parameter indices: 100 times the first allowed index plus the number of parameters, e.g. ``TAN`` is 0 (no parameters), ``SZP`` is 103 (1 to 3), and ``ZPN`` is 30 (0 to 29). """ prjprm_simplezen = """ *Read-only.* True if the projection is a radially-symmetric zenithal projection. """ prjprm_equiareal = """ *Read-only.* True if the projection is equal area. """ prjprm_conformal = """ *Read-only.* True if the projection is conformal. """ prjprm_global_projection = """ *Read-only.* True if the projection can represent the whole sphere in a finite, non-overlapped mapping. """ prjprm_divergent = """ *Read-only.* True if the projection diverges in latitude.
""" prjprm_x0 = r""" *Read-only.* The offset in ``x`` used to force :math:`(x,y) = (0,0)` at :math:`(\phi_0, \theta_0)`. """ prjprm_y0 = r""" *Read-only.* The offset in ``y`` used to force :math:`(x,y) = (0,0)` at :math:`(\phi_0, \theta_0)`. """ prjprm_m = """ *Read-only.* Intermediate integer value (used only for the ``ZPN`` and ``HPX`` projections). """ prjprm_n = """ *Read-only.* Intermediate integer value (used only for the ``ZPN`` and ``HPX`` projections). """ prjprm_set = """ This method sets up a ``Prjprm`` object according to information supplied within it. Note that this routine need not be called directly; it will be invoked by `prjx2s` and `prjs2x` if ``Prjprm.flag`` is anything other than a predefined magic value. The one important property of ``set()`` is that the projection code must be defined in the ``Prjprm`` in order for ``set()`` to identify the required projection. Raises ------ MemoryError Null ``prjprm`` pointer passed to WCSLIB routines. InvalidPrjParametersError Invalid projection parameters. InvalidCoordinateError One or more of the ``(x,y)`` or ``(lon,lat)`` coordinates were invalid. """ prjprm_prjx2s = r""" Deproject Cartesian ``(x,y)`` coordinates in the plane of projection to native spherical coordinates :math:`(\phi,\theta)`. The projection is that specified by ``Prjprm.code``. Parameters ---------- x, y : numpy.ndarray Arrays corresponding to the first (``x``) and second (``y``) projected coordinates. Returns ------- phi, theta : tuple of numpy.ndarray Longitude and latitude :math:`(\phi,\theta)` of the projected point in native spherical coordinates (in degrees). Values corresponding to invalid ``(x,y)`` coordinates are set to ``numpy.nan``. Raises ------ MemoryError Null ``prjprm`` pointer passed to WCSLIB routines. InvalidPrjParametersError Invalid projection parameters. """ prjprm_prjs2x = r""" Project native spherical coordinates :math:`(\phi,\theta)` to Cartesian ``(x,y)`` coordinates in the plane of projection. The projection is that specified by ``Prjprm.code``. Parameters ---------- phi : numpy.ndarray Array corresponding to the longitude :math:`\phi` of the projected point in native spherical coordinates (in degrees). theta : numpy.ndarray Array corresponding to the longitude :math:`\theta` of the projected point in native spherical coordinatess (in degrees). Values corresponding to invalid :math:`(\phi, \theta)` coordinates are set to ``numpy.nan``. Returns ------- x, y : tuple of numpy.ndarray Projected coordinates. Raises ------ MemoryError Null ``prjprm`` pointer passed to WCSLIB routines. InvalidPrjParametersError Invalid projection parameters. """ celfix = """ Translates AIPS-convention celestial projection types, ``-NCP`` and ``-GLS``. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ cname = """ ``list of strings`` A list of the coordinate axis names, from ``CNAMEia``. """ colax = """ ``int array[naxis]`` An array recording the column numbers for each axis in a pixel list. """ colnum = """ ``int`` Column of FITS binary table associated with this WCS. Where the coordinate representation is associated with an image-array column in a FITS binary table, this property may be used to record the relevant column number. It should be set to zero for an image header or pixel list. """ compare = """ compare(other, cmp=0, tolerance=0.0) Compare two Wcsprm objects for equality. Parameters ---------- other : Wcsprm The other Wcsprm object to compare to. 
cmp : int, optional A bit field controlling the strictness of the comparison. When 0, (the default), all fields must be identical. The following constants, defined in the `astropy.wcs` module, may be or'ed together to loosen the comparison. - ``WCSCOMPARE_ANCILLARY``: Ignores ancillary keywords that don't change the WCS transformation, such as ``XPOSURE`` or ``EQUINOX``. Note that this also ignores ``DATE-OBS``, which does change the WCS transformation in some cases. - ``WCSCOMPARE_TILING``: Ignore integral differences in ``CRPIXja``. This is the 'tiling' condition, where two WCSes cover different regions of the same map projection and align on the same map grid. - ``WCSCOMPARE_CRPIX``: Ignore any differences at all in ``CRPIXja``. The two WCSes cover different regions of the same map projection but may not align on the same grid map. Overrides ``WCSCOMPARE_TILING``. tolerance : float, optional The amount of tolerance required. For example, for a value of 1e-6, all floating-point values in the objects must be equal to the first 6 decimal places. The default value of 0.0 implies exact equality. Returns ------- equal : bool """ convert = """ convert(array) Perform the unit conversion on the elements of the given *array*, returning an array of the same shape. """ coord = """ ``double array[K_M]...[K_2][K_1][M]`` The tabular coordinate array. Has the dimensions:: (K_M, ... K_2, K_1, M) (see `~astropy.wcs.Tabprm.K`) i.e. with the `M` dimension varying fastest so that the `M` elements of a coordinate vector are stored contiguously in memory. """ copy = """ Creates a deep copy of the WCS object. """ cpdis1 = """ `~astropy.wcs.DistortionLookupTable` The pre-linear transformation distortion lookup table, ``CPDIS1``. """ cpdis2 = """ `~astropy.wcs.DistortionLookupTable` The pre-linear transformation distortion lookup table, ``CPDIS2``. """ crder = """ ``double array[naxis]`` The random error in each coordinate axis, ``CRDERia``. An undefined value is represented by NaN. """ crln_obs = """ ``double`` Carrington heliographic longitude of the observer (deg). If undefined, this is set to `None`. """ crota = """ ``double array[naxis]`` ``CROTAia`` keyvalues for each coordinate axis. For historical compatibility, three alternate specifications of the linear transformations are available in wcslib. The canonical ``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated ``CROTAia`` keywords. Although the latter may not formally co-exist with ``PCi_ja``, the approach here is simply to ignore them if given in conjunction with ``PCi_ja``. `~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and `~astropy.wcs.Wcsprm.has_crota` can be used to determine which of these alternatives are present in the header. These alternate specifications of the linear transformation matrix are translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and are nowhere visible to the lower-level routines. In particular, `~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts to a unity ``PCi_ja`` matrix. """ crpix = """ ``double array[naxis]`` Coordinate reference pixels (``CRPIXja``) for each pixel axis. """ crval = """ ``double array[naxis]`` Coordinate reference values (``CRVALia``) for each coordinate axis. """ crval_tabprm = """ ``double array[M]`` Index values for the reference pixel for each of the tabular coord axes. 
""" csyer = """ ``double array[naxis]`` The systematic error in the coordinate value axes, ``CSYERia``. An undefined value is represented by NaN. """ ctype = """ ``list of strings[naxis]`` List of ``CTYPEia`` keyvalues. The `~astropy.wcs.Wcsprm.ctype` keyword values must be in upper case and there must be zero or one pair of matched celestial axis types, and zero or one spectral axis. """ cubeface = """ ``int`` Index into the ``pixcrd`` (pixel coordinate) array for the ``CUBEFACE`` axis. This is used for quadcube projections where the cube faces are stored on a separate axis. The quadcube projections (``TSC``, ``CSC``, ``QSC``) may be represented in FITS in either of two ways: - The six faces may be laid out in one plane and numbered as follows:: 0 4 3 2 1 4 3 2 5 Faces 2, 3 and 4 may appear on one side or the other (or both). The world-to-pixel routines map faces 2, 3 and 4 to the left but the pixel-to-world routines accept them on either side. - The ``COBE`` convention in which the six faces are stored in a three-dimensional structure using a ``CUBEFACE`` axis indexed from 0 to 5 as above. These routines support both methods; `~astropy.wcs.Wcsprm.set` determines which is being used by the presence or absence of a ``CUBEFACE`` axis in `~astropy.wcs.Wcsprm.ctype`. `~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` translate the ``CUBEFACE`` axis representation to the single plane representation understood by the lower-level projection routines. """ cunit = """ ``list of astropy.UnitBase[naxis]`` List of ``CUNITia`` keyvalues as `astropy.units.UnitBase` instances. These define the units of measurement of the ``CRVALia``, ``CDELTia`` and ``CDi_ja`` keywords. As ``CUNITia`` is an optional header keyword, `~astropy.wcs.Wcsprm.cunit` may be left blank but otherwise is expected to contain a standard units specification as defined by WCS Paper I. `~astropy.wcs.Wcsprm.unitfix` is available to translate commonly used non-standard units specifications but this must be done as a separate step before invoking `~astropy.wcs.Wcsprm.set`. For celestial axes, if `~astropy.wcs.Wcsprm.cunit` is not blank, `~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` to decimal degrees. It then resets `~astropy.wcs.Wcsprm.cunit` to ``"deg"``. For spectral axes, if `~astropy.wcs.Wcsprm.cunit` is not blank, `~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` to SI units. It then resets `~astropy.wcs.Wcsprm.cunit` accordingly. `~astropy.wcs.Wcsprm.set` ignores `~astropy.wcs.Wcsprm.cunit` for other coordinate types; `~astropy.wcs.Wcsprm.cunit` may be used to label coordinate values. """ cylfix = """ cylfix() Fixes WCS keyvalues for malformed cylindrical projections. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ data = """ ``float array`` The array data for the `~astropy.wcs.DistortionLookupTable`. """ data_wtbarr = """ ``double array`` The array data for the BINTABLE. """ dateavg = """ ``string`` Representative mid-point of the date of observation. In ISO format, ``yyyy-mm-ddThh:mm:ss``. See also -------- astropy.wcs.Wcsprm.dateobs """ dateobs = """ ``string`` Start of the date of observation. In ISO format, ``yyyy-mm-ddThh:mm:ss``. 
See also -------- astropy.wcs.Wcsprm.dateavg """ datfix = """ datfix() Translates the old ``DATE-OBS`` date format to year-2000 standard form ``(yyyy-mm-ddThh:mm:ss)`` and derives ``MJD-OBS`` from it if not already set. Alternatively, if `~astropy.wcs.Wcsprm.mjdobs` is set and `~astropy.wcs.Wcsprm.dateobs` isn't, then `~astropy.wcs.Wcsprm.datfix` derives `~astropy.wcs.Wcsprm.dateobs` from it. If both are set but disagree by more than half a day then `ValueError` is raised. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ delta = """ ``double array[M]`` (read-only) Interpolated indices into the coord array. Array of interpolated indices into the coordinate array such that Upsilon_m, as defined in Paper III, is equal to (`~astropy.wcs.Tabprm.p0` [m] + 1) + delta[m]. """ det2im = """ Convert detector coordinates to image plane coordinates. """ det2im1 = """ A `~astropy.wcs.DistortionLookupTable` object for detector to image plane correction in the *x*-axis. """ det2im2 = """ A `~astropy.wcs.DistortionLookupTable` object for detector to image plane correction in the *y*-axis. """ dims = """ ``int array[ndim]`` (read-only) The dimensions of the tabular array `~astropy.wcs.Wtbarr.data`. """ DistortionLookupTable = """ DistortionLookupTable(*table*, *crpix*, *crval*, *cdelt*) Represents a single lookup table for a `distortion paper`_ transformation. Parameters ---------- table : 2-dimensional array The distortion lookup table. crpix : 2-tuple The distortion array reference pixel crval : 2-tuple The image array pixel coordinate cdelt : 2-tuple The grid step size """ dsun_obs = """ ``double`` Distance between the centre of the Sun and the observer (m). If undefined, this is set to `None`. """ equinox = """ ``double`` The equinox associated with dynamical equatorial or ecliptic coordinate systems. ``EQUINOXa`` (or ``EPOCH`` in older headers). Not applicable to ICRS equatorial or ecliptic coordinates. An undefined value is represented by NaN. """ extlev = """ ``int`` (read-only) ``EXTLEV`` identifying the binary table extension. """ extnam = """ ``str`` (read-only) ``EXTNAME`` identifying the binary table extension. """ extrema = """ ``double array[K_M]...[K_2][2][M]`` (read-only) An array recording the minimum and maximum value of each element of the coordinate vector in each row of the coordinate array, with the dimensions:: (K_M, ... K_2, 2, M) (see `~astropy.wcs.Tabprm.K`). The minimum is recorded in the first element of the compressed K_1 dimension, then the maximum. This array is used by the inverse table lookup function to speed up table searches. """ extver = """ ``int`` (read-only) ``EXTVER`` identifying the binary table extension. """ find_all_wcs = """ find_all_wcs(relax=0, keysel=0) Find all WCS transformations in the header. Parameters ---------- header : str The raw FITS header data. relax : bool or int Degree of permissiveness: - `False`: Recognize only FITS keywords defined by the published WCS standard. - `True`: Admit all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to accept. See :ref:`astropy:relaxread` for details. keysel : sequence of flags Used to restrict the keyword types considered: - ``WCSHDR_IMGHEAD``: Image header keywords. - ``WCSHDR_BIMGARR``: Binary table image array. - ``WCSHDR_PIXLIST``: Pixel list keywords. If zero, there is no restriction. If -1, `wcspih` is called, rather than `wcstbh`. 
Returns ------- wcs_list : list of `~astropy.wcs.Wcsprm` """ fix = """ fix(translate_units='', naxis=0) Applies all of the corrections handled separately by `~astropy.wcs.Wcsprm.datfix`, `~astropy.wcs.Wcsprm.unitfix`, `~astropy.wcs.Wcsprm.celfix`, `~astropy.wcs.Wcsprm.spcfix`, `~astropy.wcs.Wcsprm.cylfix` and `~astropy.wcs.Wcsprm.cdfix`. Parameters ---------- translate_units : str, optional Specify which potentially unsafe translations of non-standard unit strings to perform. By default, performs all. Although ``"S"`` is commonly used to represent seconds, its translation to ``"s"`` is potentially unsafe since the standard recognizes ``"S"`` formally as Siemens, however rarely that may be used. The same applies to ``"H"`` for hours (Henry), and ``"D"`` for days (Debye). This string controls what to do in such cases, and is case-insensitive. - If the string contains ``"s"``, translate ``"S"`` to ``"s"``. - If the string contains ``"h"``, translate ``"H"`` to ``"h"``. - If the string contains ``"d"``, translate ``"D"`` to ``"d"``. Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'`` does all of them. naxis : int array, optional Image axis lengths. If this array is set to zero or ``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be invoked. Returns ------- status : dict Returns a dictionary containing the following keys, each referring to a status string for each of the sub-fix functions that were called: - `~astropy.wcs.Wcsprm.cdfix` - `~astropy.wcs.Wcsprm.datfix` - `~astropy.wcs.Wcsprm.unitfix` - `~astropy.wcs.Wcsprm.celfix` - `~astropy.wcs.Wcsprm.spcfix` - `~astropy.wcs.Wcsprm.cylfix` """ get_offset = """ get_offset(x, y) -> (x, y) Returns the offset as defined in the distortion lookup table. Returns ------- coordinate : (2,) tuple The offset from the distortion table for pixel point (*x*, *y*). """ get_cdelt = """ get_cdelt() -> numpy.ndarray Coordinate increments (``CDELTia``) for each coord axis as ``double array[naxis]``. Returns the ``CDELT`` offsets in read-only form. Unlike the `~astropy.wcs.Wcsprm.cdelt` property, this works even when the header specifies the linear transformation matrix in one of the alternative ``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access to the linear transformation matrix, but don't care how it was specified in the header. """ get_pc = """ get_pc() -> numpy.ndarray Returns the ``PC`` matrix in read-only form as ``double array[naxis][naxis]``. Unlike the `~astropy.wcs.Wcsprm.pc` property, this works even when the header specifies the linear transformation matrix in one of the alternative ``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access to the linear transformation matrix, but don't care how it was specified in the header. """ get_ps = """ get_ps() -> list Returns ``PSi_ma`` keywords for each *i* and *m* as list of tuples. Returns ------- ps : list Returned as a list of tuples of the form (*i*, *m*, *value*): - *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative) - *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative) - *value*: string. Parameter value. See also -------- astropy.wcs.Wcsprm.set_ps : Set ``PSi_ma`` values """ get_pv = """ get_pv() -> list Returns ``PVi_ma`` keywords for each *i* and *m* as list of tuples. Returns ------- sequence of tuple Returned as a list of tuples of the form (*i*, *m*, *value*): - *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative) - *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative) - *value*: string. Parameter value. 
See also -------- astropy.wcs.Wcsprm.set_pv : Set ``PVi_ma`` values Notes ----- Note that, if they were not given, `~astropy.wcs.Wcsprm.set` resets the entries for ``PVi_1a``, ``PVi_2a``, ``PVi_3a``, and ``PVi_4a`` for longitude axis *i* to match (``phi_0``, ``theta_0``), the native longitude and latitude of the reference point given by ``LONPOLEa`` and ``LATPOLEa``. """ has_cd = """ has_cd() -> bool Returns `True` if ``CDi_ja`` is present. ``CDi_ja`` is an alternate specification of the linear transformation matrix, maintained for historical compatibility. Matrix elements in the IRAF convention are equivalent to the product ``CDi_ja = CDELTia * PCi_ja``, but the defaults differ from that of the ``PCi_ja`` matrix. If one or more ``CDi_ja`` keywords are present then all unspecified ``CDi_ja`` default to zero. If no ``CDi_ja`` (or ``CROTAia``) keywords are present, then the header is assumed to be in ``PCi_ja`` form whether or not any ``PCi_ja`` keywords are present since this results in an interpretation of ``CDELTia`` consistent with the original FITS specification. While ``CDi_ja`` may not formally co-exist with ``PCi_ja``, it may co-exist with ``CDELTia`` and ``CROTAia`` which are to be ignored. See also -------- astropy.wcs.Wcsprm.cd : Get the raw ``CDi_ja`` values. """ has_cdi_ja = """ has_cdi_ja() -> bool Alias for `~astropy.wcs.Wcsprm.has_cd`. Maintained for backward compatibility. """ has_crota = """ has_crota() -> bool Returns `True` if ``CROTAia`` is present. ``CROTAia`` is an alternate specification of the linear transformation matrix, maintained for historical compatibility. In the AIPS convention, ``CROTAia`` may only be associated with the latitude axis of a celestial axis pair. It specifies a rotation in the image plane that is applied *after* the ``CDELTia``; any other ``CROTAia`` keywords are ignored. ``CROTAia`` may not formally co-exist with ``PCi_ja``. ``CROTAia`` and ``CDELTia`` may formally co-exist with ``CDi_ja`` but if so are to be ignored. See also -------- astropy.wcs.Wcsprm.crota : Get the raw ``CROTAia`` values """ has_crotaia = """ has_crotaia() -> bool Alias for `~astropy.wcs.Wcsprm.has_crota`. Maintained for backward compatibility. """ has_pc = """ has_pc() -> bool Returns `True` if ``PCi_ja`` is present. ``PCi_ja`` is the recommended way to specify the linear transformation matrix. See also -------- astropy.wcs.Wcsprm.pc : Get the raw ``PCi_ja`` values """ has_pci_ja = """ has_pci_ja() -> bool Alias for `~astropy.wcs.Wcsprm.has_pc`. Maintained for backward compatibility. """ hgln_obs = """ ``double`` Stonyhurst heliographic longitude of the observer. If undefined, this is set to `None`. """ hglt_obs = """ ``double`` Heliographic latitude (Carrington or Stonyhurst) of the observer (deg). If undefined, this is set to `None`. """ i = """ ``int`` (read-only) Image axis number. """ imgpix_matrix = """ ``double array[2][2]`` (read-only) Inverse of the ``CDELT`` or ``PC`` matrix. Inverse containing the product of the ``CDELTia`` diagonal matrix and the ``PCi_ja`` matrix. """ is_unity = """ is_unity() -> bool Returns `True` if the linear transformation matrix (`~astropy.wcs.Wcsprm.cd`) is unity. """ K = """ ``int array[M]`` (read-only) The lengths of the axes of the coordinate array. An array of length `M` whose elements record the lengths of the axes of the coordinate array and of each indexing vector. """ kind = """ ``str`` (read-only) ``wcstab`` array type. Character identifying the ``wcstab`` array type: - ``'c'``: coordinate array, - ``'i'``: index vector. 
""" lat = """ ``int`` (read-only) The index into the world coord array containing latitude values. """ latpole = """ ``double`` The native latitude of the celestial pole, ``LATPOLEa`` (deg). """ lattyp = """ ``string`` (read-only) Celestial axis type for latitude. For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--", "DEC-", "GLON", "GLAT", etc. in the first four characters of ``CTYPEia`` but with trailing dashes removed. """ lng = """ ``int`` (read-only) The index into the world coord array containing longitude values. """ lngtyp = """ ``string`` (read-only) Celestial axis type for longitude. For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--", "DEC-", "GLON", "GLAT", etc. in the first four characters of ``CTYPEia`` but with trailing dashes removed. """ lonpole = """ ``double`` The native longitude of the celestial pole. ``LONPOLEa`` (deg). """ M = """ ``int`` (read-only) Number of tabular coordinate axes. """ m = """ ``int`` (read-only) ``wcstab`` axis number for index vectors. """ map = """ ``int array[M]`` Association between axes. A vector of length `~astropy.wcs.Tabprm.M` that defines the association between axis *m* in the *M*-dimensional coordinate array (1 <= *m* <= *M*) and the indices of the intermediate world coordinate and world coordinate arrays. When the intermediate and world coordinate arrays contain the full complement of coordinate elements in image-order, as will usually be the case, then ``map[m-1] == i-1`` for axis *i* in the *N*-dimensional image (1 <= *i* <= *N*). In terms of the FITS keywords:: map[PVi_3a - 1] == i - 1. However, a different association may result if the intermediate coordinates, for example, only contains a (relevant) subset of intermediate world coordinate elements. For example, if *M* == 1 for an image with *N* > 1, it is possible to fill the intermediate coordinates with the relevant coordinate element with ``nelem`` set to 1. In this case ``map[0] = 0`` regardless of the value of *i*. """ mix = """ mix(mixpix, mixcel, vspan, vstep, viter, world, pixcrd, origin) Given either the celestial longitude or latitude plus an element of the pixel coordinate, solves for the remaining elements by iterating on the unknown celestial coordinate element using `~astropy.wcs.Wcsprm.s2p`. Parameters ---------- mixpix : int Which element on the pixel coordinate is given. mixcel : int Which element of the celestial coordinate is given. If *mixcel* = ``1``, celestial longitude is given in ``world[self.lng]``, latitude returned in ``world[self.lat]``. If *mixcel* = ``2``, celestial latitude is given in ``world[self.lat]``, longitude returned in ``world[self.lng]``. vspan : (float, float) Solution interval for the celestial coordinate, in degrees. The ordering of the two limits is irrelevant. Longitude ranges may be specified with any convenient normalization, for example ``(-120,+120)`` is the same as ``(240,480)``, except that the solution will be returned with the same normalization, i.e. lie within the interval specified. vstep : float Step size for solution search, in degrees. If ``0``, a sensible, although perhaps non-optimal default will be used. viter : int If a solution is not found then the step size will be halved and the search recommenced. *viter* controls how many times the step size is halved. The allowed range is 5 - 10. world : ndarray World coordinate elements as ``double array[naxis]``. ``world[self.lng]`` and ``world[self.lat]`` are the celestial longitude and latitude, in degrees. 
Which is given and which returned depends on the value of *mixcel*. All other elements are given. The results will be written to this array in-place. pixcrd : ndarray Pixel coordinates as ``double array[naxis]``. The element indicated by *mixpix* is given and the remaining elements will be written in-place. {} Returns ------- result : dict Returns a dictionary with the following keys: - *phi* (``double array[naxis]``) - *theta* (``double array[naxis]``) - Longitude and latitude in the native coordinate system of the projection, in degrees. - *imgcrd* (``double array[naxis]``) - Image coordinate elements. ``imgcrd[self.lng]`` and ``imgcrd[self.lat]`` are the projected *x*- and *y*-coordinates, in decimal degrees. - *world* (``double array[naxis]``) - Another reference to the *world* argument passed in. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. InvalidCoordinateError Invalid world coordinate. NoSolutionError No solution found in the specified interval. See also -------- astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng Get the axes numbers for latitude and longitude Notes ----- Initially, the specified solution interval is checked to see if it's a \"crossing\" interval. If it isn't, a search is made for a crossing solution by iterating on the unknown celestial coordinate starting at the upper limit of the solution interval and decrementing by the specified step size. A crossing is indicated if the trial value of the pixel coordinate steps through the value specified. If a crossing interval is found then the solution is determined by a modified form of \"regula falsi\" division of the crossing interval. If no crossing interval was found within the specified solution interval then a search is made for a \"non-crossing\" solution as may arise from a point of tangency. The process is complicated by having to make allowance for the discontinuities that occur in all map projections. Once one solution has been determined others may be found by subsequent invocations of `~astropy.wcs.Wcsprm.mix` with suitably restricted solution intervals. Note the circumstance that arises when the solution point lies at a native pole of a projection in which the pole is represented as a finite curve, for example the zenithals and conics. In such cases two or more valid solutions may exist but `~astropy.wcs.Wcsprm.mix` only ever returns one. Because of its generality, `~astropy.wcs.Wcsprm.mix` is very compute-intensive. For compute-limited applications, more efficient special-case solvers could be written for simple projections, for example non-oblique cylindrical projections. """.format(ORIGIN()) mjdavg = """ ``double`` Modified Julian Date corresponding to ``DATE-AVG``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.mjdobs """ mjdobs = """ ``double`` Modified Julian Date corresponding to ``DATE-OBS``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.mjdavg """ name = """ ``string`` The name given to the coordinate representation ``WCSNAMEa``. """ naxis = """ ``int`` (read-only) The number of axes (pixel and coordinate). Given by the ``NAXIS`` or ``WCSAXESa`` keyvalues. 
The number of coordinate axes is determined at parsing time, and can not be subsequently changed. It is determined from the highest of the following: 1. ``NAXIS`` 2. ``WCSAXESa`` 3. The highest axis number in any parameterized WCS keyword. The keyvalue, as well as the keyword, must be syntactically valid otherwise it will not be considered. If none of these keyword types is present, i.e. if the header only contains auxiliary WCS keywords for a particular coordinate representation, then no coordinate description is constructed for it. This value may differ for different coordinate representations of the same image. """ nc = """ ``int`` (read-only) Total number of coord vectors in the coord array. Total number of coordinate vectors in the coordinate array being the product K_1 * K_2 * ... * K_M. """ ndim = """ ``int`` (read-only) Expected dimensionality of the ``wcstab`` array. """ obsgeo = """ ``double array[3]`` Location of the observer in a standard terrestrial reference frame. ``OBSGEO-X``, ``OBSGEO-Y``, ``OBSGEO-Z`` (in meters). An undefined value is represented by NaN. """ p0 = """ ``int array[M]`` Interpolated indices into the coordinate array. Vector of length `~astropy.wcs.Tabprm.M` of interpolated indices into the coordinate array such that Upsilon_m, as defined in Paper III, is equal to ``(p0[m] + 1) + delta[m]``. """ p2s = """ p2s(pixcrd, origin) Converts pixel to world coordinates. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- result : dict Returns a dictionary with the following keys: - *imgcrd*: ndarray - Array of intermediate world coordinates as ``double array[ncoord][nelem]``. For celestial axes, ``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the projected *x*-, and *y*-coordinates, in pseudo degrees. For spectral axes, ``imgcrd[][self.spec]`` is the intermediate spectral coordinate, in SI units. - *phi*: ndarray - Array as ``double array[ncoord]``. - *theta*: ndarray - Longitude and latitude in the native coordinate system of the projection, in degrees, as ``double array[ncoord]``. - *world*: ndarray - Array of world coordinates as ``double array[ncoord][nelem]``. For celestial axes, ``world[][self.lng]`` and ``world[][self.lat]`` are the celestial longitude and latitude, in degrees. For spectral axes, ``world[][self.spec]`` is the intermediate spectral coordinate, in SI units. - *stat*: ndarray - Status return value for each coordinate as ``int array[ncoord]``. ``0`` for success, ``1+`` for invalid pixel coordinate. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. ValueError *x*- and *y*-coordinate arrays are not the same size. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. See also -------- astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng Definition of the latitude and longitude axes """.format(ORIGIN()) p4_pix2foc = """ p4_pix2foc(*pixcrd, origin*) -> ``double array[ncoord][nelem]`` Convert pixel coordinates to focal plane coordinates using `distortion paper`_ lookup-table correction. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- foccrd : ndarray Returns an array of focal plane coordinates as ``double array[ncoord][nelem]``. 
Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(ORIGIN()) pc = """ ``double array[naxis][naxis]`` The ``PCi_ja`` (pixel coordinate) transformation matrix. The order is:: [[PC1_1, PC1_2], [PC2_1, PC2_2]] For historical compatibility, three alternate specifications of the linear transformations are available in wcslib. The canonical ``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated ``CROTAia`` keywords. Although the latter may not formally co-exist with ``PCi_ja``, the approach here is simply to ignore them if given in conjunction with ``PCi_ja``. `~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and `~astropy.wcs.Wcsprm.has_crota` can be used to determine which of these alternatives are present in the header. These alternate specifications of the linear transformation matrix are translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and are nowhere visible to the lower-level routines. In particular, `~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts to a unity ``PCi_ja`` matrix. """ phi0 = """ ``double`` The native longitude of the fiducial point. The point whose celestial coordinates are given in ``ref[1:2]``. If undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`, will set this to a projection-specific default. See also -------- astropy.wcs.Wcsprm.theta0 """ pix2foc = """ pix2foc(*pixcrd, origin*) -> ``double array[ncoord][nelem]`` Perform both `SIP`_ polynomial and `distortion paper`_ lookup-table correction in parallel. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- foccrd : ndarray Returns an array of focal plane coordinates as ``double array[ncoord][nelem]``. Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(ORIGIN()) piximg_matrix = """ ``double array[2][2]`` (read-only) Matrix containing the product of the ``CDELTia`` diagonal matrix and the ``PCi_ja`` matrix. """ print_contents = """ print_contents() Print the contents of the `~astropy.wcs.Wcsprm` object to stdout. Probably only useful for debugging purposes, and may be removed in the future. To get a string of the contents, use `repr`. """ print_contents_tabprm = """ print_contents() Print the contents of the `~astropy.wcs.Tabprm` object to stdout. Probably only useful for debugging purposes, and may be removed in the future. To get a string of the contents, use `repr`. """ print_contents_wtbarr = """ print_contents() Print the contents of the `~astropy.wcs.Wtbarr` object to stdout. Probably only useful for debugging purposes, and may be removed in the future. To get a string of the contents, use `repr`. """ radesys = """ ``string`` The equatorial or ecliptic coordinate system type, ``RADESYSa``. """ restfrq = """ ``double`` Rest frequency (Hz) from ``RESTFRQa``. An undefined value is represented by NaN. """ restwav = """ ``double`` Rest wavelength (m) from ``RESTWAVa``. An undefined value is represented by NaN. """ row = """ ``int`` (read-only) Table row number. """ rsun_ref = """ ``double`` Reference radius of the Sun used in coordinate calculations (m). If undefined, this is set to `None`. """ s2p = """ s2p(world, origin) Transforms world coordinates to pixel coordinates.
Parameters ---------- world : ndarray Array of world coordinates, in decimal degrees, as ``double array[ncoord][nelem]``. {} Returns ------- result : dict Returns a dictionary with the following keys: - *phi*: ``double array[ncoord]`` - *theta*: ``double array[ncoord]`` - Longitude and latitude in the native coordinate system of the projection, in degrees. - *imgcrd*: ``double array[ncoord][nelem]`` - Array of intermediate world coordinates. For celestial axes, ``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the projected *x*-, and *y*-coordinates, in pseudo \"degrees\". For quadcube projections with a ``CUBEFACE`` axis, the face number is also returned in ``imgcrd[][self.cubeface]``. For spectral axes, ``imgcrd[][self.spec]`` is the intermediate spectral coordinate, in SI units. - *pixcrd*: ``double array[ncoord][nelem]`` - Array of pixel coordinates. Pixel coordinates are zero-based. - *stat*: ``int array[ncoord]`` - Status return value for each coordinate. ``0`` for success, ``1+`` for invalid pixel coordinate. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. See also -------- astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng Definition of the latitude and longitude axes """.format(ORIGIN()) sense = """ ``int array[M]`` +1 if monotonically increasing, -1 if decreasing. A vector of length `~astropy.wcs.Tabprm.M` whose elements indicate whether the corresponding indexing vector is monotonically increasing (+1), or decreasing (-1). """ set = """ set() Sets up a WCS object for use according to information supplied within it. Note that this routine need not be called directly; it will be invoked by `~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` if necessary. Some attributes that are based on other attributes (such as `~astropy.wcs.Wcsprm.lattyp` on `~astropy.wcs.Wcsprm.ctype`) may not be correct until after `~astropy.wcs.Wcsprm.set` is called. `~astropy.wcs.Wcsprm.set` strips off trailing blanks in all string members. `~astropy.wcs.Wcsprm.set` recognizes the ``NCP`` projection and converts it to the equivalent ``SIN`` projection and it also recognizes ``GLS`` as a synonym for ``SFL``. It does alias translation for the AIPS spectral types (``FREQ-LSR``, ``FELO-HEL``, etc.) but without changing the input header keywords. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. """ set_tabprm = """ set() Allocates memory for work arrays. Also sets up the class according to information supplied within it. Note that this routine need not be called directly; it will be invoked by functions that need it. Raises ------ MemoryError Memory allocation failed. InvalidTabularParametersError Invalid tabular parameters. """ set_celprm = """ set() Sets up a ``celprm`` struct according to information supplied within it. Note that this routine need not be called directly; it will be invoked by functions that need it. Raises ------ MemoryError Memory allocation failed. 
InvalidPrjParametersError Invalid celestial parameters. """ set_ps = """ set_ps(ps) Sets ``PSi_ma`` keywords for each *i* and *m*. Parameters ---------- ps : sequence of tuple The input must be a sequence of tuples of the form (*i*, *m*, *value*): - *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative) - *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative) - *value*: string. Parameter value. See also -------- astropy.wcs.Wcsprm.get_ps """ set_pv = """ set_pv(pv) Sets ``PVi_ma`` keywords for each *i* and *m*. Parameters ---------- pv : list of tuple The input must be a sequence of tuples of the form (*i*, *m*, *value*): - *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative) - *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative) - *value*: float. Parameter value. See also -------- astropy.wcs.Wcsprm.get_pv """ sip = """ Get/set the `~astropy.wcs.Sip` object for performing `SIP`_ distortion correction. """ Sip = """ Sip(*a, b, ap, bp, crpix*) The `~astropy.wcs.Sip` class performs polynomial distortion correction using the `SIP`_ convention in both directions. Parameters ---------- a : ndarray The ``A_i_j`` polynomial for pixel to focal plane transformation as ``double array[m+1][m+1]``. Its size must be (*m* + 1, *m* + 1) where *m* = ``A_ORDER``. b : ndarray The ``B_i_j`` polynomial for pixel to focal plane transformation as ``double array[m+1][m+1]``. Its size must be (*m* + 1, *m* + 1) where *m* = ``B_ORDER``. ap : ndarray The ``AP_i_j`` polynomial for focal plane to pixel transformation as ``double array[m+1][m+1]``. Its size must be (*m* + 1, *m* + 1) where *m* = ``AP_ORDER``. bp : ndarray The ``BP_i_j`` polynomial for focal plane to pixel transformation as ``double array[m+1][m+1]``. Its size must be (*m* + 1, *m* + 1) where *m* = ``BP_ORDER``. crpix : ndarray The reference pixel as ``double array[2]``. Notes ----- Shupe, D. L., M. Moshir, J. Li, D. Makovoz and R. Narron. 2005. "The SIP Convention for Representing Distortion in FITS Image Headers." ADASS XIV. """ sip_foc2pix = """ sip_foc2pix(*foccrd, origin*) -> ``double array[ncoord][nelem]`` Convert focal plane coordinates to pixel coordinates using the `SIP`_ polynomial distortion convention. Parameters ---------- foccrd : ndarray Array of focal plane coordinates as ``double array[ncoord][nelem]``. {} Returns ------- pixcrd : ndarray Returns an array of pixel coordinates as ``double array[ncoord][nelem]``. Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(ORIGIN()) sip_pix2foc = """ sip_pix2foc(*pixcrd, origin*) -> ``double array[ncoord][nelem]`` Convert pixel coordinates to focal plane coordinates using the `SIP`_ polynomial distortion convention. Parameters ---------- pixcrd : ndarray Array of pixel coordinates as ``double array[ncoord][nelem]``. {} Returns ------- foccrd : ndarray Returns an array of focal plane coordinates as ``double array[ncoord][nelem]``. Raises ------ MemoryError Memory allocation failed. ValueError Invalid coordinate transformation parameters. """.format(ORIGIN()) spcfix = """ spcfix() -> int Translates AIPS-convention spectral coordinate types. {``FREQ``, ``VELO``, ``FELO``}-{``OBS``, ``HEL``, ``LSR``} (e.g. ``FREQ-LSR``, ``VELO-OBS``, ``FELO-HEL``) Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ spec = """ ``int`` (read-only) The index containing the spectral axis values. """ specsys = """ ``string`` Spectral reference frame (standard of rest), ``SPECSYSa``.
See also -------- astropy.wcs.Wcsprm.ssysobs, astropy.wcs.Wcsprm.velosys """ sptr = """ sptr(ctype, i=-1) Translates the spectral axis in a WCS object. For example, a ``FREQ`` axis may be translated into ``ZOPT-F2W`` and vice versa. Parameters ---------- ctype : str Required spectral ``CTYPEia``, maximum of 8 characters. The first four characters are required to be given and are never modified. The remaining four, the algorithm code, are completely determined by, and must be consistent with, the first four characters. Wildcarding may be used, i.e. if the final three characters are specified as ``\"???\"``, or if just the eighth character is specified as ``\"?\"``, the correct algorithm code will be substituted and returned. i : int Index of the spectral axis (0-relative). If ``i < 0`` (or not provided), it will be set to the first spectral axis identified from the ``CTYPE`` keyvalues in the FITS header. Raises ------ MemoryError Memory allocation failed. SingularMatrixError Linear transformation matrix is singular. InconsistentAxisTypesError Inconsistent or unrecognized coordinate axis types. ValueError Invalid parameter value. InvalidTransformError Invalid coordinate transformation parameters. InvalidTransformError Ill-conditioned coordinate transformation parameters. InvalidSubimageSpecificationError Invalid subimage specification (no spectral axis). """ ssysobs = """ ``string`` Spectral reference frame. The spectral reference frame in which there is no differential variation in the spectral coordinate across the field-of-view, ``SSYSOBSa``. See also -------- astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.velosys """ ssyssrc = """ ``string`` Spectral reference frame for redshift. The spectral reference frame (standard of rest) in which the redshift was measured, ``SSYSSRCa``. """ sub = """ sub(axes) Extracts the coordinate description for a subimage from a `~astropy.wcs.WCS` object. The world coordinate system of the subimage must be separable in the sense that the world coordinates at any point in the subimage must depend only on the pixel coordinates of the axes extracted. In practice, this means that the ``PCi_ja`` matrix of the original image must not contain non-zero off-diagonal terms that associate any of the subimage axes with any of the non-subimage axes. `sub` can also add axes to a wcsprm object. The new axes will be created using the defaults set by the Wcsprm constructor which produce a simple, unnamed, linear axis with world coordinates equal to the pixel coordinate. These default values can be changed before invoking `set`. Parameters ---------- axes : int or a sequence. - If an int, include the first *N* axes in their original order. - If a sequence, may contain a combination of image axis numbers (1-relative) or special axis identifiers (see below). Order is significant; ``axes[0]`` is the axis number of the input image that corresponds to the first axis in the subimage, etc. Use an axis number of 0 to create a new axis using the defaults. - If ``0``, ``[]`` or ``None``, do a deep copy. Coordinate axes types may be specified using either strings or special integer constants. 
The available types are: - ``'longitude'`` / ``WCSSUB_LONGITUDE``: Celestial longitude - ``'latitude'`` / ``WCSSUB_LATITUDE``: Celestial latitude - ``'cubeface'`` / ``WCSSUB_CUBEFACE``: Quadcube ``CUBEFACE`` axis - ``'spectral'`` / ``WCSSUB_SPECTRAL``: Spectral axis - ``'stokes'`` / ``WCSSUB_STOKES``: Stokes axis - ``'celestial'`` / ``WCSSUB_CELESTIAL``: An alias for the combination of ``'longitude'``, ``'latitude'`` and ``'cubeface'``. Returns ------- new_wcs : `~astropy.wcs.WCS` object Raises ------ MemoryError Memory allocation failed. InvalidSubimageSpecificationError Invalid subimage specification (no spectral axis). NonseparableSubimageCoordinateSystemError Non-separable subimage coordinate system. Notes ----- Combinations of subimage axes of particular types may be extracted in the same order as they occur in the input image by combining the integer constants with the 'binary or' (``|``) operator. For example:: wcs.sub([WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_SPECTRAL]) would extract the longitude, latitude, and spectral axes in the same order as the input image. If one of each were present, the resulting object would have three dimensions. For convenience, ``WCSSUB_CELESTIAL`` is defined as the combination ``WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_CUBEFACE``. The codes may also be negated to extract all but the types specified, for example:: wcs.sub([ WCSSUB_LONGITUDE, WCSSUB_LATITUDE, WCSSUB_CUBEFACE, -(WCSSUB_SPECTRAL | WCSSUB_STOKES)]) The last of these specifies all axis types other than spectral or Stokes. Extraction is done in the order specified by ``axes``, i.e. a longitude axis (if present) would be extracted first (via ``axes[0]``) and not subsequently (via ``axes[3]``). Likewise for the latitude and cubeface axes in this example. The number of dimensions in the returned object may be less than or greater than the length of ``axes``. However, it will never exceed the number of axes in the input image. """ tab = """ ``list of Tabprm`` Tabular coordinate objects. A list of tabular coordinate objects associated with this WCS. """ Tabprm = """ A class to store the information related to tabular coordinates, i.e., coordinates that are defined via a lookup table. This class can not be constructed directly from Python, but instead is returned from `~astropy.wcs.Wcsprm.tab`. """ theta0 = """ ``double`` The native latitude of the fiducial point. The point whose celestial coordinates are given in ``ref[1:2]``. If undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`, will set this to a projection-specific default. See also -------- astropy.wcs.Wcsprm.phi0 """ to_header = """ to_header(relax=False) `to_header` translates a WCS object into a FITS header. The details of the header depend on context: - If the `~astropy.wcs.Wcsprm.colnum` member is non-zero then a binary table image array header will be produced. - Otherwise, if the `~astropy.wcs.Wcsprm.colax` member is set non-zero then a pixel list header will be produced. - Otherwise, a primary image or image extension header will be produced. The output header will almost certainly differ from the input in a number of respects: 1. The output header only contains WCS-related keywords. In particular, it does not contain syntactically-required keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or ``END``. 2. Deprecated (e.g. ``CROTAn``) or non-standard usage will be translated to standard (this is partially dependent on whether ``fix`` was applied). 3.
Quantities will be converted to the units used internally, basically SI with the addition of degrees. 4. Floating-point quantities may be given to a different decimal precision. 5. Elements of the ``PCi_j`` matrix will be written if and only if they differ from the unit matrix. Thus, if the matrix is unity then no elements will be written. 6. Additional keywords such as ``WCSAXES``, ``CUNITia``, ``LONPOLEa`` and ``LATPOLEa`` may appear. 7. The original keycomments will be lost, although `~astropy.wcs.Wcsprm.to_header` tries hard to write meaningful comments. 8. Keyword order may be changed. Keywords can be translated between the image array, binary table, and pixel lists forms by manipulating the `~astropy.wcs.Wcsprm.colnum` or `~astropy.wcs.Wcsprm.colax` members of the `~astropy.wcs.WCS` object. Parameters ---------- relax : bool or int Degree of permissiveness: - `False`: Recognize only FITS keywords defined by the published WCS standard. - `True`: Admit all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to write. See :ref:`astropy:relaxwrite` for details. Returns ------- header : str Raw FITS header as a string. """ ttype = """ ``str`` (read-only) ``TTYPEn`` identifying the column of the binary table that contains the wcstab array. """ unitfix = """ unitfix(translate_units='') Translates non-standard ``CUNITia`` keyvalues. For example, ``DEG`` -> ``deg``, also stripping off unnecessary whitespace. Parameters ---------- translate_units : str, optional Do potentially unsafe translations of non-standard unit strings. Although ``\"S\"`` is commonly used to represent seconds, its translation to ``\"s\"`` is potentially unsafe since the standard recognizes ``\"S\"`` formally as Siemens, however rarely that may be used. The same applies to ``\"H\"`` for hours (Henry), and ``\"D\"`` for days (Debye). This string controls what to do in such cases, and is case-insensitive. - If the string contains ``\"s\"``, translate ``\"S\"`` to ``\"s\"``. - If the string contains ``\"h\"``, translate ``\"H\"`` to ``\"h\"``. - If the string contains ``\"d\"``, translate ``\"D\"`` to ``\"d\"``. Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'`` does all of them. Returns ------- success : int Returns ``0`` for success; ``-1`` if no change required. """ velangl = """ ``double`` Velocity angle. The angle in degrees that should be used to decompose an observed velocity into radial and transverse components. An undefined value is represented by NaN. """ velosys = """ ``double`` Relative radial velocity. The relative radial velocity (m/s) between the observer and the selected standard of rest in the direction of the celestial reference coordinate, ``VELOSYSa``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.ssysobs """ velref = """ ``int`` AIPS velocity code. From ``VELREF`` keyword. """ wcs = """ A `~astropy.wcs.Wcsprm` object to perform the basic `wcslib`_ WCS transformation. """ Wcs = """ Wcs(*sip, cpdis, wcsprm, det2im*) Wcs objects amalgamate basic WCS (as provided by `wcslib`_), with `SIP`_ and `distortion paper`_ operations. To perform all distortion corrections and WCS transformation, use ``all_pix2world``.
Parameters ---------- sip : `~astropy.wcs.Sip` object or None cpdis : (2,) tuple of `~astropy.wcs.DistortionLookupTable` or None wcsprm : `~astropy.wcs.Wcsprm` det2im : (2,) tuple of `~astropy.wcs.DistortionLookupTable` or None """ Wcsprm = """ Wcsprm(header=None, key=' ', relax=False, naxis=2, keysel=0, colsel=None) `~astropy.wcs.Wcsprm` performs the core WCS transformations. .. note:: The members of this object correspond roughly to the key/value pairs in the FITS header. However, they are adjusted and normalized in a number of ways that make performing the WCS transformation easier. Therefore, they can not be relied upon to get the original values in the header. For that, use `astropy.io.fits.Header` directly. The FITS header parsing enforces correct FITS "keyword = value" syntax with regard to the equals sign occurring in columns 9 and 10. However, it does recognize free-format character (NOST 100-2.0, Sect. 5.2.1), integer (Sect. 5.2.3), and floating-point values (Sect. 5.2.4) for all keywords. .. warning:: Many of the attributes of this class require additional processing when modifying the underlying C structure. When needed, this additional processing is implemented in attribute setters. Therefore, for mutable attributes, one should always set the attribute rather than a slice of its current value (or its individual elements) since the latter may lead the class instance to be in an invalid state. For example, attribute ``crpix`` of a 2D WCS' ``Wcsprm`` object ``wcs`` should be set as ``wcs.crpix = [crpix1, crpix2]`` instead of ``wcs.crpix[0] = crpix1; wcs.crpix[1] = crpix2``. Parameters ---------- header : `~astropy.io.fits.Header`, str, or None. If ``None``, the object will be initialized to default values. key : str, optional The key referring to a particular WCS transform in the header. This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``\"a\"`` part of ``\"CTYPEia\"``. (*key* may only be provided if *header* is also provided.) relax : bool or int, optional Degree of permissiveness: - `False`: Recognize only FITS keywords defined by the published WCS standard. - `True`: Admit all recognized informal extensions of the WCS standard. - `int`: a bit field selecting specific extensions to accept. See :ref:`astropy:relaxread` for details. naxis : int, optional The number of world coordinates axes for the object. (*naxis* may only be provided if *header* is `None`.) keysel : sequence of flag bits, optional Vector of flag bits that may be used to restrict the keyword types considered: - ``WCSHDR_IMGHEAD``: Image header keywords. - ``WCSHDR_BIMGARR``: Binary table image array. - ``WCSHDR_PIXLIST``: Pixel list keywords. If zero, there is no restriction. If -1, the underlying wcslib function ``wcspih()`` is called, rather than ``wcstbh()``. colsel : sequence of int A sequence of table column numbers used to restrict the keywords considered. `None` indicates no restriction. Raises ------ MemoryError Memory allocation failed. ValueError Invalid key. KeyError Key not found in FITS header. """ wtb = """ ``list of Wtbarr`` objects to construct coordinate lookup tables from BINTABLE. """ Wtbarr = """ Classes to construct coordinate lookup tables from a binary table extension (BINTABLE). This class can not be constructed directly from Python, but instead is returned from `~astropy.wcs.Wcsprm.wtb`. """ zsource = """ ``double`` The redshift, ``ZSOURCEa``, of the source. An undefined value is represented by NaN. """ WcsError = """ Base class of all invalid WCS errors.
""" SingularMatrix = """ SingularMatrixError() The linear transformation matrix is singular. """ InconsistentAxisTypes = """ InconsistentAxisTypesError() The WCS header inconsistent or unrecognized coordinate axis type(s). """ InvalidTransform = """ InvalidTransformError() The WCS transformation is invalid, or the transformation parameters are invalid. """ InvalidCoordinate = """ InvalidCoordinateError() One or more of the world coordinates is invalid. """ NoSolution = """ NoSolutionError() No solution can be found in the given interval. """ InvalidSubimageSpecification = """ InvalidSubimageSpecificationError() The subimage specification is invalid. """ NonseparableSubimageCoordinateSystem = """ NonseparableSubimageCoordinateSystemError() Non-separable subimage coordinate system. """ NoWcsKeywordsFound = """ NoWcsKeywordsFoundError() No WCS keywords were found in the given header. """ InvalidTabularParameters = """ InvalidTabularParametersError() The given tabular parameters are invalid. """ InvalidPrjParameters = """ InvalidPrjParametersError() The given projection parameters are invalid. """ mjdbeg = """ ``double`` Modified Julian Date corresponding to ``DATE-BEG``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.mjdbeg """ mjdend = """ ``double`` Modified Julian Date corresponding to ``DATE-END``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.mjdend """ mjdref = """ ``double`` Modified Julian Date corresponding to ``DATE-REF``. ``(MJD = JD - 2400000.5)``. An undefined value is represented by NaN. See also -------- astropy.wcs.Wcsprm.dateref """ bepoch = """ ``double`` Equivalent to ``DATE-OBS``. Expressed as a Besselian epoch. See also -------- astropy.wcs.Wcsprm.dateobs """ jepoch = """ ``double`` Equivalent to ``DATE-OBS``. Expressed as a Julian epoch. See also -------- astropy.wcs.Wcsprm.dateobs """ datebeg = """ ``string`` Date at the start of the observation. In ISO format, ``yyyy-mm-ddThh:mm:ss``. See also -------- astropy.wcs.Wcsprm.datebeg """ dateend = """ ``string`` Date at the end of the observation. In ISO format, ``yyyy-mm-ddThh:mm:ss``. See also -------- astropy.wcs.Wcsprm.dateend """ dateref = """ ``string`` Date of a reference epoch relative to which other time measurements refer. See also -------- astropy.wcs.Wcsprm.dateref """ timesys = """ ``string`` Time scale (UTC, TAI, etc.) in which all other time-related auxiliary header values are recorded. Also defines the time scale for an image axis with CTYPEia set to 'TIME'. See also -------- astropy.wcs.Wcsprm.timesys """ trefpos = """ ``string`` Location in space where the recorded time is valid. See also -------- astropy.wcs.Wcsprm.trefpos """ trefdir = """ ``string`` Reference direction used in calculating a pathlength delay. See also -------- astropy.wcs.Wcsprm.trefdir """ timeunit = """ ``string`` Time units in which the following header values are expressed: ``TSTART``, ``TSTOP``, ``TIMEOFFS``, ``TIMSYER``, ``TIMRDER``, ``TIMEDEL``. It also provides the default value for ``CUNITia`` for time axes. See also -------- astropy.wcs.Wcsprm.trefdir """ plephem = """ ``string`` The Solar System ephemeris used for calculating a pathlength delay. See also -------- astropy.wcs.Wcsprm.plephem """ tstart = """ ``double`` equivalent to DATE-BEG expressed as a time in units of TIMEUNIT relative to DATEREF+TIMEOFFS. 
See also -------- astropy.wcs.Wcsprm.tstop """ tstop = """ ``double`` Equivalent to ``DATE-END``, expressed as a time in units of ``TIMEUNIT`` relative to ``DATEREF`` + ``TIMEOFFS``. See also -------- astropy.wcs.Wcsprm.tstart """ telapse = """ ``double`` The elapsed time between ``DATE-BEG`` and ``DATE-END``, in units of ``TIMEUNIT``. See also -------- astropy.wcs.Wcsprm.tstart, astropy.wcs.Wcsprm.tstop """ timeoffs = """ ``double`` Time offset, which may be used, for example, to provide a uniform clock correction for times referenced to ``DATEREF``. See also -------- astropy.wcs.Wcsprm.dateref """ timsyer = """ ``double`` The absolute error of the time values, in units of ``TIMEUNIT``. See also -------- astropy.wcs.Wcsprm.timrder """ timrder = """ ``double`` The accuracy of time stamps relative to each other, in units of ``TIMEUNIT``. See also -------- astropy.wcs.Wcsprm.timsyer """ timedel = """ ``double`` The resolution of the time stamps. See also -------- astropy.wcs.Wcsprm.timepixr """ timepixr = """ ``double`` Relative position of the time stamps in binned time intervals, a value between 0.0 and 1.0. See also -------- astropy.wcs.Wcsprm.timedel """ obsorbit = """ ``string`` URI, URL, or name of an orbit ephemeris file giving spacecraft coordinates relating to ``TREFPOS``. See also -------- astropy.wcs.Wcsprm.trefpos """ xposure = """ ``double`` Effective exposure time in units of ``TIMEUNIT``. See also -------- astropy.wcs.Wcsprm.timeunit """ czphs = """ ``double array[naxis]`` The time at the zero point of a phase axis, ``CZPHSia``. An undefined value is represented by NaN. """ cperi = """ ``double array[naxis]`` The period of a phase axis, ``CPERIia``. An undefined value is represented by NaN. """
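# ---------------------------------------------------------------------------
# A minimal usage sketch (an editorial addition, not part of the wcslib
# documentation above): it exercises the low-level Wcsprm API documented
# here -- ``set``, ``p2s``, ``s2p`` and ``to_header``.  All header values
# below (CTYPE/CRVAL/CRPIX/CDELT) are arbitrary illustrative choices.
def _example_wcsprm_roundtrip():
    import numpy as np
    from astropy.wcs import Wcsprm

    wp = Wcsprm(naxis=2)
    wp.ctype = ['RA---TAN', 'DEC--TAN']
    wp.crval = [120.0, -30.0]
    wp.crpix = [512.0, 512.0]      # assign the whole list, never a slice
    wp.cdelt = [-2.8e-4, 2.8e-4]
    wp.set()                       # normalize/validate, e.g. CDi_ja -> PCi_ja

    pix = np.array([[0.0, 0.0], [511.0, 511.0]], dtype=float)
    world = wp.p2s(pix, 1)['world']     # origin=1 for FITS-style pixel origins
    back = wp.s2p(world, 1)['pixcrd']   # round-trip world -> pixel
    assert np.allclose(back, pix)
    return wp.to_header()               # raw FITS cards as a single string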
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ The astropy.time package provides functionality for manipulating times and dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI, UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in astronomy. """ import os import copy import enum import operator import threading from datetime import datetime, date, timedelta from time import strftime from warnings import warn import numpy as np import erfa from astropy import units as u, constants as const from astropy.units import UnitConversionError from astropy.utils import ShapedLikeNDArray from astropy.utils.compat.misc import override__dir__ from astropy.utils.data_info import MixinInfo, data_info_factory from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning from .utils import day_frac from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS, TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime) # Import TimeFromEpoch to avoid breaking code that followed the old example of # making a custom timescale in the documentation. from .formats import TimeFromEpoch # noqa from astropy.extern import _strptime __all__ = ['TimeBase', 'Time', 'TimeDelta', 'TimeInfo', 'update_leap_seconds', 'TIME_SCALES', 'STANDARD_TIME_SCALES', 'TIME_DELTA_SCALES', 'ScaleValueError', 'OperandTypeError', 'TimeDeltaMissingUnitWarning'] STANDARD_TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc') LOCAL_SCALES = ('local',) TIME_TYPES = dict((scale, scales) for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales) TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'), ('tai', 'tcg'): ('tt',), ('tai', 'ut1'): ('utc',), ('tai', 'tdb'): ('tt',), ('tcb', 'tcg'): ('tdb', 'tt'), ('tcb', 'tt'): ('tdb',), ('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'), ('tcb', 'utc'): ('tdb', 'tt', 'tai'), ('tcg', 'tdb'): ('tt',), ('tcg', 'ut1'): ('tt', 'tai', 'utc'), ('tcg', 'utc'): ('tt', 'tai'), ('tdb', 'ut1'): ('tt', 'tai', 'utc'), ('tdb', 'utc'): ('tt', 'tai'), ('tt', 'ut1'): ('tai', 'utc'), ('tt', 'utc'): ('tai',), } GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg') BARYCENTRIC_SCALES = ('tcb', 'tdb') ROTATIONAL_SCALES = ('ut1',) TIME_DELTA_TYPES = dict((scale, scales) for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES, ROTATIONAL_SCALES, LOCAL_SCALES) for scale in scales) TIME_DELTA_SCALES = GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES # For time scale changes, we need L_G and L_B, which are stored in erfam.h as # /* L_G = 1 - d(TT)/d(TCG) */ # define ERFA_ELG (6.969290134e-10) # /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */ # define ERFA_ELB (1.550519768e-8) # These are exposed in erfa as erfa.ELG and erfa.ELB. # Implied: d(TT)/d(TCG) = 1-L_G # and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G) # scale offsets as second = first + first * scale_offset[(first,second)] SCALE_OFFSETS = {('tt', 'tai'): None, ('tai', 'tt'): None, ('tcg', 'tt'): -erfa.ELG, ('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG), ('tcg', 'tai'): -erfa.ELG, ('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG), ('tcb', 'tdb'): -erfa.ELB, ('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)} # triple-level dictionary, yay! 
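# The mapping below is: kind ('mean' or 'apparent') -> IAU model name -> a
# dict with the erfa function to call and the time scales its arguments
# expect; the pre-2000 models additionally carry ``'include_tio': False``.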
SIDEREAL_TIME_MODELS = { 'mean': { 'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')}, 'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')}, 'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',), 'include_tio': False} }, 'apparent': { 'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')}, 'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')}, 'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)}, 'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',), 'include_tio': False} }} class _LeapSecondsCheck(enum.Enum): NOT_STARTED = 0 # No thread has reached the check RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held) DONE = 2 # update_leap_seconds has completed _LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED _LEAP_SECONDS_LOCK = threading.RLock() class TimeInfo(MixinInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ attr_names = MixinInfo.attr_names | {'serialize_method'} _supports_indexing = True # The usual tuple of attributes needed for serialization is replaced # by a property, since Time can be serialized different ways. _represent_as_dict_extra_attrs = ('format', 'scale', 'precision', 'in_subfmt', 'out_subfmt', 'location', '_delta_ut1_utc', '_delta_tdb_tt') # When serializing, write out the `value` attribute using the column name. _represent_as_dict_primary_data = 'value' mask_val = np.ma.masked @property def _represent_as_dict_attrs(self): method = self.serialize_method[self._serialize_context] if method == 'formatted_value': out = ('value',) elif method == 'jd1_jd2': out = ('jd1', 'jd2') else: raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'") return out + self._represent_as_dict_extra_attrs def __init__(self, bound=False): super().__init__(bound) # If bound to a data object instance then create the dict of attributes # which stores the info attribute values. if bound: # Specify how to serialize this object depending on context. # If ``True`` for a context, then use formatted ``value`` attribute # (e.g. the ISO time string). If ``False`` then use float jd1 and jd2. self.serialize_method = {'fits': 'jd1_jd2', 'ecsv': 'formatted_value', 'hdf5': 'jd1_jd2', 'yaml': 'jd1_jd2', 'parquet': 'jd1_jd2', None: 'jd1_jd2'} def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. 
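For ``Time`` this is the approximate JD value together with the much smaller remainder relative to it, so that sorting the two arrays lexically matches chronological order without losing the extra precision of the two-double (jd1 + jd2) representation.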
Returns ------- arrays : list of ndarray """ parent = self._parent jd_approx = parent.jd jd_remainder = (parent - parent.__class__(jd_approx, format='jd')).jd return [jd_approx, jd_remainder] @property def unit(self): return None info_summary_stats = staticmethod( data_info_factory(names=MixinInfo._stats, funcs=[getattr(np, stat) for stat in MixinInfo._stats])) # When Time has mean, std, min, max methods: # funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats] def _construct_from_dict_base(self, map): if 'jd1' in map and 'jd2' in map: # Initialize as JD but revert to desired format and out_subfmt (if needed) format = map.pop('format') out_subfmt = map.pop('out_subfmt', None) map['format'] = 'jd' map['val'] = map.pop('jd1') map['val2'] = map.pop('jd2') out = self._parent_cls(**map) out.format = format if out_subfmt is not None: out.out_subfmt = out_subfmt else: map['val'] = map.pop('value') out = self._parent_cls(**map) return out def _construct_from_dict(self, map): delta_ut1_utc = map.pop('_delta_ut1_utc', None) delta_tdb_tt = map.pop('_delta_tdb_tt', None) out = self._construct_from_dict_base(map) if delta_ut1_utc is not None: out._delta_ut1_utc = delta_ut1_utc if delta_tdb_tt is not None: out._delta_tdb_tt = delta_tdb_tt return out def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new Time instance which is consistent with the input Time objects ``cols`` and has ``length`` rows. This is intended for creating an empty Time instance whose elements can be set in-place for table operations like join or vstack. It checks that the input locations and attributes are consistent. This is used when a Time object is used as a mixin column in an astropy Table. Parameters ---------- cols : list List of input columns (Time objects) length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : Time (or subclass) Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'description')) attrs.pop('dtype') # Not relevant for Time col0 = cols[0] # Check that location is consistent for all Time objects for col in cols[1:]: # This is the method used by __setitem__ to ensure that the right side # has a consistent location (and coerce data if necessary, but that does # not happen in this case since `col` is already a Time object). If this # passes then any subsequent table operations via setitem will work.
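# For example, vertically stacking two tables whose Time columns carry
# different ``location`` values fails here with the ValueError below,
# rather than silently producing a column with mixed locations.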
try: col0._make_value_equivalent(slice(None), col) except ValueError: raise ValueError('input columns have inconsistent locations') # Make a new Time object with the desired shape and attributes shape = (length,) + attrs.pop('shape') jd2000 = 2451544.5 # Arbitrary JD value J2000.0 that will work with ERFA jd1 = np.full(shape, jd2000, dtype='f8') jd2 = np.zeros(shape, dtype='f8') tm_attrs = {attr: getattr(col0, attr) for attr in ('scale', 'location', 'precision', 'in_subfmt', 'out_subfmt')} out = self._parent_cls(jd1, jd2, format='jd', **tm_attrs) out.format = col0.format # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out class TimeDeltaInfo(TimeInfo): _represent_as_dict_extra_attrs = ('format', 'scale') def _construct_from_dict(self, map): return self._construct_from_dict_base(map) def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new TimeDelta instance which is consistent with the input Time objects ``cols`` and has ``length`` rows. This is intended for creating an empty Time instance whose elements can be set in-place for table operations like join or vstack. It checks that the input locations and attributes are consistent. This is used when a Time object is used as a mixin column in an astropy Table. Parameters ---------- cols : list List of input columns (Time objects) length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : Time (or subclass) Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'description')) attrs.pop('dtype') # Not relevant for Time col0 = cols[0] # Make a new Time object with the desired shape and attributes shape = (length,) + attrs.pop('shape') jd1 = np.zeros(shape, dtype='f8') jd2 = np.zeros(shape, dtype='f8') out = self._parent_cls(jd1, jd2, format='jd', scale=col0.scale) out.format = col0.format # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out class TimeBase(ShapedLikeNDArray): """Base time class from which Time and TimeDelta inherit.""" # Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__) # gets called over the __mul__ of Numpy arrays. __array_priority__ = 20000 # Declare that Time can be used as a Table column by defining the # attribute where column attributes will be stored. _astropy_column_attrs = None def __getnewargs__(self): return (self._time,) def _init_from_vals(self, val, val2, format, scale, copy, precision=None, in_subfmt=None, out_subfmt=None): """ Set the internal _format, scale, and _time attrs from user inputs. This handles coercion into the correct shapes and some basic input validation. 
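Masked elements in ``val``/``val2`` are filled before parsing; the mask is re-applied at the end by setting the corresponding ``jd2`` elements to NaN.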
""" if precision is None: precision = 3 if in_subfmt is None: in_subfmt = '*' if out_subfmt is None: out_subfmt = '*' # Coerce val into an array val = _make_array(val, copy) # If val2 is not None, ensure consistency if val2 is not None: val2 = _make_array(val2, copy) try: np.broadcast(val, val2) except ValueError: raise ValueError('Input val and val2 have inconsistent shape; ' 'they cannot be broadcast together.') if scale is not None: if not (isinstance(scale, str) and scale.lower() in self.SCALES): raise ScaleValueError("Scale {!r} is not in the allowed scales " "{}".format(scale, sorted(self.SCALES))) # If either of the input val, val2 are masked arrays then # find the masked elements and fill them. mask, val, val2 = _check_for_masked_and_fill(val, val2) # Parse / convert input values into internal jd1, jd2 based on format self._time = self._get_time_fmt(val, val2, format, scale, precision, in_subfmt, out_subfmt) self._format = self._time.name # Hack from #9969 to allow passing the location value that has been # collected by the TimeAstropyTime format class up to the Time level. # TODO: find a nicer way. if hasattr(self._time, '_location'): self.location = self._time._location del self._time._location # If any inputs were masked then masked jd2 accordingly. From above # routine ``mask`` must be either Python bool False or an bool ndarray # with shape broadcastable to jd2. if mask is not False: mask = np.broadcast_to(mask, self._time.jd2.shape) self._time.jd1[mask] = 2451544.5 # Set to JD for 2000-01-01 self._time.jd2[mask] = np.nan def _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt): """ Given the supplied val, val2, format and scale try to instantiate the corresponding TimeFormat class to convert the input values into the internal jd1 and jd2. If format is `None` and the input is a string-type or object array then guess available formats and stop when one matches. """ if (format is None and (val.dtype.kind in ('S', 'U', 'O', 'M') or val.dtype.names)): # Input is a string, object, datetime, or a table-like ndarray # (structured array, recarray). These input types can be # uniquely identified by the format classes. formats = [(name, cls) for name, cls in self.FORMATS.items() if issubclass(cls, TimeUnique)] # AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry, # but try to guess it at the end. formats.append(('astropy_time', TimeAstropyTime)) elif not (isinstance(format, str) and format.lower() in self.FORMATS): if format is None: raise ValueError("No time format was given, and the input is " "not unique") else: raise ValueError("Format {!r} is not one of the allowed " "formats {}".format(format, sorted(self.FORMATS))) else: formats = [(format, self.FORMATS[format])] assert formats problems = {} for name, cls in formats: try: return cls(val, val2, scale, precision, in_subfmt, out_subfmt) except UnitConversionError: raise except (ValueError, TypeError) as err: # If ``format`` specified then there is only one possibility, so raise # immediately and include the upstream exception message to make it # easier for user to see what is wrong. 
if len(formats) == 1: raise ValueError( f'Input values did not match the format class {format}:' + os.linesep + f'{err.__class__.__name__}: {err}' ) from err else: problems[name] = err else: raise ValueError(f'Input values did not match any of the formats ' f'where the format keyword is optional: ' f'{problems}') from problems[formats[0][0]] @property def writeable(self): return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable @writeable.setter def writeable(self, value): self._time.jd1.flags.writeable = value self._time.jd2.flags.writeable = value @property def format(self): """ Get or set time format. The format defines the way times are represented when accessed via the ``.value`` attribute. By default it is the same as the format used for initializing the `Time` instance, but it can be set to any other value that could be used for initialization. These can be listed with:: >>> list(Time.FORMATS) ['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date', 'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64', 'fits', 'byear', 'jyear', 'byear_str', 'jyear_str'] """ return self._format @format.setter def format(self, format): """Set time format""" if format not in self.FORMATS: raise ValueError(f'format must be one of {list(self.FORMATS)}') format_cls = self.FORMATS[format] # Get the new TimeFormat object to contain time in new format. Possibly # coerce in/out_subfmt to '*' (default) if existing subfmt values are # not valid in the new format. self._time = format_cls( self._time.jd1, self._time.jd2, self._time._scale, self.precision, in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt), out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt), from_jd=True) self._format = format def __repr__(self): return ("<{} object: scale='{}' format='{}' value={}>" .format(self.__class__.__name__, self.scale, self.format, getattr(self, self.format))) def __str__(self): return str(getattr(self, self.format)) def __hash__(self): try: loc = getattr(self, 'location', None) if loc is not None: loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m) return hash((self.jd1, self.jd2, self.scale, loc)) except TypeError: if self.ndim != 0: reason = '(must be scalar)' elif self.masked: reason = '(value is masked)' else: raise raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}") @property def scale(self): """Time scale""" return self._time.scale def _set_scale(self, scale): """ This is the key routine that actually does time scale conversions. This is not public and not connected to the read-only scale property. """ if scale == self.scale: return if scale not in self.SCALES: raise ValueError("Scale {!r} is not in the allowed scales {}" .format(scale, sorted(self.SCALES))) if scale == 'utc' or self.scale == 'utc': # If doing a transform involving UTC then check that the leap # seconds table is up to date. _check_leapsec() # Determine the chain of scale transformations to get from the current # scale to the new scale. MULTI_HOPS contains a dict of all # transformations (xforms) that require intermediate xforms. # The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order. xform = (self.scale, scale) xform_sort = tuple(sorted(xform)) multi = MULTI_HOPS.get(xform_sort, ()) xforms = xform_sort[:1] + multi + xform_sort[-1:] # If we made the reverse xform then reverse it now. if xform_sort != xform: xforms = tuple(reversed(xforms)) # Transform the jd1,2 pairs through the chain of scale xforms. 
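# Worked example: for utc -> tdb, xform_sort is ('tdb', 'utc') and
# MULTI_HOPS gives ('tt', 'tai'), so xforms is ('tdb', 'tt', 'tai', 'utc'),
# reversed to ('utc', 'tai', 'tt', 'tdb'); the loop below then calls
# erfa.utctai, erfa.taitt and erfa.tttdb in turn.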
jd1, jd2 = self._time.jd1, self._time.jd2_filled for sys1, sys2 in zip(xforms[:-1], xforms[1:]): # Some xforms require an additional delta_ argument that is # provided through Time methods. These values may be supplied by # the user or computed based on available approximations. The # get_delta_ methods are available for only one combination of # sys1, sys2 though the property applies for both xform directions. args = [jd1, jd2] for sys12 in ((sys1, sys2), (sys2, sys1)): dt_method = '_get_delta_{}_{}'.format(*sys12) try: get_dt = getattr(self, dt_method) except AttributeError: pass else: args.append(get_dt(jd1, jd2)) break conv_func = getattr(erfa, sys1 + sys2) jd1, jd2 = conv_func(*args) jd1, jd2 = day_frac(jd1, jd2) if self.masked: jd2[self.mask] = np.nan self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision, self.in_subfmt, self.out_subfmt, from_jd=True) @property def precision(self): """ Decimal precision when outputting seconds as floating point (int value between 0 and 9 inclusive). """ return self._time.precision @precision.setter def precision(self, val): del self.cache if not isinstance(val, int) or val < 0 or val > 9: raise ValueError('precision attribute must be an int between ' '0 and 9') self._time.precision = val @property def in_subfmt(self): """ Unix wildcard pattern to select subformats for parsing string input times. """ return self._time.in_subfmt @in_subfmt.setter def in_subfmt(self, val): self._time.in_subfmt = val del self.cache @property def out_subfmt(self): """ Unix wildcard pattern to select subformats for outputting times. """ return self._time.out_subfmt @out_subfmt.setter def out_subfmt(self, val): # Setting the out_subfmt property here does validation of ``val`` self._time.out_subfmt = val del self.cache @property def shape(self): """The shape of the time instances. Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a tuple. Note that if different instances share some but not all underlying data, setting the shape of one instance can make the other instance unusable. Hence, it is strongly recommended to get new, reshaped instances with the ``reshape`` method. Raises ------ ValueError If the new shape has the wrong total number of elements. AttributeError If the shape of the ``jd1``, ``jd2``, ``location``, ``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed without the arrays being copied. For these cases, use the `Time.reshape` method (which copies any arrays that cannot be reshaped in-place). """ return self._time.jd1.shape @shape.setter def shape(self, shape): del self.cache # We have to keep track of arrays that were already reshaped, # since we may have to return those to their original shape if a later # shape-setting fails. reshaped = [] oldshape = self.shape # In-place reshape of data/attributes. Need to access _time.jd1/2 not # self.jd1/2 because the latter are not guaranteed to be the actual # data, and in fact should not be directly changeable from the public # API. 
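# For example, ``t.shape = (2, 3)`` on a six-element Time reshapes jd1/jd2
# (and any non-scalar location or delta arrays) in place; if one of them
# cannot be reshaped without a copy, everything already reshaped is rolled
# back to ``oldshape`` and the exception propagates.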
for obj, attr in ((self._time, 'jd1'), (self._time, 'jd2'), (self, '_delta_ut1_utc'), (self, '_delta_tdb_tt'), (self, 'location')): val = getattr(obj, attr, None) if val is not None and val.size > 1: try: val.shape = shape except Exception: for val2 in reshaped: val2.shape = oldshape raise else: reshaped.append(val) def _shaped_like_input(self, value): if self._time.jd1.shape: if isinstance(value, np.ndarray): return value else: raise TypeError( f"JD is an array ({self._time.jd1!r}) but value " f"is not ({value!r})") else: # zero-dimensional array, is it safe to unbox? if (isinstance(value, np.ndarray) and not value.shape and not np.ma.is_masked(value)): if value.dtype.kind == 'M': # existing test doesn't want datetime64 converted return value[()] elif value.dtype.fields: # Unpack but keep field names; .item() doesn't # Still don't get python types in the fields return value[()] else: return value.item() else: return value @property def jd1(self): """ First of the two doubles that internally store time value(s) in JD. """ jd1 = self._time.mask_if_needed(self._time.jd1) return self._shaped_like_input(jd1) @property def jd2(self): """ Second of the two doubles that internally store time value(s) in JD. """ jd2 = self._time.mask_if_needed(self._time.jd2) return self._shaped_like_input(jd2) def to_value(self, format, subfmt='*'): """Get time values expressed in specified output format. This method allows representing the ``Time`` object in the desired output ``format`` and optional sub-format ``subfmt``. Available built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each format can have its own sub-formats. For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long' uses ``numpy.longdouble`` for somewhat enhanced precision (with the enhancement depending on platform), and 'decimal' :class:`decimal.Decimal` for full precision. For 'str' and 'bytes', the number of digits is also chosen such that time values are represented accurately. For built-in date-like string formats, one of 'date_hms', 'date_hm', or 'date' (or 'longdate_hms', etc., for 5-digit years in `~astropy.time.TimeFITS`). For sub-formats including seconds, the number of digits used for the fractional seconds is as set by `~astropy.time.Time.precision`. Parameters ---------- format : str The format in which one wants the time values. Default: the current format. subfmt : str or None, optional Value or wildcard pattern to select the sub-format in which the values should be given. The default of '*' picks the first available for a given format, i.e., 'float' or 'date_hms'. If `None`, use the instance's ``out_subfmt``. """ # TODO: add a precision argument (but ensure it is keyword argument # only, to make life easier for TimeDelta.to_value()). if format not in self.FORMATS: raise ValueError(f'format must be one of {list(self.FORMATS)}') cache = self.cache['format'] # Try to keep cache behaviour like it was in astropy < 4.0. key = format if subfmt is None else (format, subfmt) if key not in cache: if format == self.format: tm = self else: tm = self.replicate(format=format) # Some TimeFormat subclasses may not be able to handle being passed # an out_subfmt. This includes some core classes like # TimeBesselianEpochString that do not have any allowed subfmts. But # those do deal with `self.out_subfmt` internally, so if subfmt is # the same, we do not pass it on.
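# For example, with the default out_subfmt of '*', t.to_value('jyear_str')
# leaves ``kwargs`` empty below, while t.to_value('mjd', 'str') passes
# out_subfmt='str' through to the format class.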
kwargs = {} if subfmt is not None and subfmt != tm.out_subfmt: kwargs['out_subfmt'] = subfmt try: value = tm._time.to_value(parent=tm, **kwargs) except TypeError as exc: # Try validating subfmt, e.g. for formats like 'jyear_str' that # do not implement out_subfmt in to_value() (because there are # no allowed subformats). If subfmt is not valid this gives the # same exception as would have occurred if the call to # `to_value()` had succeeded. tm._time._select_subfmts(subfmt) # Subfmt was valid, so fall back to the original exception to see # if it was lack of support for out_subfmt as a call arg. if "unexpected keyword argument 'out_subfmt'" in str(exc): raise ValueError( f"to_value() method for format {format!r} does not " f"support passing a 'subfmt' argument") from None else: # Some unforeseen exception so raise. raise value = tm._shaped_like_input(value) cache[key] = value return cache[key] @property def value(self): """Time value(s) in current format""" return self.to_value(self.format, None) @property def masked(self): return self._time.masked @property def mask(self): return self._time.mask def insert(self, obj, values, axis=0): """ Insert values before the given indices in the column and return a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object. The values to be inserted must conform to the rules for in-place setting of ``Time`` objects (see ``Get and set values`` in the ``Time`` documentation). The API signature matches the ``np.insert`` API, but is more limited. The specification of insert index ``obj`` must be a single integer, and the ``axis`` must be ``0`` for simple row insertion before the index. Parameters ---------- obj : int Integer index before which ``values`` is inserted. values : array-like Value(s) to insert. If the type of ``values`` is different from that of this object, ``values`` is converted to the matching type. axis : int, optional Axis along which to insert ``values``. Default is 0, which is the only allowed value and will insert a row. Returns ------- out : `~astropy.time.Time` subclass New time object with inserted value(s) """ # Validate inputs: obj arg is integer, axis=0, self is not a scalar, and # input index is in bounds. try: idx0 = operator.index(obj) except TypeError: raise TypeError('obj arg must be an integer') if axis != 0: raise ValueError('axis must be 0') if not self.shape: raise TypeError('cannot insert into scalar {} object' .format(self.__class__.__name__)) if abs(idx0) > len(self): raise IndexError('index {} is out of bounds for axis 0 with size {}' .format(idx0, len(self))) # Turn negative index into positive if idx0 < 0: idx0 = len(self) + idx0 # For non-Time object, use numpy to help figure out the length. (Note annoying # case of a string input that has a length which is not the length we want). if not isinstance(values, self.__class__): values = np.asarray(values) n_values = len(values) if values.shape else 1 # Finally make the new object with the correct length and set values for the # three sections, before insert, the insert, and after the insert. out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name) out._time.jd1[:idx0] = self._time.jd1[:idx0] out._time.jd2[:idx0] = self._time.jd2[:idx0] # This uses the Time setting machinery to coerce and validate as necessary.
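# For example, t.insert(1, '2020-01-01') on an ISO-format Time coerces the
# string through __setitem__ (and _make_value_equivalent) here before the
# trailing jd1/jd2 blocks are copied over below.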
        out[idx0:idx0 + n_values] = values
        out._time.jd1[idx0 + n_values:] = self._time.jd1[idx0:]
        out._time.jd2[idx0 + n_values:] = self._time.jd2[idx0:]

        return out

    def __setitem__(self, item, value):
        if not self.writeable:
            if self.shape:
                raise ValueError('{} object is read-only. Make a '
                                 'copy() or set "writeable" attribute to True.'
                                 .format(self.__class__.__name__))
            else:
                raise ValueError('scalar {} object is read-only.'
                                 .format(self.__class__.__name__))

        # Any use of setitem results in immediate cache invalidation
        del self.cache

        # Setting invalidates transform deltas
        for attr in ('_delta_tdb_tt', '_delta_ut1_utc'):
            if hasattr(self, attr):
                delattr(self, attr)

        if value is np.ma.masked or value is np.nan:
            self._time.jd2[item] = np.nan
            return

        value = self._make_value_equivalent(item, value)

        # Finally directly set the jd1/2 values.  Locations are known to
        # match.
        if self.scale is not None:
            value = getattr(value, self.scale)
        self._time.jd1[item] = value._time.jd1
        self._time.jd2[item] = value._time.jd2

    def isclose(self, other, atol=None):
        """Returns a boolean or boolean array where two Time objects are
        element-wise equal within a time tolerance.

        This evaluates the expression below::

          abs(self - other) <= atol

        Parameters
        ----------
        other : `~astropy.time.Time`
            Time object for comparison.
        atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
            Absolute tolerance for equality with units of time (e.g. ``u.s``
            or ``u.day``).  Default is two bits in the 128-bit JD time
            representation, equivalent to about 40 picosecs.
        """
        if atol is None:
            # Note: use 2 bits instead of 1 bit based on experience in
            # precision tests, since taking the difference with a UTC time
            # means one has to do a scale change.
            atol = 2 * np.finfo(float).eps * u.day

        if not isinstance(atol, (u.Quantity, TimeDelta)):
            raise TypeError("'atol' argument must be a Quantity or TimeDelta instance, got "
                            f'{atol.__class__.__name__} instead')

        try:
            # Separate these out so user sees where the problem is
            dt = self - other
            dt = abs(dt)
            out = dt <= atol
        except Exception as err:
            raise TypeError("'other' argument must support subtraction with Time "
                            f"and return a value that supports comparison with "
                            f"{atol.__class__.__name__}: {err}")

        return out

    def copy(self, format=None):
        """
        Return a fully independent copy of the Time object, optionally
        changing the format.

        If ``format`` is supplied then the time format of the returned Time
        object will be set accordingly, otherwise it will be unchanged from
        the original.

        In this method a full copy of the internal time arrays will be made.
        The internal time arrays are normally not changeable by the user so
        in most cases the ``replicate()`` method should be used.

        Parameters
        ----------
        format : str, optional
            Time format of the copy.

        Returns
        -------
        tm : Time object
            Copy of this object
        """
        return self._apply('copy', format=format)

    def replicate(self, format=None, copy=False, cls=None):
        """
        Return a replica of the Time object, optionally changing the format.

        If ``format`` is supplied then the time format of the returned Time
        object will be set accordingly, otherwise it will be unchanged from
        the original.

        If ``copy`` is set to `True` then a full copy of the internal time
        arrays will be made.  By default the replica will use a reference to
        the original arrays when possible to save memory.  The internal time
        arrays are normally not changeable by the user so in most cases it
        should not be necessary to set ``copy`` to `True`.

        The convenience method copy() is available in which ``copy`` is
        `True` by default.
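        For example (an illustrative sketch; the replica shares its internal
        JD arrays with the original unless ``copy=True``)::

            >>> t = Time('2020-01-01 00:00:00', scale='utc')
            >>> t_mjd = t.replicate(format='mjd')
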
        Parameters
        ----------
        format : str, optional
            Time format of the replica.
        copy : bool, optional
            Return a true copy instead of using references where possible.

        Returns
        -------
        tm : Time object
            Replica of this object
        """
        return self._apply('copy' if copy else 'replicate', format=format,
                           cls=cls)

    def _apply(self, method, *args, format=None, cls=None, **kwargs):
        """Create a new time object, possibly applying a method to the arrays.

        Parameters
        ----------
        method : str or callable
            If string, can be 'replicate' or the name of a relevant
            `~numpy.ndarray` method. In the former case, a new time instance
            with unchanged internal data is created, while in the latter the
            method is applied to the internal ``jd1`` and ``jd2`` arrays, as
            well as to possible ``location``, ``_delta_ut1_utc``, and
            ``_delta_tdb_tt`` arrays.
            If a callable, it is directly applied to the above arrays.
            Examples: 'copy', '__getitem__', 'reshape',
            `~numpy.broadcast_to`.
        args : tuple
            Any positional arguments for ``method``.
        kwargs : dict
            Any keyword arguments for ``method``.  If the ``format`` keyword
            argument is present, this will be used as the Time format of the
            replica.

        Examples
        --------
        Some ways this is used internally::

            copy : ``_apply('copy')``
            replicate : ``_apply('replicate')``
            reshape : ``_apply('reshape', new_shape)``
            index or slice : ``_apply('__getitem__', item)``
            broadcast : ``_apply(np.broadcast_to, shape=new_shape)``
        """
        new_format = self.format if format is None else format

        if callable(method):
            apply_method = lambda array: method(array, *args, **kwargs)
        else:
            if method == 'replicate':
                apply_method = None
            else:
                apply_method = operator.methodcaller(method, *args, **kwargs)

        jd1, jd2 = self._time.jd1, self._time.jd2
        if apply_method:
            jd1 = apply_method(jd1)
            jd2 = apply_method(jd2)

        # Get a new instance of our class and set its attributes directly.
        tm = super().__new__(cls or self.__class__)
        tm._time = TimeJD(jd1, jd2, self.scale, precision=0,
                          in_subfmt='*', out_subfmt='*', from_jd=True)

        # Optional ndarray attributes.
        for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location'):
            try:
                val = getattr(self, attr)
            except AttributeError:
                continue

            if apply_method:
                # Apply the method to any value arrays (though skip if there
                # is only an array scalar and the method would return a view,
                # since in that case nothing would change).
                if getattr(val, 'shape', ()):
                    val = apply_method(val)
                elif method == 'copy' or method == 'flatten':
                    # flatten should copy also for a single element array, but
                    # we cannot use it directly for array scalars, since it
                    # always returns a one-dimensional array. So, just copy.
                    val = copy.copy(val)

            setattr(tm, attr, val)

        # Copy other 'info' attr only if it has actually been defined and the
        # time object is not a scalar (issue #10688).
        # See PR #3898 for further explanation and justification, along
        # with Quantity.__array_finalize__
        if 'info' in self.__dict__:
            tm.info = self.info

        # Make the new internal _time object corresponding to the format
        # in the copy.  If the format is unchanged this process is lightweight
        # and does not create any new arrays.
        if new_format not in tm.FORMATS:
            raise ValueError(f'format must be one of {list(tm.FORMATS)}')

        NewFormat = tm.FORMATS[new_format]

        tm._time = NewFormat(
            tm._time.jd1, tm._time.jd2, tm._time._scale,
            precision=self.precision,
            in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt),
            out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt),
            from_jd=True)
        tm._format = new_format
        tm.SCALES = self.SCALES

        return tm

    def __copy__(self):
        """
        Overrides the default behavior of the `copy.copy` function in
        the python stdlib to behave like `Time.copy`. Does *not* make a
        copy of the JD arrays - only copies by reference.
        """
        return self.replicate()

    def __deepcopy__(self, memo):
        """
        Overrides the default behavior of the `copy.deepcopy` function
        in the python stdlib to behave like `Time.copy`. Does make a
        copy of the JD arrays.
        """
        return self.copy()

    def _advanced_index(self, indices, axis=None, keepdims=False):
        """Turn argmin, argmax output into an advanced index.

        Argmin, argmax output contains indices along a given axis in an array
        shaped like the other dimensions.  To use this to get values at the
        correct location, a list is constructed in which the other axes are
        indexed sequentially.  When ``keepdims`` is ``True``, the net result
        is the same as constructing an index grid with ``np.ogrid`` and then
        replacing the ``axis`` item with ``indices`` with its shape expanded
        at ``axis``.  When ``keepdims`` is ``False``, the result is the same
        but with the ``axis`` dimension removed from all list entries.

        When ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.

        Parameters
        ----------
        indices : array
            Output of argmin or argmax.
        axis : int or None
            axis along which argmin or argmax was used.
        keepdims : bool
            Whether to construct indices that keep or remove the axis along
            which argmin or argmax was used.  Default: ``False``.

        Returns
        -------
        advanced_index : list of arrays
            Suitable for use as an advanced index.
        """
        if axis is None:
            return np.unravel_index(indices, self.shape)

        ndim = self.ndim
        if axis < 0:
            axis = axis + ndim

        if keepdims and indices.ndim < self.ndim:
            indices = np.expand_dims(indices, axis)

        index = [indices
                 if i == axis
                 else np.arange(s).reshape(
                     (1,) * (i if keepdims or i < axis else i - 1)
                     + (s,)
                     + (1,) * (ndim - i - (1 if keepdims or i > axis else 2)))
                 for i, s in enumerate(self.shape)]

        return tuple(index)

    def argmin(self, axis=None, out=None):
        """Return indices of the minimum values along the given axis.

        This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to
        ensure that the full precision given by the two doubles ``jd1`` and
        ``jd2`` is used.  See :func:`~numpy.argmin` for detailed
        documentation.
        """
        # First get the minimum at normal precision.
        jd1, jd2 = self.jd1, self.jd2
        approx = np.min(jd1 + jd2, axis, keepdims=True)

        # Approx is very close to the true minimum, and by subtracting it at
        # full precision, all numbers near 0 can be represented correctly,
        # so we can be sure we get the true minimum.
        # The below is effectively what would be done for
        #     dt = (self - self.__class__(approx, format='jd')).jd
        # which translates to:
        #     approx_jd1, approx_jd2 = day_frac(approx, 0.)
        #     dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
        dt = (jd1 - approx) + jd2

        return dt.argmin(axis, out)

    def argmax(self, axis=None, out=None):
        """Return indices of the maximum values along the given axis.

        This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to
        ensure that the full precision given by the two doubles ``jd1`` and
        ``jd2`` is used.  See :func:`~numpy.argmax` for detailed
        documentation.
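        Examples
        --------
        A minimal added illustration (values chosen arbitrarily)::

            >>> from astropy.time import Time
            >>> t = Time([2450000.5, 2450002.5, 2450001.5], format='jd')
            >>> int(t.argmax())
            1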
""" # For procedure, see comment on argmin. jd1, jd2 = self.jd1, self.jd2 approx = np.max(jd1 + jd2, axis, keepdims=True) dt = (jd1 - approx) + jd2 return dt.argmax(axis, out) def argsort(self, axis=-1): """Returns the indices that would sort the time array. This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used, and that corresponding attributes are copied. Internally, it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen. """ # For procedure, see comment on argmin. jd1, jd2 = self.jd1, self.jd2 approx = jd1 + jd2 remainder = (jd1 - approx) + jd2 if axis is None: return np.lexsort((remainder.ravel(), approx.ravel())) else: return np.lexsort(keys=(remainder, approx), axis=axis) def min(self, axis=None, out=None, keepdims=False): """Minimum along a given axis. This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used, and that corresponding attributes are copied. Note that the ``out`` argument is present only for compatibility with ``np.min``; since `Time` instances are immutable, it is not possible to have an actual ``out`` to store the result in. """ if out is not None: raise ValueError("Since `Time` instances are immutable, ``out`` " "cannot be set to anything but ``None``.") return self[self._advanced_index(self.argmin(axis), axis, keepdims)] def max(self, axis=None, out=None, keepdims=False): """Maximum along a given axis. This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used, and that corresponding attributes are copied. Note that the ``out`` argument is present only for compatibility with ``np.max``; since `Time` instances are immutable, it is not possible to have an actual ``out`` to store the result in. """ if out is not None: raise ValueError("Since `Time` instances are immutable, ``out`` " "cannot be set to anything but ``None``.") return self[self._advanced_index(self.argmax(axis), axis, keepdims)] def ptp(self, axis=None, out=None, keepdims=False): """Peak to peak (maximum - minimum) along a given axis. This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is used. Note that the ``out`` argument is present only for compatibility with `~numpy.ptp`; since `Time` instances are immutable, it is not possible to have an actual ``out`` to store the result in. """ if out is not None: raise ValueError("Since `Time` instances are immutable, ``out`` " "cannot be set to anything but ``None``.") return (self.max(axis, keepdims=keepdims) - self.min(axis, keepdims=keepdims)) def sort(self, axis=-1): """Return a copy sorted along the specified axis. This is similar to :meth:`~numpy.ndarray.sort`, but internally uses indexing with :func:`~numpy.lexsort` to ensure that the full precision given by the two doubles ``jd1`` and ``jd2`` is kept, and that corresponding attributes are properly sorted and copied as well. Parameters ---------- axis : int or None Axis to be sorted. If ``None``, the flattened array is sorted. By default, sort over the last axis. """ return self[self._advanced_index(self.argsort(axis), axis, keepdims=True)] @property def cache(self): """ Return the cache associated with this instance. 
""" return self._time.cache @cache.deleter def cache(self): del self._time.cache def __getattr__(self, attr): """ Get dynamic attributes to output format or do timescale conversion. """ if attr in self.SCALES and self.scale is not None: cache = self.cache['scale'] if attr not in cache: if attr == self.scale: tm = self else: tm = self.replicate() tm._set_scale(attr) if tm.shape: # Prevent future modification of cached array-like object tm.writeable = False cache[attr] = tm return cache[attr] elif attr in self.FORMATS: return self.to_value(attr, subfmt=None) elif attr in TIME_SCALES: # allowed ones done above (self.SCALES) if self.scale is None: raise ScaleValueError("Cannot convert TimeDelta with " "undefined scale to any defined scale.") else: raise ScaleValueError("Cannot convert {} with scale " "'{}' to scale '{}'" .format(self.__class__.__name__, self.scale, attr)) else: # Should raise AttributeError return self.__getattribute__(attr) @override__dir__ def __dir__(self): result = set(self.SCALES) result.update(self.FORMATS) return result def _match_shape(self, val): """ Ensure that `val` is matched to length of self. If val has length 1 then broadcast, otherwise cast to double and make sure shape matches. """ val = _make_array(val, copy=True) # be conservative and copy if val.size > 1 and val.shape != self.shape: try: # check the value can be broadcast to the shape of self. val = np.broadcast_to(val, self.shape, subok=True) except Exception: raise ValueError('Attribute shape must match or be ' 'broadcastable to that of Time object. ' 'Typically, give either a single value or ' 'one for each time.') return val def _time_comparison(self, other, op): """If other is of same class as self, compare difference in self.scale. Otherwise, return NotImplemented """ if other.__class__ is not self.__class__: try: other = self.__class__(other, scale=self.scale) except Exception: # Let other have a go. return NotImplemented if(self.scale is not None and self.scale not in other.SCALES or other.scale is not None and other.scale not in self.SCALES): # Other will also not be able to do it, so raise a TypeError # immediately, allowing us to explain why it doesn't work. raise TypeError("Cannot compare {} instances with scales " "'{}' and '{}'".format(self.__class__.__name__, self.scale, other.scale)) if self.scale is not None and other.scale is not None: other = getattr(other, self.scale) return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.) def __lt__(self, other): return self._time_comparison(other, operator.lt) def __le__(self, other): return self._time_comparison(other, operator.le) def __eq__(self, other): """ If other is an incompatible object for comparison, return `False`. Otherwise, return `True` if the time difference between self and other is zero. """ return self._time_comparison(other, operator.eq) def __ne__(self, other): """ If other is an incompatible object for comparison, return `True`. Otherwise, return `False` if the time difference between self and other is zero. """ return self._time_comparison(other, operator.ne) def __gt__(self, other): return self._time_comparison(other, operator.gt) def __ge__(self, other): return self._time_comparison(other, operator.ge) class Time(TimeBase): """ Represent and manipulate times and dates for astronomy. A `Time` object is initialized with one or more times in the ``val`` argument. The input times in ``val`` must conform to the specified ``format`` and must correspond to the specified time ``scale``. 
    The optional ``val2`` time input should be supplied only for numeric input
    formats (e.g. JD) where very high precision (better than 64-bit precision)
    is required.

    The allowed values for ``format`` can be listed with::

      >>> list(Time.FORMATS)
      ['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps',
       'plot_date', 'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday',
       'datetime64', 'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']

    See also: https://docs.astropy.org/en/stable/time/

    Parameters
    ----------
    val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
        Value(s) to initialize the time or times.  Bytes are decoded as ascii.
    val2 : sequence, ndarray, or number; optional
        Value(s) to initialize the time or times.  Only used for numerical
        input, to help preserve precision.
    format : str, optional
        Format of input value(s)
    scale : str, optional
        Time scale of input value(s), must be one of the following:
        ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
    precision : int, optional
        Digits of precision in string representation of time
    in_subfmt : str, optional
        Unix glob to select subformats for parsing input times
    out_subfmt : str, optional
        Unix glob to select subformat for outputting times
    location : `~astropy.coordinates.EarthLocation` or tuple, optional
        If given as a tuple, it should be able to initialize an
        EarthLocation instance, i.e., either contain 3 items with units of
        length for geocentric coordinates, or contain a longitude, latitude,
        and an optional height for geodetic coordinates.
        Can be a single location, or one for each input time.
        If not given, assumed to be the center of the Earth for time scale
        transformations to and from the solar-system barycenter.
    copy : bool, optional
        Make a copy of the input values
    """

    SCALES = TIME_SCALES
    """List of time scales"""

    FORMATS = TIME_FORMATS
    """Dict of time formats"""

    def __new__(cls, val, val2=None, format=None, scale=None,
                precision=None, in_subfmt=None, out_subfmt=None,
                location=None, copy=False):

        if isinstance(val, Time):
            self = val.replicate(format=format, copy=copy, cls=cls)
        else:
            self = super().__new__(cls)

        return self

    def __init__(self, val, val2=None, format=None, scale=None,
                 precision=None, in_subfmt=None, out_subfmt=None,
                 location=None, copy=False):

        if location is not None:
            from astropy.coordinates import EarthLocation
            if isinstance(location, EarthLocation):
                self.location = location
            else:
                self.location = EarthLocation(*location)
            if self.location.size == 1:
                self.location = self.location.squeeze()
        else:
            if not hasattr(self, 'location'):
                self.location = None

        if isinstance(val, Time):
            # Update _time formatting parameters if explicitly specified
            if precision is not None:
                self._time.precision = precision
            if in_subfmt is not None:
                self._time.in_subfmt = in_subfmt
            if out_subfmt is not None:
                self._time.out_subfmt = out_subfmt
            self.SCALES = TIME_TYPES[self.scale]
            if scale is not None:
                self._set_scale(scale)
        else:
            self._init_from_vals(val, val2, format, scale, copy,
                                 precision, in_subfmt, out_subfmt)
            self.SCALES = TIME_TYPES[self.scale]

        if self.location is not None and (self.location.size > 1
                                          and self.location.shape != self.shape):
            try:
                # check the location can be broadcast to self's shape.
                self.location = np.broadcast_to(self.location, self.shape,
                                                subok=True)
            except Exception as err:
                raise ValueError('The location with shape {} cannot be '
                                 'broadcast against time with shape {}. '
                                 'Typically, either give a single location or '
                                 'one for each time.'
.format(self.location.shape, self.shape)) from err def _make_value_equivalent(self, item, value): """Coerce setitem value into an equivalent Time object""" # If there is a vector location then broadcast to the Time shape # and then select with ``item`` if self.location is not None and self.location.shape: self_location = np.broadcast_to(self.location, self.shape, subok=True)[item] else: self_location = self.location if isinstance(value, Time): # Make sure locations are compatible. Location can be either None or # a Location object. if self_location is None and value.location is None: match = True elif ((self_location is None and value.location is not None) or (self_location is not None and value.location is None)): match = False else: match = np.all(self_location == value.location) if not match: raise ValueError('cannot set to Time with different location: ' 'expected location={} and ' 'got location={}' .format(self_location, value.location)) else: try: value = self.__class__(value, scale=self.scale, location=self_location) except Exception: try: value = self.__class__(value, scale=self.scale, format=self.format, location=self_location) except Exception as err: raise ValueError('cannot convert value to a compatible Time object: {}' .format(err)) return value @classmethod def now(cls): """ Creates a new object corresponding to the instant in time this method is called. .. note:: "Now" is determined using the `~datetime.datetime.utcnow` function, so its accuracy and precision is determined by that function. Generally that means it is set by the accuracy of your system clock. Returns ------- nowtime : :class:`~astropy.time.Time` A new `Time` object (or a subclass of `Time` if this is called from such a subclass) at the current time. """ # call `utcnow` immediately to be sure it's ASAP dtnow = datetime.utcnow() return cls(val=dtnow, format='datetime', scale='utc') info = TimeInfo() @classmethod def strptime(cls, time_string, format_string, **kwargs): """ Parse a string to a Time according to a format specification. See `time.strptime` documentation for format specification. >>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S') <Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000> Parameters ---------- time_string : str, sequence, or ndarray Objects containing time data of type string format_string : str String specifying format of time_string. kwargs : dict Any keyword arguments for ``Time``. If the ``format`` keyword argument is present, this will be used as the Time format. Returns ------- time_obj : `~astropy.time.Time` A new `~astropy.time.Time` object corresponding to the input ``time_string``. """ time_array = np.asarray(time_string) if time_array.dtype.kind not in ('U', 'S'): err = "Expected type is string, a bytes-like object or a sequence"\ " of these. Got dtype '{}'".format(time_array.dtype.kind) raise TypeError(err) to_string = (str if time_array.dtype.kind == 'U' else lambda x: str(x.item(), encoding='ascii')) iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, 'U30']) for time, formatted in iterator: tt, fraction = _strptime._strptime(to_string(time), format_string) time_tuple = tt[:6] + (fraction,) formatted[...] 
= '{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}'\ .format(*time_tuple) format = kwargs.pop('format', None) out = cls(*iterator.operands[1:], format='isot', **kwargs) if format is not None: out.format = format return out def strftime(self, format_spec): """ Convert Time to a string or a numpy.array of strings according to a format specification. See `time.strftime` documentation for format specification. Parameters ---------- format_spec : str Format definition of return string. Returns ------- formatted : str or numpy.array String or numpy.array of strings formatted according to the given format string. """ formatted_strings = [] for sk in self.replicate('iso')._time.str_kwargs(): date_tuple = date(sk['year'], sk['mon'], sk['day']).timetuple() datetime_tuple = (sk['year'], sk['mon'], sk['day'], sk['hour'], sk['min'], sk['sec'], date_tuple[6], date_tuple[7], -1) fmtd_str = format_spec if '%f' in fmtd_str: fmtd_str = fmtd_str.replace('%f', '{frac:0{precision}}'.format( frac=sk['fracsec'], precision=self.precision)) fmtd_str = strftime(fmtd_str, datetime_tuple) formatted_strings.append(fmtd_str) if self.isscalar: return formatted_strings[0] else: return np.array(formatted_strings).reshape(self.shape) def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None): """Light travel time correction to the barycentre or heliocentre. The frame transformations used to calculate the location of the solar system barycentre and the heliocentre rely on the erfa routine epv00, which is consistent with the JPL DE405 ephemeris to an accuracy of 11.2 km, corresponding to a light travel time of 4 microseconds. The routine assumes the source(s) are at large distance, i.e., neglects finite-distance effects. Parameters ---------- skycoord : `~astropy.coordinates.SkyCoord` The sky location to calculate the correction for. kind : str, optional ``'barycentric'`` (default) or ``'heliocentric'`` location : `~astropy.coordinates.EarthLocation`, optional The location of the observatory to calculate the correction for. If no location is given, the ``location`` attribute of the Time object is used ephemeris : str, optional Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set``. For more information, see `~astropy.coordinates.solar_system_ephemeris`. Returns ------- time_offset : `~astropy.time.TimeDelta` The time offset between the barycentre or Heliocentre and Earth, in TDB seconds. Should be added to the original time to get the time in the Solar system barycentre or the Heliocentre. Also, the time conversion to BJD will then include the relativistic correction as well. 
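        Examples
        --------
        A sketch of a typical barycentric correction (the site and target
        names are illustrative and require remote data)::

            >>> from astropy.coordinates import SkyCoord, EarthLocation
            >>> t = Time('2020-01-01T00:00:00',
            ...          location=EarthLocation.of_site('greenwich'))  # doctest: +SKIP
            >>> ltt = t.light_travel_time(SkyCoord.from_name('Vega'))  # doctest: +SKIP
            >>> t_bary = t.tdb + ltt  # doctest: +SKIP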
""" if kind.lower() not in ('barycentric', 'heliocentric'): raise ValueError("'kind' parameter must be one of 'heliocentric' " "or 'barycentric'") if location is None: if self.location is None: raise ValueError('An EarthLocation needs to be set or passed ' 'in to calculate bary- or heliocentric ' 'corrections') location = self.location from astropy.coordinates import (UnitSphericalRepresentation, CartesianRepresentation, HCRS, ICRS, GCRS, solar_system_ephemeris) # ensure sky location is ICRS compatible if not skycoord.is_transformable_to(ICRS()): raise ValueError("Given skycoord is not transformable to the ICRS") # get location of observatory in ITRS coordinates at this Time try: itrs = location.get_itrs(obstime=self) except Exception: raise ValueError("Supplied location does not have a valid `get_itrs` method") with solar_system_ephemeris.set(ephemeris): if kind.lower() == 'heliocentric': # convert to heliocentric coordinates, aligned with ICRS cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz else: # first we need to convert to GCRS coordinates with the correct # obstime, since ICRS coordinates have no frame time gcrs_coo = itrs.transform_to(GCRS(obstime=self)) # convert to barycentric (BCRS) coordinates, aligned with ICRS cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz # get unit ICRS vector to star spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation). represent_as(CartesianRepresentation).xyz) # Move X,Y,Z to last dimension, to enable possible broadcasting below. cpos = np.rollaxis(cpos, 0, cpos.ndim) spos = np.rollaxis(spos, 0, spos.ndim) # calculate light travel time correction tcor_val = (spos * cpos).sum(axis=-1) / const.c return TimeDelta(tcor_val, scale='tdb') def earth_rotation_angle(self, longitude=None): """Calculate local Earth rotation angle. Parameters ---------- longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional The longitude on the Earth at which to compute the Earth rotation angle (taken from a location as needed). If `None` (default), taken from the ``location`` attribute of the Time instance. If the special string 'tio', the result will be relative to the Terrestrial Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`). Returns ------- `~astropy.coordinates.Longitude` Local Earth rotation angle with units of hourangle. See Also -------- astropy.time.Time.sidereal_time References ---------- IAU 2006 NFA Glossary (currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html) Notes ----- The difference between apparent sidereal time and Earth rotation angle is the equation of the origins, which is the angle between the Celestial Intermediate Origin (CIO) and the equinox. Applying apparent sidereal time to the hour angle yields the true apparent Right Ascension with respect to the equinox, while applying the Earth rotation angle yields the intermediate (CIRS) Right Ascension with respect to the CIO. The result includes the TIO locator (s'), which positions the Terrestrial Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP) and is rigorously corrected for polar motion. (except when ``longitude='tio'``). """ if isinstance(longitude, str) and longitude == 'tio': longitude = 0 include_tio = False else: include_tio = True return self._sid_time_or_earth_rot_ang(longitude=longitude, function=erfa.era00, scales=('ut1',), include_tio=include_tio) def sidereal_time(self, kind, longitude=None, model=None): """Calculate sidereal time. 
Parameters ---------- kind : str ``'mean'`` or ``'apparent'``, i.e., accounting for precession only, or also for nutation. longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional The longitude on the Earth at which to compute the Earth rotation angle (taken from a location as needed). If `None` (default), taken from the ``location`` attribute of the Time instance. If the special string 'greenwich' or 'tio', the result will be relative to longitude 0 for models before 2000, and relative to the Terrestrial Intermediate Origin (TIO) for later ones (i.e., the output of the relevant ERFA function that calculates greenwich sidereal time). model : str or None; optional Precession (and nutation) model to use. The available ones are: - {0}: {1} - {2}: {3} If `None` (default), the last (most recent) one from the appropriate list above is used. Returns ------- `~astropy.coordinates.Longitude` Local sidereal time, with units of hourangle. See Also -------- astropy.time.Time.earth_rotation_angle References ---------- IAU 2006 NFA Glossary (currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html) Notes ----- The difference between apparent sidereal time and Earth rotation angle is the equation of the origins, which is the angle between the Celestial Intermediate Origin (CIO) and the equinox. Applying apparent sidereal time to the hour angle yields the true apparent Right Ascension with respect to the equinox, while applying the Earth rotation angle yields the intermediate (CIRS) Right Ascension with respect to the CIO. For the IAU precession models from 2000 onwards, the result includes the TIO locator (s'), which positions the Terrestrial Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP) and is rigorously corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``). """ # docstring is formatted below if kind.lower() not in SIDEREAL_TIME_MODELS.keys(): raise ValueError('The kind of sidereal time has to be {}'.format( ' or '.join(sorted(SIDEREAL_TIME_MODELS.keys())))) available_models = SIDEREAL_TIME_MODELS[kind.lower()] if model is None: model = sorted(available_models.keys())[-1] elif model.upper() not in available_models: raise ValueError( 'Model {} not implemented for {} sidereal time; ' 'available models are {}' .format(model, kind, sorted(available_models.keys()))) model_kwargs = available_models[model.upper()] if isinstance(longitude, str) and longitude in ('tio', 'greenwich'): longitude = 0 model_kwargs = model_kwargs.copy() model_kwargs['include_tio'] = False return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs) if isinstance(sidereal_time.__doc__, str): sidereal_time.__doc__ = sidereal_time.__doc__.format( 'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()), 'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys())) def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True): """Calculate a local sidereal time or Earth rotation angle. Parameters ---------- longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional The longitude on the Earth at which to compute the Earth rotation angle (taken from a location as needed). If `None` (default), taken from the ``location`` attribute of the Time instance. function : callable The ERFA function to use. scales : tuple of str The time scales that the function requires on input. 
        include_tio : bool, optional
            Whether to include the TIO locator corrected for polar motion.
            Should be `False` for pre-2000 IAU models.  Default: `True`.

        Returns
        -------
        `~astropy.coordinates.Longitude`
            Local sidereal time or Earth rotation angle, with units of
            hourangle.

        """
        from astropy.coordinates import Longitude, EarthLocation
        from astropy.coordinates.builtin_frames.utils import get_polar_motion
        from astropy.coordinates.matrix_utilities import rotation_matrix

        if longitude is None:
            if self.location is None:
                raise ValueError('No longitude is given but the location for '
                                 'the Time object is not set.')
            longitude = self.location.lon
        elif isinstance(longitude, EarthLocation):
            longitude = longitude.lon
        else:
            # Sanity check on input; default unit is degree.
            longitude = Longitude(longitude, u.degree, copy=False)

        theta = self._call_erfa(function, scales)

        if include_tio:
            # TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
            # maybe possible to factor out to one or the other.
            sp = self._call_erfa(erfa.sp00, ('tt',))
            xp, yp = get_polar_motion(self)
            # Form the rotation matrix, CIRS to apparent [HA,Dec].
            r = (rotation_matrix(longitude, 'z')
                 @ rotation_matrix(-yp, 'x', unit=u.radian)
                 @ rotation_matrix(-xp, 'y', unit=u.radian)
                 @ rotation_matrix(theta + sp, 'z', unit=u.radian))

            # Solve for angle.
            angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian

        else:
            angle = longitude + (theta << u.radian)

        return Longitude(angle, u.hourangle)

    def _call_erfa(self, function, scales):
        # TODO: allow erfa functions to be used on Time with __array_ufunc__.
        erfa_parameters = [getattr(getattr(self, scale)._time, jd_part)
                           for scale in scales
                           for jd_part in ('jd1', 'jd2_filled')]

        result = function(*erfa_parameters)

        if self.masked:
            result[self.mask] = np.nan

        return result

    def get_delta_ut1_utc(self, iers_table=None, return_status=False):
        """Find UT1 - UTC differences by interpolating in IERS Table.

        Parameters
        ----------
        iers_table : `~astropy.utils.iers.IERS`, optional
            Table containing UT1-UTC differences from IERS Bulletins A
            and/or B.  Default: `~astropy.utils.iers.earth_orientation_table`
            (which in turn defaults to the combined version provided by
            `~astropy.utils.iers.IERS_Auto`).
        return_status : bool
            Whether to return status values.  If `False` (default), iers
            raises `IndexError` if any time is out of the range covered by
            the IERS table.

        Returns
        -------
        ut1_utc : float or float array
            UT1-UTC, interpolated in IERS Table
        status : int or int array
            Status values (if ``return_status=True``)::

                ``astropy.utils.iers.FROM_IERS_B``
                ``astropy.utils.iers.FROM_IERS_A``
                ``astropy.utils.iers.FROM_IERS_A_PREDICTION``
                ``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
                ``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``

        Notes
        -----
        In normal usage, UT1-UTC differences are calculated automatically
        on the first instance ut1 is needed.

        Examples
        --------
        To check in code whether any times are before the IERS table range::

            >>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
            >>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
            >>> delta, status = t.get_delta_ut1_utc(return_status=True)  # doctest: +REMOTE_DATA
            >>> status == TIME_BEFORE_IERS_RANGE  # doctest: +REMOTE_DATA
            array([ True, False]...)
        """
        if iers_table is None:
            from astropy.utils.iers import earth_orientation_table
            iers_table = earth_orientation_table.get()

        return iers_table.ut1_utc(self.utc, return_status=return_status)

    # Property for ERFA DUT arg = UT1 - UTC
    def _get_delta_ut1_utc(self, jd1=None, jd2=None):
        """
        Get ERFA DUT arg = UT1 - UTC.
        This getter takes optional jd1 and jd2 args because it gets called
        that way when converting time scales.  If delta_ut1_utc is not yet
        set, this will interpolate it from the IERS table.
        """
        # Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
        # seconds. It is obtained from tables published by the IERS.
        if not hasattr(self, '_delta_ut1_utc'):
            from astropy.utils.iers import earth_orientation_table
            iers_table = earth_orientation_table.get()

            # jd1, jd2 are normally set (see above), except if delta_ut1_utc
            # is accessed directly; ensure we behave as expected for that case
            if jd1 is None:
                self_utc = self.utc
                jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
                scale = 'utc'
            else:
                scale = self.scale

            # interpolate UT1-UTC in IERS table
            delta = iers_table.ut1_utc(jd1, jd2)

            # if we interpolated using UT1 jds, we may be off by one
            # second near leap seconds (and very slightly off elsewhere)
            if scale == 'ut1':
                # calculate UTC using the offset we got; the ERFA routine
                # is tolerant of leap seconds, so will do this right
                jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
                # calculate a better estimate using the nearly correct UTC
                delta = iers_table.ut1_utc(jd1_utc, jd2_utc)

            self._set_delta_ut1_utc(delta)

        return self._delta_ut1_utc

    def _set_delta_ut1_utc(self, val):
        del self.cache
        if hasattr(val, 'to'):  # Matches Quantity but also TimeDelta.
            val = val.to(u.second).value
        val = self._match_shape(val)
        self._delta_ut1_utc = val

    # Note can't use @property because _get_delta_ut1_utc is explicitly
    # called with the optional jd1 and jd2 args.
    delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
    """UT1 - UTC time scale offset"""

    # Property for ERFA DTR arg = TDB - TT
    def _get_delta_tdb_tt(self, jd1=None, jd2=None):
        if not hasattr(self, '_delta_tdb_tt'):
            # If jd1 and jd2 are not provided (which is the case for property
            # attribute access) then require that the time scale is TT or TDB.
            # Otherwise the computations here are not correct.
            if jd1 is None or jd2 is None:
                if self.scale not in ('tt', 'tdb'):
                    raise ValueError('Accessing the delta_tdb_tt attribute '
                                     'is only possible for TT or TDB time '
                                     'scales')
                else:
                    jd1 = self._time.jd1
                    jd2 = self._time.jd2_filled

            # First go from the current input time (which is either
            # TDB or TT) to an approximate UT1.  Since TT and TDB are
            # pretty close (few msec?), assume TT.  Similarly, since the
            # UT1 terms are very small, use UTC instead of UT1.
            njd1, njd2 = erfa.tttai(jd1, jd2)
            njd1, njd2 = erfa.taiutc(njd1, njd2)
            # subtract 0.5, so UT is fraction of the day from midnight
            ut = day_frac(njd1 - 0.5, njd2)[1]

            if self.location is None:
                # Assume geocentric.
                self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0., 0., 0.)
            else:
                location = self.location
                # Geodetic params needed for d_tdb_tt()
                lon = location.lon
                rxy = np.hypot(location.x, location.y)
                z = location.z
                self._delta_tdb_tt = erfa.dtdb(
                    jd1, jd2, ut, lon.to_value(u.radian),
                    rxy.to_value(u.km), z.to_value(u.km))

        return self._delta_tdb_tt

    def _set_delta_tdb_tt(self, val):
        del self.cache
        if hasattr(val, 'to'):  # Matches Quantity but also TimeDelta.
            val = val.to(u.second).value
        val = self._match_shape(val)
        self._delta_tdb_tt = val

    # Note can't use @property because _get_delta_tdb_tt is explicitly
    # called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt) """TDB - TT time scale offset""" def __sub__(self, other): # T - Tdelta = T # T - T = Tdelta other_is_delta = not isinstance(other, Time) if other_is_delta: # T - Tdelta # Check other is really a TimeDelta or something that can initialize. if not isinstance(other, TimeDelta): try: other = TimeDelta(other) except Exception: return NotImplemented # we need a constant scale to calculate, which is guaranteed for # TimeDelta, but not for Time (which can be UTC) out = self.replicate() if self.scale in other.SCALES: if other.scale not in (out.scale, None): other = getattr(other, out.scale) else: if other.scale is None: out._set_scale('tai') else: if self.scale not in TIME_TYPES[other.scale]: raise TypeError("Cannot subtract Time and TimeDelta instances " "with scales '{}' and '{}'" .format(self.scale, other.scale)) out._set_scale(other.scale) # remove attributes that are invalidated by changing time for attr in ('_delta_ut1_utc', '_delta_tdb_tt'): if hasattr(out, attr): delattr(out, attr) else: # T - T # the scales should be compatible (e.g., cannot convert TDB to LOCAL) if other.scale not in self.SCALES: raise TypeError("Cannot subtract Time instances " "with scales '{}' and '{}'" .format(self.scale, other.scale)) self_time = (self._time if self.scale in TIME_DELTA_SCALES else self.tai._time) # set up TimeDelta, subtraction to be done shortly out = TimeDelta(self_time.jd1, self_time.jd2, format='jd', scale=self_time.scale) if other.scale != out.scale: other = getattr(other, out.scale) jd1 = out._time.jd1 - other._time.jd1 jd2 = out._time.jd2 - other._time.jd2 out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) if other_is_delta: # Go back to left-side scale if needed out._set_scale(self.scale) return out def __add__(self, other): # T + Tdelta = T # T + T = error if isinstance(other, Time): raise OperandTypeError(self, other, '+') # Check other is really a TimeDelta or something that can initialize. if not isinstance(other, TimeDelta): try: other = TimeDelta(other) except Exception: return NotImplemented # ideally, we calculate in the scale of the Time item, since that is # what we want the output in, but this may not be possible, since # TimeDelta cannot be converted arbitrarily out = self.replicate() if self.scale in other.SCALES: if other.scale not in (out.scale, None): other = getattr(other, out.scale) else: if other.scale is None: out._set_scale('tai') else: if self.scale not in TIME_TYPES[other.scale]: raise TypeError("Cannot add Time and TimeDelta instances " "with scales '{}' and '{}'" .format(self.scale, other.scale)) out._set_scale(other.scale) # remove attributes that are invalidated by changing time for attr in ('_delta_ut1_utc', '_delta_tdb_tt'): if hasattr(out, attr): delattr(out, attr) jd1 = out._time.jd1 + other._time.jd1 jd2 = out._time.jd2 + other._time.jd2 out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) # Go back to left-side scale if needed out._set_scale(self.scale) return out # Reverse addition is possible: <something-Tdelta-ish> + T # but there is no case of <something> - T, so no __rsub__. def __radd__(self, other): return self.__add__(other) def to_datetime(self, timezone=None): # TODO: this could likely go through to_value, as long as that # had an **kwargs part that was just passed on to _time. 
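        # Illustrative behaviour (a sketch): Time('2020-01-01T12:00:00')
        # .to_datetime() gives datetime(2020, 1, 1, 12, 0); passing a tzinfo
        # instance as ``timezone`` yields aware datetimes instead.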
tm = self.replicate(format='datetime') return tm._shaped_like_input(tm._time.to_value(timezone)) to_datetime.__doc__ = TimeDatetime.to_value.__doc__ class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning): """Warning for missing unit or format in TimeDelta""" pass class TimeDelta(TimeBase): """ Represent the time difference between two times. A TimeDelta object is initialized with one or more times in the ``val`` argument. The input times in ``val`` must conform to the specified ``format``. The optional ``val2`` time input should be supplied only for numeric input formats (e.g. JD) where very high precision (better than 64-bit precision) is required. The allowed values for ``format`` can be listed with:: >>> list(TimeDelta.FORMATS) ['sec', 'jd', 'datetime'] Note that for time differences, the scale can be among three groups: geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational ('ut1'). Within each of these, the scales for time differences are the same. Conversion between geocentric and barycentric is possible, as there is only a scale factor change, but one cannot convert to or from 'ut1', as this requires knowledge of the actual times, not just their difference. For a similar reason, 'utc' is not a valid scale for a time difference: a UTC day is not always 86400 seconds. See also: - https://docs.astropy.org/en/stable/time/ - https://docs.astropy.org/en/stable/time/index.html#time-deltas Parameters ---------- val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object Value(s) to initialize the time difference(s). Any quantities will be converted appropriately (with care taken to avoid rounding errors for regular time units). val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional Additional values, as needed to preserve precision. format : str, optional Format of input value(s). For numerical inputs without units, "jd" is assumed and values are interpreted as days. A deprecation warning is raised in this case. To avoid the warning, either specify the format or add units to the input values. scale : str, optional Time scale of input value(s), must be one of the following values: ('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or ``None``), the scale is arbitrary; when added or subtracted from a ``Time`` instance, it will be used without conversion. 
copy : bool, optional Make a copy of the input values """ SCALES = TIME_DELTA_SCALES """List of time delta scales.""" FORMATS = TIME_DELTA_FORMATS """Dict of time delta formats.""" info = TimeDeltaInfo() def __new__(cls, val, val2=None, format=None, scale=None, precision=None, in_subfmt=None, out_subfmt=None, location=None, copy=False): if isinstance(val, TimeDelta): self = val.replicate(format=format, copy=copy, cls=cls) else: self = super().__new__(cls) return self def __init__(self, val, val2=None, format=None, scale=None, copy=False): if isinstance(val, TimeDelta): if scale is not None: self._set_scale(scale) else: format = format or self._get_format(val) self._init_from_vals(val, val2, format, scale, copy) if scale is not None: self.SCALES = TIME_DELTA_TYPES[scale] @staticmethod def _get_format(val): if isinstance(val, timedelta): return 'datetime' if getattr(val, 'unit', None) is None: warn('Numerical value without unit or explicit format passed to' ' TimeDelta, assuming days', TimeDeltaMissingUnitWarning) return 'jd' def replicate(self, *args, **kwargs): out = super().replicate(*args, **kwargs) out.SCALES = self.SCALES return out def to_datetime(self): """ Convert to ``datetime.timedelta`` object. """ tm = self.replicate(format='datetime') return tm._shaped_like_input(tm._time.value) def _set_scale(self, scale): """ This is the key routine that actually does time scale conversions. This is not public and not connected to the read-only scale property. """ if scale == self.scale: return if scale not in self.SCALES: raise ValueError("Scale {!r} is not in the allowed scales {}" .format(scale, sorted(self.SCALES))) # For TimeDelta, there can only be a change in scale factor, # which is written as time2 - time1 = scale_offset * time1 scale_offset = SCALE_OFFSETS[(self.scale, scale)] if scale_offset is None: self._time.scale = scale else: jd1, jd2 = self._time.jd1, self._time.jd2 offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset) self._time = self.FORMATS[self.format]( jd1 + offset1, jd2 + offset2, scale, self.precision, self.in_subfmt, self.out_subfmt, from_jd=True) def _add_sub(self, other, op): """Perform common elements of addition / subtraction for two delta times""" # If not a TimeDelta then see if it can be turned into a TimeDelta. if not isinstance(other, TimeDelta): try: other = TimeDelta(other) except Exception: return NotImplemented # the scales should be compatible (e.g., cannot convert TDB to TAI) if(self.scale is not None and self.scale not in other.SCALES or other.scale is not None and other.scale not in self.SCALES): raise TypeError("Cannot add TimeDelta instances with scales " "'{}' and '{}'".format(self.scale, other.scale)) # adjust the scale of other if the scale of self is set (or no scales) if self.scale is not None or other.scale is None: out = self.replicate() if other.scale is not None: other = getattr(other, self.scale) else: out = other.replicate() jd1 = op(self._time.jd1, other._time.jd1) jd2 = op(self._time.jd2, other._time.jd2) out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) return out def __add__(self, other): # If other is a Time then use Time.__add__ to do the calculation. 
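        # e.g. TimeDelta(1 * u.day) + Time('2020-01-01') delegates to
        # Time.__add__ and yields Time('2020-01-02').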
        if isinstance(other, Time):
            return other.__add__(self)

        return self._add_sub(other, operator.add)

    def __sub__(self, other):
        # TimeDelta - Time is an error
        if isinstance(other, Time):
            raise OperandTypeError(self, other, '-')

        return self._add_sub(other, operator.sub)

    def __radd__(self, other):
        return self.__add__(other)

    def __rsub__(self, other):
        out = self.__sub__(other)
        return -out

    def __neg__(self):
        """Negation of a `TimeDelta` object."""
        new = self.copy()
        new._time.jd1 = -self._time.jd1
        new._time.jd2 = -self._time.jd2
        return new

    def __abs__(self):
        """Absolute value of a `TimeDelta` object."""
        jd1, jd2 = self._time.jd1, self._time.jd2
        negative = jd1 + jd2 < 0
        new = self.copy()
        new._time.jd1 = np.where(negative, -jd1, jd1)
        new._time.jd2 = np.where(negative, -jd2, jd2)
        return new

    def __mul__(self, other):
        """Multiplication of `TimeDelta` objects by numbers/arrays."""
        # Check needed since otherwise the self.jd1 * other multiplication
        # would enter here again (via __rmul__)
        if isinstance(other, Time):
            raise OperandTypeError(self, other, '*')
        elif ((isinstance(other, u.UnitBase)
               and other == u.dimensionless_unscaled)
                or (isinstance(other, str) and other == '')):
            return self.copy()

        # If other is something consistent with a dimensionless quantity
        # (could just be a float or an array), then we can just multiply in.
        try:
            other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
        except Exception:
            # If not consistent with a dimensionless quantity, try downgrading
            # self to a quantity and see if things work.
            try:
                return self.to(u.day) * other
            except Exception:
                # The various ways we could multiply all failed;
                # returning NotImplemented to give other a final chance.
                return NotImplemented

        jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
        out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)

        if self.format != 'jd':
            out = out.replicate(format=self.format)
        return out

    def __rmul__(self, other):
        """Multiplication of numbers/arrays with `TimeDelta` objects."""
        return self.__mul__(other)

    def __truediv__(self, other):
        """Division of `TimeDelta` objects by numbers/arrays."""
        # Cannot do __mul__(1./other) as that loses precision
        if ((isinstance(other, u.UnitBase)
             and other == u.dimensionless_unscaled)
                or (isinstance(other, str) and other == '')):
            return self.copy()

        # If other is something consistent with a dimensionless quantity
        # (could just be a float or an array), then we can just divide in.
        try:
            other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
        except Exception:
            # If not consistent with a dimensionless quantity, try downgrading
            # self to a quantity and see if things work.
            try:
                return self.to(u.day) / other
            except Exception:
                # The various ways we could divide all failed;
                # returning NotImplemented to give other a final chance.
                return NotImplemented

        jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
        out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)

        if self.format != 'jd':
            out = out.replicate(format=self.format)
        return out

    def __rtruediv__(self, other):
        """Division by `TimeDelta` objects of numbers/arrays."""
        # Here, we do not have to worry about returning NotImplemented,
        # since other has already had a chance to look at us.
        return other / self.to(u.day)

    def to(self, unit, equivalencies=[]):
        """
        Convert to a quantity in the specified unit.

        Parameters
        ----------
        unit : unit-like
            The unit to convert to.
        equivalencies : list of tuple
            A list of equivalence pairs to try if the units are not directly
            convertible (see :ref:`astropy:unit_equivalencies`).
            If `None`, no equivalencies will be applied at all, not even any
            set globally or within a context.

        Returns
        -------
        quantity : `~astropy.units.Quantity`
            The quantity in the units specified.

        See also
        --------
        to_value : get the numerical value in a given unit.
        """
        return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(
            unit, equivalencies=equivalencies)

    def to_value(self, *args, **kwargs):
        """Get time delta values expressed in specified output format or unit.

        This method is flexible and handles both conversion to a specified
        ``TimeDelta`` format / sub-format AND conversion to a specified unit.
        If positional argument(s) are provided then the first one is checked
        to see if it is a valid ``TimeDelta`` format, and next it is checked
        to see if it is a valid unit or unit string.

        To convert to a ``TimeDelta`` format and optional sub-format the
        options are::

          tm = TimeDelta(1.0 * u.s)
          tm.to_value('jd')  # equivalent of tm.jd
          tm.to_value('jd', 'decimal')  # convert to 'jd' as a Decimal object
          tm.to_value('jd', subfmt='decimal')
          tm.to_value(format='jd', subfmt='decimal')

        To convert to a unit with optional equivalencies, the options are::

          tm.to_value('hr')  # convert to u.hr (hours)
          tm.to_value('hr', [])  # specify equivalencies as a positional arg
          tm.to_value('hr', equivalencies=[])
          tm.to_value(unit='hr', equivalencies=[])

        The built-in `~astropy.time.TimeDelta` options for ``format`` are:
        {'jd', 'sec', 'datetime'}.

        For the two numerical formats 'jd' and 'sec', the available ``subfmt``
        options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here,
        'long' uses ``numpy.longdouble`` for somewhat enhanced precision
        (with the enhancement depending on platform), and 'decimal' instances
        of :class:`decimal.Decimal` for full precision.  For the 'str' and
        'bytes' sub-formats, the number of digits is also chosen such that
        time values are represented accurately.  Default: as set by
        ``out_subfmt`` (which by default picks the first available for a
        given format, i.e., 'float').

        Parameters
        ----------
        format : str, optional
            The format in which one wants the `~astropy.time.TimeDelta`
            values.  Default: the current format.
        subfmt : str, optional
            Possible sub-format in which the values should be given.
            Default: as set by ``out_subfmt`` (which by default picks the
            first available for a given format, i.e., 'float' or 'date_hms').
        unit : `~astropy.units.UnitBase` instance or str, optional
            The unit in which the value should be given.
        equivalencies : list of tuple
            A list of equivalence pairs to try if the units are not directly
            convertible (see :ref:`astropy:unit_equivalencies`). If `None`,
            no equivalencies will be applied at all, not even any set
            globally or within a context.

        Returns
        -------
        value : ndarray or scalar
            The value in the format or units specified.

        See also
        --------
        to : Convert to a `~astropy.units.Quantity` instance in a given unit.
        value : The time value in the current format.

        """
        if not (args or kwargs):
            raise TypeError('to_value() missing required format or unit argument')

        # TODO: maybe allow 'subfmt' also for units, keeping full precision
        # (effectively, by doing the reverse of quantity_day_frac)?
        # This way, only equivalencies could lead to possible precision loss.
        if ('format' in kwargs
                or (args != () and (args[0] is None or args[0] in self.FORMATS))):
            # Super-class will error with duplicate arguments, etc.
            return super().to_value(*args, **kwargs)

        # With positional arguments, we try parsing the first one as a unit,
        # so that on failure we can give a more informative exception.
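        #   e.g. TimeDelta(1 * u.day).to_value('hr') -> 24.0, while an
        #   unrecognized string such as 'not-a-unit' falls through to the
        #   ValueError below, which lists the known formats.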
if args: try: unit = u.Unit(args[0]) except ValueError as exc: raise ValueError("first argument is not one of the known " "formats ({}) and failed to parse as a unit." .format(list(self.FORMATS))) from exc args = (unit,) + args[1:] return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(*args, **kwargs) def _make_value_equivalent(self, item, value): """Coerce setitem value into an equivalent TimeDelta object""" if not isinstance(value, TimeDelta): try: value = self.__class__(value, scale=self.scale, format=self.format) except Exception as err: raise ValueError('cannot convert value to a compatible TimeDelta ' 'object: {}'.format(err)) return value def isclose(self, other, atol=None, rtol=0.0): """Returns a boolean or boolean array where two TimeDelta objects are element-wise equal within a time tolerance. This effectively evaluates the expression below:: abs(self - other) <= atol + rtol * abs(other) Parameters ---------- other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` Quantity or TimeDelta object for comparison. atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` Absolute tolerance for equality with units of time (e.g. ``u.s`` or ``u.day``). Default is one bit in the 128-bit JD time representation, equivalent to about 20 picosecs. rtol : float Relative tolerance for equality """ try: other_day = other.to_value(u.day) except Exception as err: raise TypeError(f"'other' argument must support conversion to days: {err}") if atol is None: atol = np.finfo(float).eps * u.day if not isinstance(atol, (u.Quantity, TimeDelta)): raise TypeError("'atol' argument must be a Quantity or TimeDelta instance, got " f'{atol.__class__.__name__} instead') return np.isclose(self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)) class ScaleValueError(Exception): pass def _make_array(val, copy=False): """ Take ``val`` and convert/reshape to an array. If ``copy`` is `True` then copy input values. Returns ------- val : ndarray Array version of ``val``. """ if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time): dtype = object else: dtype = None val = np.array(val, copy=copy, subok=True, dtype=dtype) # Allow only float64, string or object arrays as input # (object is for datetime, maybe add more specific test later?) # This also ensures the right byteorder for float64 (closes #2942). if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize: pass elif val.dtype.kind in 'OSUMaV': pass else: val = np.asanyarray(val, dtype=np.float64) return val def _check_for_masked_and_fill(val, val2): """ If ``val`` or ``val2`` are masked arrays then fill them and cast to ndarray. Returns a mask corresponding to the logical-or of masked elements in ``val`` and ``val2``. If neither is masked then the return ``mask`` is ``None``. If either ``val`` or ``val2`` are masked then they are replaced with filled versions of themselves. Parameters ---------- val : ndarray or MaskedArray Input val val2 : ndarray or MaskedArray Input val2 Returns ------- mask, val, val2: ndarray or None Mask: (None or bool ndarray), val, val2: ndarray """ def get_as_filled_ndarray(mask, val): """ Fill the given MaskedArray ``val`` from the first non-masked element in the array. This ensures that upstream Time initialization will succeed. Note that nothing happens if there are no masked elements. """ fill_value = None if np.any(val.mask): # Final mask is the logical-or of inputs mask = mask | val.mask # First unmasked element. 
If all elements are masked then # use fill_value=None from above which will use val.fill_value. # As long as the user has set this appropriately then all will # be fine. val_unmasked = val.compressed() # 1-d ndarray of unmasked values if len(val_unmasked) > 0: fill_value = val_unmasked[0] # Fill the input ``val``. If fill_value is None then this just returns # an ndarray view of val (no copy). val = val.filled(fill_value) return mask, val mask = False if isinstance(val, np.ma.MaskedArray): mask, val = get_as_filled_ndarray(mask, val) if isinstance(val2, np.ma.MaskedArray): mask, val2 = get_as_filled_ndarray(mask, val2) return mask, val, val2 class OperandTypeError(TypeError): def __init__(self, left, right, op=None): op_string = '' if op is None else f' for {op}' super().__init__( "Unsupported operand type(s){}: " "'{}' and '{}'".format(op_string, left.__class__.__name__, right.__class__.__name__)) def _check_leapsec(): global _LEAP_SECONDS_CHECK if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE: from astropy.utils import iers with _LEAP_SECONDS_LOCK: # There are three ways we can get here: # 1. First call (NOT_STARTED). # 2. Re-entrant call (RUNNING). We skip the initialisation # and don't worry about leap second errors. # 3. Another thread which raced with the first call # (RUNNING). The first thread has relinquished the # lock to us, so initialization is complete. if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED: _LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING update_leap_seconds() _LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE def update_leap_seconds(files=None): """If the current ERFA leap second table is out of date, try to update it. Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an up-to-date table. See that routine for the definition of "out of date". In order to make it safe to call this any time, all exceptions are turned into warnings. Parameters ---------- files : list of path-like, optional List of files/URLs to attempt to open. By default, uses the files defined by `astropy.utils.iers.LeapSeconds.auto_open`, which include the table used by ERFA itself, so if that is up to date, nothing will happen. Returns ------- n_update : int Number of items updated. """ try: from astropy.utils import iers table = iers.LeapSeconds.auto_open(files) return erfa.leap_seconds.update(table) except Exception as exc: warn("leap-second auto-update failed due to the following " f"exception: {exc!r}", AstropyWarning) return 0
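# The TimeDelta conversion and comparison API above can be exercised as
# follows (an illustrative sketch, not part of the module; it assumes only
# the built-in 'jd' format and standard units):
#
#     >>> from astropy.time import TimeDelta
#     >>> import astropy.units as u
#     >>> dt = TimeDelta(1.0 * u.day)
#     >>> dt.to_value('jd')        # conversion to a TimeDelta format
#     1.0
#     >>> dt.to_value(u.hr)        # conversion to a unit
#     24.0
#     >>> dt.isclose(86400 * u.s)  # within the default atol of ~20 ps
#     True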
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy import config as _config class Conf(_config.ConfigNamespace): # noqa """ Configuration parameters for `astropy.time`. """ use_fast_parser = _config.ConfigItem( ['True', 'False', 'force'], "Use fast C parser for supported time string formats, including ISO, " "ISOT, and YearDayTime. Allowed values are 'False' (use the Python parser), " "'True' (use the C parser and fall back to the Python parser if it fails), " "and 'force' (use the C parser and raise an exception if it fails). Note " "that the options are all strings.") conf = Conf() # noqa from .formats import * # noqa from .core import * # noqa
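# Usage sketch for the configuration item above; ``set_temp`` is the
# standard ``ConfigNamespace`` context manager for scoped overrides:
#
#     >>> from astropy.time import Time, conf
#     >>> with conf.set_temp('use_fast_parser', 'force'):
#     ...     t = Time(['2020-01-01 12:00:00', '2020-01-02 12:00:00'])
#
# Inside the block the C parser is required (parse failures raise);
# outside it, the default 'True' behavior (C parser with Python fallback)
# is restored.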
# Licensed under a 3-clause BSD style license - see LICENSE.rst # Copied from astropy/convolution/setup_package.py import os from setuptools import Extension import numpy C_TIME_PKGDIR = os.path.relpath(os.path.dirname(__file__)) SRC_FILES = [os.path.join(C_TIME_PKGDIR, filename) for filename in ['src/parse_times.c']] def get_extensions(): # Add '-Rpass-missed=.*' to ``extra_compile_args`` when compiling with clang # to report missed optimizations _time_ext = Extension(name='astropy.time._parse_times', sources=SRC_FILES, include_dirs=[numpy.get_include()], language='c') return [_time_ext]
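# For reference, a sketch of how this hook is typically consumed at build
# time: the ``extension-helpers`` package used by astropy's top-level
# setup.py collects ``get_extensions()`` from every ``setup_package.py``
# in the source tree (the top-level wiring shown here is an assumption,
# not defined in this file):
#
#     >>> from extension_helpers import get_extensions
#     >>> from setuptools import setup
#     >>> setup(ext_modules=get_extensions())  # picks up _time_ext above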
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """Time utilities. In particular, routines to do basic arithmetic on numbers represented by two doubles, using the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 -- http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf Furthermore, some helper routines to turn strings and other types of objects into two values, and vice versa. """ import decimal import numpy as np import astropy.units as u def day_frac(val1, val2, factor=None, divisor=None): """Return the sum of ``val1`` and ``val2`` as two float64s. The returned floats are an integer part and the fractional remainder, with the latter guaranteed to be within -0.5 and 0.5 (inclusive on either side, as the integer is rounded to even). The arithmetic is all done with exact floating point operations so no precision is lost to rounding error. It is assumed the sum is less than about 1e16, otherwise the remainder will be greater than 1.0. Parameters ---------- val1, val2 : array of float Values to be summed. factor : float, optional If given, multiply the sum by it. divisor : float, optional If given, divide the sum by it. Returns ------- day, frac : float64 Integer and fractional part of val1 + val2. """ # Add val1 and val2 exactly, returning the result as two float64s. # The first is the approximate sum (with some floating point error) # and the second is the error of the float64 sum. sum12, err12 = two_sum(val1, val2) if factor is not None: sum12, carry = two_product(sum12, factor) carry += err12 * factor sum12, err12 = two_sum(sum12, carry) if divisor is not None: q1 = sum12 / divisor p1, p2 = two_product(q1, divisor) d1, d2 = two_sum(sum12, -p1) d2 += err12 d2 -= p2 q2 = (d1 + d2) / divisor # 3-part float fine here; nothing can be lost sum12, err12 = two_sum(q1, q2) # Get the integer part and the fractional remainder. day = np.round(sum12) extra, frac = two_sum(sum12, -day) frac += extra + err12 # Our fraction can now have gotten >0.5 or <-0.5, which means we would # lose one bit of precision. So, correct for that. excess = np.round(frac) day += excess extra, frac = two_sum(sum12, -day) frac += extra + err12 return day, frac def quantity_day_frac(val1, val2=None): """Like ``day_frac``, but for quantities with units of time. The quantities are separately converted to days. Here, we need to take care with the conversion since while the routines here can do accurate multiplication, the conversion factor itself may not be accurate. For instance, if the quantity is in seconds, the conversion factor is 1./86400., which is not exactly representable as a float. To work around this, for conversion factors less than unity, rather than multiply by that possibly inaccurate factor, the value is divided by the conversion factor of a day to that unit (i.e., by 86400. for seconds). For conversion factors larger than 1, such as 365.25 for years, we do just multiply. With this scheme, one has precise conversion factors for all regular time units that astropy defines. Note, however, that it does not necessarily work for all custom time units, and cannot work when conversion to time is via an equivalency. For those cases, one remains limited by the fact that Quantity calculations are done in double precision, not in quadruple precision as for time. """ if val2 is not None: res11, res12 = quantity_day_frac(val1) res21, res22 = quantity_day_frac(val2) # This summation can at most lose 1 ULP in the second number.
return res11 + res21, res12 + res22 try: factor = val1.unit.to(u.day) except Exception: # Not a simple scaling, so cannot do the full-precision one. # But at least try normal conversion, since equivalencies may be set. return val1.to_value(u.day), 0. if factor == 1.: return day_frac(val1.value, 0.) if factor > 1: return day_frac(val1.value, 0., factor=factor) else: divisor = u.day.to(val1.unit) return day_frac(val1.value, 0., divisor=divisor) def two_sum(a, b): """ Add ``a`` and ``b`` exactly, returning the result as two float64s. The first is the approximate sum (with some floating point error) and the second is the error of the float64 sum. Using the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf Returns ------- sum, err : float64 Approximate sum of a + b and the exact floating point error """ x = a + b eb = x - a # bvirtual in Shewchuk ea = x - eb # avirtual in Shewchuk eb = b - eb # broundoff in Shewchuk ea = a - ea # aroundoff in Shewchuk return x, ea + eb def two_product(a, b): """ Multiply ``a`` and ``b`` exactly, returning the result as two float64s. The first is the approximate product (with some floating point error) and the second is the error of the float64 product. Uses the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf Returns ------- prod, err : float64 Approximate product a * b and the exact floating point error """ x = a * b ah, al = split(a) bh, bl = split(b) y1 = ah * bh y = x - y1 y2 = al * bh y -= y2 y3 = ah * bl y -= y3 y4 = al * bl y = y4 - y return x, y def split(a): """ Split float64 in two aligned parts. Uses the procedure of Shewchuk, 1997, Discrete & Computational Geometry 18(3):305-363 http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf """ c = 134217729. * a # 2**27+1. abig = c - a ah = c - abig al = a - ah return ah, al _enough_decimal_places = 34 # to represent two doubles def longdouble_to_twoval(val1, val2=None): if val2 is None: val2 = val1.dtype.type(0.) else: best_type = np.result_type(val1.dtype, val2.dtype) val1 = val1.astype(best_type, copy=False) val2 = val2.astype(best_type, copy=False) # day_frac is independent of dtype, as long as the dtypes # are the same and no factor or divisor is given. i, f = day_frac(val1, val2) return i.astype(float, copy=False), f.astype(float, copy=False) def decimal_to_twoval1(val1, val2=None): with decimal.localcontext() as ctx: ctx.prec = _enough_decimal_places d = decimal.Decimal(val1) i = round(d) f = d - i return float(i), float(f) def bytes_to_twoval1(val1, val2=None): return decimal_to_twoval1(val1.decode('ascii')) def twoval_to_longdouble(val1, val2): return val1.astype(np.longdouble) + val2.astype(np.longdouble) def twoval_to_decimal1(val1, val2): with decimal.localcontext() as ctx: ctx.prec = _enough_decimal_places return decimal.Decimal(val1) + decimal.Decimal(val2) def twoval_to_string1(val1, val2, fmt): if val2 == 0.: # For some formats, only a single float is really used. # For those, let numpy take care of correct number of digits.
return str(val1) result = format(twoval_to_decimal1(val1, val2), fmt).strip('0') if result[-1] == '.': result += '0' return result def twoval_to_bytes1(val1, val2, fmt): return twoval_to_string1(val1, val2, fmt).encode('ascii') decimal_to_twoval = np.vectorize(decimal_to_twoval1) bytes_to_twoval = np.vectorize(bytes_to_twoval1) twoval_to_decimal = np.vectorize(twoval_to_decimal1) twoval_to_string = np.vectorize(twoval_to_string1, excluded='fmt') twoval_to_bytes = np.vectorize(twoval_to_bytes1, excluded='fmt')
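# A quick numerical sketch of the exact arithmetic implemented above:
# ``two_sum`` recovers the rounding error that an ordinary ``+`` would
# discard, and ``day_frac`` keeps the fraction within [-0.5, 0.5]:
#
#     >>> from astropy.time.utils import two_sum, day_frac
#     >>> two_sum(1e16, 1.0)  # 1.0 is below one ULP of 1e16; error kept
#     (1e+16, 1.0)
#     >>> day_frac(2450000.5, 0.25)
#     (2450001.0, -0.25)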
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import fnmatch import time import re import datetime import warnings from decimal import Decimal from collections import OrderedDict, defaultdict import numpy as np import erfa from astropy.utils.decorators import lazyproperty, classproperty from astropy.utils.exceptions import AstropyDeprecationWarning import astropy.units as u from . import _parse_times from . import utils from .utils import day_frac, quantity_day_frac, two_sum, two_product from . import conf __all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix', 'TimeUnixTai', 'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear', 'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString', 'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime', 'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch', 'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD', 'TimeEpochDateString', 'TimeBesselianEpochString', 'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS', 'TimezoneInfo', 'TimeDeltaDatetime', 'TimeDatetime64', 'TimeYMDHMS', 'TimeNumeric', 'TimeDeltaNumeric'] __doctest_skip__ = ['TimePlotDate'] # These both get filled in at end after TimeFormat subclasses defined. # Use an OrderedDict to fix the order in which formats are tried. # This ensures, e.g., that 'isot' gets tried before 'fits'. TIME_FORMATS = OrderedDict() TIME_DELTA_FORMATS = OrderedDict() # Translations between deprecated FITS timescales defined by # Rots et al. 2015, A&A 574:A36, and timescales used here. FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt', 'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'} def _regexify_subfmts(subfmts): """ Iterate through each of the sub-formats and try substituting simple regular expressions for the strptime codes for year, month, day-of-month, hour, minute, second. If no % characters remain then turn the final string into a compiled regex. This assumes time formats do not have a % in them. This is done both to speed up parsing of strings and to allow mixed formats where strptime does not quite work well enough. """ new_subfmts = [] for subfmt_tuple in subfmts: subfmt_in = subfmt_tuple[1] if isinstance(subfmt_in, str): for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'), ('%m', r'(?P<mon>\d{1,2})'), ('%d', r'(?P<mday>\d{1,2})'), ('%H', r'(?P<hour>\d{1,2})'), ('%M', r'(?P<min>\d{1,2})'), ('%S', r'(?P<sec>\d{1,2})')): subfmt_in = subfmt_in.replace(strptime_code, regex) if '%' not in subfmt_in: subfmt_tuple = (subfmt_tuple[0], re.compile(subfmt_in + '$'), subfmt_tuple[2]) new_subfmts.append(subfmt_tuple) return tuple(new_subfmts) class TimeFormat: """ Base class for time representations. Parameters ---------- val1 : numpy ndarray, list, number, str, or bytes Values to initialize the time or times. Bytes are decoded as ascii. val2 : numpy ndarray, list, or number; optional Value(s) to initialize the time or times. Only used for numerical input, to help preserve precision. 
scale : str Time scale of input value(s) precision : int Precision for seconds as floating point in_subfmt : str Select subformat for inputting string times out_subfmt : str Select subformat for outputting string times from_jd : bool If true then val1, val2 are jd1, jd2 """ _default_scale = 'utc' # As of astropy 0.4 subfmts = () _registry = TIME_FORMATS def __init__(self, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False): self.scale = scale # validation of scale done later with _check_scale self.precision = precision self.in_subfmt = in_subfmt self.out_subfmt = out_subfmt self._jd1, self._jd2 = None, None if from_jd: self.jd1 = val1 self.jd2 = val2 else: val1, val2 = self._check_val_type(val1, val2) self.set_jds(val1, val2) def __init_subclass__(cls, **kwargs): # Register time formats that define a name, but leave out astropy_time since # it is not a user-accessible format and is only used for initialization into # a different format. if 'name' in cls.__dict__ and cls.name != 'astropy_time': # FIXME: check here that we're not introducing a collision with # an existing method or attribute; problem is it could be either # astropy.time.Time or astropy.time.TimeDelta, and at the point # where this is run neither of those classes have necessarily been # constructed yet. if 'value' in cls.__dict__ and not hasattr(cls.value, "fget"): raise ValueError("If defined, 'value' must be a property") cls._registry[cls.name] = cls # If this class defines its own subfmts, preprocess the definitions. if 'subfmts' in cls.__dict__: cls.subfmts = _regexify_subfmts(cls.subfmts) return super().__init_subclass__(**kwargs) @classmethod def _get_allowed_subfmt(cls, subfmt): """Get an allowed subfmt for this class, either the input ``subfmt`` if this is valid or '*' as a default. This method gets used in situations where the format of an existing Time object is changing and so the out_ or in_subfmt may need to be coerced to the default '*' if that ``subfmt`` is no longer valid. """ try: cls._select_subfmts(subfmt) except ValueError: subfmt = '*' return subfmt @property def in_subfmt(self): return self._in_subfmt @in_subfmt.setter def in_subfmt(self, subfmt): # Validate subfmt value for this class, raises ValueError if not. self._select_subfmts(subfmt) self._in_subfmt = subfmt @property def out_subfmt(self): return self._out_subfmt @out_subfmt.setter def out_subfmt(self, subfmt): # Validate subfmt value for this class, raises ValueError if not. 
self._select_subfmts(subfmt) self._out_subfmt = subfmt @property def jd1(self): return self._jd1 @jd1.setter def jd1(self, jd1): self._jd1 = _validate_jd_for_storage(jd1) if self._jd2 is not None: self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2) @property def jd2(self): return self._jd2 @jd2.setter def jd2(self, jd2): self._jd2 = _validate_jd_for_storage(jd2) if self._jd1 is not None: self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2) def __len__(self): return len(self.jd1) @property def scale(self): """Time scale""" self._scale = self._check_scale(self._scale) return self._scale @scale.setter def scale(self, val): self._scale = val def mask_if_needed(self, value): if self.masked: value = np.ma.array(value, mask=self.mask, copy=False) return value @property def mask(self): if 'mask' not in self.cache: self.cache['mask'] = np.isnan(self.jd2) if self.cache['mask'].shape: self.cache['mask'].flags.writeable = False return self.cache['mask'] @property def masked(self): if 'masked' not in self.cache: self.cache['masked'] = bool(np.any(self.mask)) return self.cache['masked'] @property def jd2_filled(self): return np.nan_to_num(self.jd2) if self.masked else self.jd2 @lazyproperty def cache(self): """ Return the cache associated with this instance. """ return defaultdict(dict) def _check_val_type(self, val1, val2): """Input value validation, typically overridden by derived classes""" # val1 cannot contain nan, but val2 can contain nan isfinite1 = np.isfinite(val1) if val1.size > 1: # Calling .all() on a scalar is surprisingly slow isfinite1 = isfinite1.all() # Note: arr.all() about 3x faster than np.all(arr) elif val1.size == 0: isfinite1 = False ok1 = (val1.dtype.kind == 'f' and val1.dtype.itemsize >= 8 and isfinite1 or val1.size == 0) ok2 = val2 is None or ( val2.dtype.kind == 'f' and val2.dtype.itemsize >= 8 and not np.any(np.isinf(val2))) or val2.size == 0 if not (ok1 and ok2): raise TypeError('Input values for {} class must be finite doubles' .format(self.name)) if getattr(val1, 'unit', None) is not None: # Convert any quantity-likes to days first, attempting to be # careful with the conversion, so that, e.g., large numbers of # seconds get converted without losing precision because # 1/86400 is not exactly representable as a float. val1 = u.Quantity(val1, copy=False) if val2 is not None: val2 = u.Quantity(val2, copy=False) try: val1, val2 = quantity_day_frac(val1, val2) except u.UnitsError: raise u.UnitConversionError( "only quantities with time units can be " "used to instantiate Time instances.") # We now have days, but the format may expect another unit. # On purpose, multiply with 1./day_unit because typically it is # 1./erfa.DAYSEC, and inverting it recovers the integer. # (This conversion will get undone in format's set_jds, hence # there may be room for optimizing this.) factor = 1. / getattr(self, 'unit', 1.) if factor != 1.: val1, carry = two_product(val1, factor) carry += val2 * factor val1, val2 = two_sum(val1, carry) elif getattr(val2, 'unit', None) is not None: raise TypeError('Cannot mix float and Quantity inputs') if val2 is None: val2 = np.array(0, dtype=val1.dtype) def asarray_or_scalar(val): """ Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray or a Python or numpy scalar. """ return np.asarray(val) if isinstance(val, np.ndarray) else val return asarray_or_scalar(val1), asarray_or_scalar(val2) def _check_scale(self, scale): """ Return a validated scale value. 
If there is a class attribute 'scale' then that defines the default / required time scale for this format. In this case, a provided scale value must match the class default; if no scale is provided, the class default is returned. Otherwise just make sure that scale is in the allowed list of scales. Provide a different error message if `None` (no value) was supplied. """ if scale is None: scale = self._default_scale if scale not in TIME_SCALES: raise ScaleValueError("Scale value '{}' not in " "allowed values {}" .format(scale, TIME_SCALES)) return scale def set_jds(self, val1, val2): """ Set internal jd1 and jd2 from val1 and val2. Must be provided by derived classes. """ raise NotImplementedError def to_value(self, parent=None, out_subfmt=None): """ Return time representation from internal jd1 and jd2 in specified ``out_subfmt``. This is the base method that ignores ``parent`` and uses the ``value`` property to compute the output. This is done by temporarily setting ``self.out_subfmt`` and calling ``self.value``. This is required for legacy Format subclasses prior to astropy 4.0. New code should instead implement the value functionality in ``to_value()`` and then make the ``value`` property be a simple call to ``self.to_value()``. Parameters ---------- parent : object Parent `~astropy.time.Time` object associated with this `~astropy.time.TimeFormat` object out_subfmt : str or None Output subformat (use existing self.out_subfmt if `None`) Returns ------- value : numpy.array, numpy.ma.array Array or masked array of formatted time representation values """ # Get value via ``value`` property, overriding out_subfmt temporarily if needed. if out_subfmt is not None: out_subfmt_orig = self.out_subfmt try: self.out_subfmt = out_subfmt value = self.value finally: self.out_subfmt = out_subfmt_orig else: value = self.value return self.mask_if_needed(value) @property def value(self): raise NotImplementedError @classmethod def _select_subfmts(cls, pattern): """ Return a list of subformats where name matches ``pattern`` using fnmatch. If no subformat matches pattern then a ValueError is raised. A special case is a format with no allowed subformats, i.e. subfmts=(), and pattern='*'. This is OK and happens when this method is used for validation of an out_subfmt. """ if not isinstance(pattern, str): raise ValueError('subfmt attribute must be a string') elif pattern == '*': return cls.subfmts subfmts = [x for x in cls.subfmts if fnmatch.fnmatchcase(x[0], pattern)] if len(subfmts) == 0: if len(cls.subfmts) == 0: raise ValueError(f'subformat not allowed for format {cls.name}') else: subfmt_names = [x[0] for x in cls.subfmts] raise ValueError(f'subformat {pattern!r} must match one of ' f'{subfmt_names} for format {cls.name}') return subfmts class TimeNumeric(TimeFormat): subfmts = ( ('float', np.float64, None, np.add), ('long', np.longdouble, utils.longdouble_to_twoval, utils.twoval_to_longdouble), ('decimal', np.object_, utils.decimal_to_twoval, utils.twoval_to_decimal), ('str', np.str_, utils.decimal_to_twoval, utils.twoval_to_string), ('bytes', np.bytes_, utils.bytes_to_twoval, utils.twoval_to_bytes), ) def _check_val_type(self, val1, val2): """Input value validation, typically overridden by derived classes""" # Save original state of val2 because the super()._check_val_type below # may change val2 from None to np.array(0). The value is saved in order # to prevent a useless and slow call to np.result_type() below in the # most common use-case of providing only val1.
orig_val2_is_none = val2 is None if val1.dtype.kind == 'f': val1, val2 = super()._check_val_type(val1, val2) elif (not orig_val2_is_none or not (val1.dtype.kind in 'US' or (val1.dtype.kind == 'O' and all(isinstance(v, Decimal) for v in val1.flat)))): raise TypeError( 'for {} class, input should be doubles, string, or Decimal, ' 'and second values are only allowed for doubles.' .format(self.name)) val_dtype = (val1.dtype if orig_val2_is_none else np.result_type(val1.dtype, val2.dtype)) subfmts = self._select_subfmts(self.in_subfmt) for subfmt, dtype, convert, _ in subfmts: if np.issubdtype(val_dtype, dtype): break else: raise ValueError('input type not among selected sub-formats.') if convert is not None: try: val1, val2 = convert(val1, val2) except Exception: raise TypeError( 'for {} class, input should be (long) doubles, string, ' 'or Decimal, and second values are only allowed for ' '(long) doubles.'.format(self.name)) return val1, val2 def to_value(self, jd1=None, jd2=None, parent=None, out_subfmt=None): """ Return time representation from internal jd1 and jd2. Subclasses that require ``parent`` or to adjust the jds should override this method. """ # TODO: do this in __init_subclass__? if self.__class__.value.fget is not self.__class__.to_value: return self.value if jd1 is None: jd1 = self.jd1 if jd2 is None: jd2 = self.jd2 if out_subfmt is None: out_subfmt = self.out_subfmt subfmt = self._select_subfmts(out_subfmt)[0] kwargs = {} if subfmt[0] in ('str', 'bytes'): unit = getattr(self, 'unit', 1) digits = int(np.ceil(np.log10(unit / np.finfo(float).eps))) # TODO: allow a way to override the format. kwargs['fmt'] = f'.{digits}f' value = subfmt[3](jd1, jd2, **kwargs) return self.mask_if_needed(value) value = property(to_value) class TimeJD(TimeNumeric): """ Julian Date time format. This represents the number of days since the beginning of the Julian Period. For example, 2451544.5 in JD is midnight on January 1, 2000. """ name = 'jd' def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. self.jd1, self.jd2 = day_frac(val1, val2) class TimeMJD(TimeNumeric): """ Modified Julian Date time format. This represents the number of days since midnight on November 17, 1858. For example, 51544.0 in MJD is midnight on January 1, 2000. """ name = 'mjd' def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. jd1, jd2 = day_frac(val1, val2) jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h). self.jd1, self.jd2 = day_frac(jd1, jd2) def to_value(self, **kwargs): jd1 = self.jd1 - erfa.DJM0 # This cannot lose precision. jd2 = self.jd2 return super().to_value(jd1=jd1, jd2=jd2, **kwargs) value = property(to_value) class TimeDecimalYear(TimeNumeric): """ Time as a decimal year, with integer values corresponding to midnight of the first day of each year. For example 2000.5 corresponds to the ISO time '2000-07-02 00:00:00'. """ name = 'decimalyear' def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. sum12, err12 = two_sum(val1, val2) iy_start = np.trunc(sum12).astype(int) extra, y_frac = two_sum(sum12, -iy_start) y_frac += extra + err12 val = (val1 + val2).astype(np.double) iy_start = np.trunc(val).astype(int) imon = np.ones_like(iy_start) iday = np.ones_like(iy_start) ihr = np.zeros_like(iy_start) imin = np.zeros_like(iy_start) isec = np.zeros_like(y_frac) # Possible enhancement: use np.unique to only compute start, stop # for unique values of iy_start. 
scale = self.scale.upper().encode('ascii') jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec) jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec) t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd') t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd') t_frac = t_start + (t_end - t_start) * y_frac self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2) def to_value(self, **kwargs): scale = self.scale.upper().encode('ascii') iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0 self.jd1, self.jd2_filled) imon = np.ones_like(iy_start) iday = np.ones_like(iy_start) ihr = np.zeros_like(iy_start) imin = np.zeros_like(iy_start) isec = np.zeros_like(self.jd1) # Possible enhancement: use np.unique to only compute start, stop # for unique values of iy_start. scale = self.scale.upper().encode('ascii') jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec) jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec) # Trying to be precise, but more than float64 not useful. dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start) dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start) decimalyear = iy_start + dt / dt_end return super().to_value(jd1=decimalyear, jd2=np.float64(0.0), **kwargs) value = property(to_value) class TimeFromEpoch(TimeNumeric): """ Base class for times that represent the interval from a particular epoch as a floating point multiple of a unit time interval (e.g. seconds or days). """ @classproperty(lazy=True) def _epoch(cls): # Ideally we would use `def epoch(cls)` here and not have the instance # property below. However, this breaks the sphinx API docs generation # in a way that was not resolved. See #10406 for details. return Time(cls.epoch_val, cls.epoch_val2, scale=cls.epoch_scale, format=cls.epoch_format) @property def epoch(self): """Reference epoch time from which the time interval is measured""" return self._epoch def set_jds(self, val1, val2): """ Initialize the internal jd1 and jd2 attributes given val1 and val2. For a TimeFromEpoch subclass like TimeUnix these will be floats giving the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00). """ # Form new JDs based on epoch time + time from epoch (converted to JD). # One subtlety that might not be obvious is that 1.000 Julian days in # UTC can be 86400 or 86401 seconds. For the TimeUnix format the # assumption is that every day is exactly 86400 seconds, so this is, in # principle, doing the math incorrectly, *except* that it matches the # definition of Unix time which does not include leap seconds. # note: use divisor=1./self.unit, since this is either 1 or 1/86400, # and 1/86400 is not exactly representable as a float64, so multiplying # by that will cause rounding errors. (But inverting it as a float64 # recovers the exact number) day, frac = day_frac(val1, val2, divisor=1. / self.unit) jd1 = self.epoch.jd1 + day jd2 = self.epoch.jd2 + frac # For the usual case that scale is the same as epoch_scale, we only need # to ensure that abs(jd2) <= 0.5. Since abs(self.epoch.jd2) <= 0.5 and # abs(frac) <= 0.5, we can do simple (fast) checks and arithmetic here # without another call to day_frac(). Note also that `round(jd2.item())` # is about 10x faster than `np.round(jd2)` for a scalar.
if self.epoch.scale == self.scale: jd1_extra = np.round(jd2) if jd2.shape else round(jd2.item()) jd1 += jd1_extra jd2 -= jd1_extra self.jd1, self.jd2 = jd1, jd2 return # Create a temporary Time object corresponding to the new (jd1, jd2) in # the epoch scale (e.g. UTC for TimeUnix) then convert that to the # desired time scale for this object. # # A known limitation is that the transform from self.epoch_scale to # self.scale cannot involve any metadata like lat or lon. try: tm = getattr(Time(jd1, jd2, scale=self.epoch_scale, format='jd'), self.scale) except Exception as err: raise ScaleValueError("Cannot convert from '{}' epoch scale '{}' " "to specified scale '{}', got error:\n{}" .format(self.name, self.epoch_scale, self.scale, err)) from err self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2) def to_value(self, parent=None, **kwargs): # Make sure that scale is the same as epoch scale so we can just # subtract the epoch and convert if self.scale != self.epoch_scale: if parent is None: raise ValueError('cannot compute value without parent Time object') try: tm = getattr(parent, self.epoch_scale) except Exception as err: raise ScaleValueError("Cannot convert from '{}' epoch scale '{}' " "to specified scale '{}', got error:\n{}" .format(self.name, self.epoch_scale, self.scale, err)) from err jd1, jd2 = tm._time.jd1, tm._time.jd2 else: jd1, jd2 = self.jd1, self.jd2 # This factor is guaranteed to be exactly representable, which # means time_from_epoch1 is calculated exactly. factor = 1. / self.unit time_from_epoch1 = (jd1 - self.epoch.jd1) * factor time_from_epoch2 = (jd2 - self.epoch.jd2) * factor return super().to_value(jd1=time_from_epoch1, jd2=time_from_epoch2, **kwargs) value = property(to_value) @property def _default_scale(self): return self.epoch_scale class TimeUnix(TimeFromEpoch): """ Unix time (UTC): seconds from 1970-01-01 00:00:00 UTC, ignoring leap seconds. For example, 946684800.0 in Unix time is midnight on January 1, 2000. NOTE: this quantity is not exactly unix time and differs from the strict POSIX definition by up to 1 second on days with a leap second. POSIX unix time actually jumps backward by 1 second at midnight on leap second days while this class value is monotonically increasing at 86400 seconds per UTC day. """ name = 'unix' unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds) epoch_val = '1970-01-01 00:00:00' epoch_val2 = None epoch_scale = 'utc' epoch_format = 'iso' class TimeUnixTai(TimeUnix): """ Unix time (TAI): SI seconds elapsed since 1970-01-01 00:00:00 TAI (see caveats). This will generally differ from standard (UTC) Unix time by the cumulative integral number of leap seconds introduced into UTC since 1972-01-01 UTC plus the initial offset of 10 seconds at that date. This convention matches the definition of linux CLOCK_TAI (https://www.cl.cam.ac.uk/~mgk25/posix-clocks.html), and the Precision Time Protocol (https://en.wikipedia.org/wiki/Precision_Time_Protocol), which is also used by the White Rabbit protocol in High Energy Physics: https://white-rabbit.web.cern.ch. Caveats: - Before 1972, fractional adjustments to UTC were made, so the difference between ``unix`` and ``unix_tai`` time is no longer an integer. - Because of the fractional adjustments, to be very precise, ``unix_tai`` is the number of seconds since ``1970-01-01 00:00:00 TAI`` or equivalently ``1969-12-31 23:59:51.999918 UTC``. The difference between TAI and UTC at that epoch was 8.000082 sec.
- On the day of a positive leap second the difference between ``unix`` and ``unix_tai`` times increases linearly through the day by 1.0. See also the documentation for the `~astropy.time.TimeUnix` class. - Negative leap seconds are possible, though none have been needed to date. Examples -------- >>> # get the current offset between TAI and UTC >>> from astropy.time import Time >>> t = Time('2020-01-01', scale='utc') >>> t.unix_tai - t.unix 37.0 >>> # Before 1972, the offset between TAI and UTC was not an integer >>> t = Time('1970-01-01', scale='utc') >>> t.unix_tai - t.unix # doctest: +FLOAT_CMP 8.000082 >>> # Initial offset of 10 seconds in 1972 >>> t = Time('1972-01-01', scale='utc') >>> t.unix_tai - t.unix 10.0 """ name = 'unix_tai' epoch_val = '1970-01-01 00:00:00' epoch_scale = 'tai' class TimeCxcSec(TimeFromEpoch): """ Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT. For example, 63072064.184 is midnight on January 1, 2000. """ name = 'cxcsec' unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds) epoch_val = '1998-01-01 00:00:00' epoch_val2 = None epoch_scale = 'tt' epoch_format = 'iso' class TimeGPS(TimeFromEpoch): """GPS time: seconds from 1980-01-06 00:00:00 UTC. For example, 630720013.0 is midnight on January 1, 2000. Notes ----- This implementation is strictly a representation of the number of seconds (including leap seconds) since midnight UTC on 1980-01-06. GPS can also be considered as a time scale which is ahead of TAI by a fixed offset (to within about 100 nanoseconds). For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer """ name = 'gps' unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds) epoch_val = '1980-01-06 00:00:19' # above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai epoch_val2 = None epoch_scale = 'tai' epoch_format = 'iso' class TimePlotDate(TimeFromEpoch): """ Matplotlib `~matplotlib.pyplot.plot_date` input: 1 + number of days from 0001-01-01 00:00:00 UTC This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date` function:: >>> import matplotlib.pyplot as plt >>> jyear = np.linspace(2000, 2001, 20) >>> t = Time(jyear, format='jyear', scale='utc') >>> plt.plot_date(t.plot_date, jyear) >>> plt.gcf().autofmt_xdate() # orient date labels at a slant >>> plt.draw() For example, 730120.0003703703 is midnight on January 1, 2000. """ # This corresponds to the zero reference time for matplotlib plot_date(). # Note that TAI and UTC are equivalent at the reference time. name = 'plot_date' unit = 1.0 epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1 epoch_val2 = None epoch_scale = 'utc' epoch_format = 'jd' @lazyproperty def epoch(self): """Reference epoch time from which the time interval is measured""" try: # Matplotlib >= 3.3 has a get_epoch() function from matplotlib.dates import get_epoch except ImportError: # If no get_epoch() then the epoch is '0001-01-01' _epoch = self._epoch else: # Get the matplotlib date epoch as an ISOT string in UTC epoch_utc = get_epoch() from erfa import ErfaWarning with warnings.catch_warnings(): # Catch possible dubious year warnings from erfa warnings.filterwarnings('ignore', category=ErfaWarning) _epoch = Time(epoch_utc, scale='utc', format='isot') _epoch.format = 'jd' return _epoch class TimeStardate(TimeFromEpoch): """ Stardate: date units from 2318-07-05 12:00:00 UTC. For example, stardate 41153.7 is 00:52 on April 30, 2363.
See http://trekguide.com/Stardates.htm#TNG for calculations and reference points. """ name = 'stardate' unit = 0.397766856 # Stardate units per day epoch_val = '2318-07-05 11:00:00' # Date and time of stardate 00000.00 epoch_val2 = None epoch_scale = 'tai' epoch_format = 'iso' class TimeUnique(TimeFormat): """ Base class for time formats that can uniquely create a time object without requiring an explicit format specifier. This class does nothing but provide inheritance to identify a class as unique. """ class TimeAstropyTime(TimeUnique): """ Instantiate date from an Astropy Time object (or list thereof). This is purely for instantiating from a Time object. The output format is the same as the first time instance. """ name = 'astropy_time' def __new__(cls, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False): """ Use __new__ instead of __init__ to output a class instance that is the same as the class of the first Time object in the list. """ val1_0 = val1.flat[0] if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0) for val in val1.flat)): raise TypeError('Input values for {} class must all be the same ' 'astropy Time type.'.format(cls.name)) if scale is None: scale = val1_0.scale if val1.shape: vals = [getattr(val, scale)._time for val in val1] jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals]) jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals]) # Collect individual location values and merge into a single location. if any(tm.location is not None for tm in val1): if any(tm.location is None for tm in val1): raise ValueError('cannot concatenate times unless all locations ' 'are set or no locations are set') locations = [] for tm in val1: location = np.broadcast_to(tm.location, tm._time.jd1.shape, subok=True) locations.append(np.atleast_1d(location)) location = np.concatenate(locations) else: location = None else: val = getattr(val1_0, scale)._time jd1, jd2 = val.jd1, val.jd2 location = val1_0.location OutTimeFormat = val1_0._time.__class__ self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt, from_jd=True) # Make a temporary hidden attribute to transfer location back to the # parent Time object where it needs to live. self._location = location return self class TimeDatetime(TimeUnique): """ Represent date as Python standard library `~datetime.datetime` object Example:: >>> from astropy.time import Time >>> from datetime import datetime >>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc') >>> t.iso '2000-01-02 12:00:00.000' >>> t.tt.datetime datetime.datetime(2000, 1, 2, 12, 1, 4, 184000) """ name = 'datetime' def _check_val_type(self, val1, val2): if not all(isinstance(val, datetime.datetime) for val in val1.flat): raise TypeError('Input values for {} class must be ' 'datetime objects'.format(self.name)) if val2 is not None: raise ValueError( f'{self.name} objects do not accept a val2 but you provided {val2}') return val1, None def set_jds(self, val1, val2): """Convert datetime object contained in val1 to jd1, jd2""" # Iterate through the datetime objects, getting year, month, etc. iterator = np.nditer([val1, None, None, None, None, None, None], flags=['refs_ok', 'zerosize_ok'], op_dtypes=[None] + 5*[np.intc] + [np.double]) for val, iy, im, id, ihr, imin, dsec in iterator: dt = val.item() if dt.tzinfo is not None: dt = (dt - dt.utcoffset()).replace(tzinfo=None) iy[...] = dt.year im[...] = dt.month id[...] = dt.day ihr[...] = dt.hour imin[...] = dt.minute dsec[...]
= dt.second + dt.microsecond / 1e6 jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), *iterator.operands[1:]) self.jd1, self.jd2 = day_frac(jd1, jd2) def to_value(self, timezone=None, parent=None, out_subfmt=None): """ Convert to (potentially timezone-aware) `~datetime.datetime` object. If ``timezone`` is not ``None``, return a timezone-aware datetime object. Parameters ---------- timezone : {`~datetime.tzinfo`, None}, optional If not `None`, return timezone-aware datetime. Returns ------- `~datetime.datetime` If ``timezone`` is not ``None``, output will be timezone-aware. """ if out_subfmt is not None: # Out_subfmt not allowed for this format, so raise the standard # exception by trying to validate the value. self._select_subfmts(out_subfmt) if timezone is not None: if self._scale != 'utc': raise ScaleValueError("scale is {}, must be 'utc' when timezone " "is supplied.".format(self._scale)) # Rather than define a value property directly, we have a function, # since we want to be able to pass in timezone information. scale = self.scale.upper().encode('ascii') iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec self.jd1, self.jd2_filled) ihrs = ihmsfs['h'] imins = ihmsfs['m'] isecs = ihmsfs['s'] ifracs = ihmsfs['f'] iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None], flags=['refs_ok', 'zerosize_ok'], op_dtypes=7*[None] + [object]) for iy, im, id, ihr, imin, isec, ifracsec, out in iterator: if isec >= 60: raise ValueError('Time {} is within a leap second but datetime ' 'does not support leap seconds' .format((iy, im, id, ihr, imin, isec, ifracsec))) if timezone is not None: out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec, tzinfo=TimezoneInfo()).astimezone(timezone) else: out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec) return self.mask_if_needed(iterator.operands[-1]) value = property(to_value) class TimeYMDHMS(TimeUnique): """ ymdhms: A Time format to represent Time as year, month, day, hour, minute, second (thus the name ymdhms). Acceptable inputs must have keys or column names in the "YMDHMS" set of ``year``, ``month``, ``day``, ``hour``, ``minute``, ``second``: - Dict with keys in the YMDHMS set - NumPy structured array, record array or astropy Table, or single row of those types, with column names in the YMDHMS set One can supply a subset of the YMDHMS values, for instance only 'year', 'month', and 'day'. Inputs have the following defaults:: 'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0 When the input is supplied as a ``dict`` then each value can be either a scalar value or an array. The values will be broadcast to a common shape. Example:: >>> from astropy.time import Time >>> t = Time({'year': 2015, 'month': 2, 'day': 3, ... 'hour': 12, 'minute': 13, 'second': 14.567}, ... scale='utc') >>> t.iso '2015-02-03 12:13:14.567' >>> t.ymdhms.year 2015 """ name = 'ymdhms' def _check_val_type(self, val1, val2): """ This checks inputs for the YMDHMS format. It is a bit more complex than most format checkers because of the flexible input that is allowed. Also, it actually coerces ``val1`` into an appropriate dict of ndarrays that can be used easily by ``set_jds()``. This is useful because it makes it easy to get default values in that routine.
Parameters ---------- val1 : ndarray or None val2 : ndarray or None Returns ------- val1_as_dict, val2 : val1 as dict or None, val2 is always None """ if val2 is not None: raise ValueError('val2 must be None for ymdhms format') ymdhms = ['year', 'month', 'day', 'hour', 'minute', 'second'] if val1.dtype.names: # Convert to a dict of ndarray val1_as_dict = {name: val1[name] for name in val1.dtype.names} elif val1.shape == (0,): # Input was empty list [], so set to None and set_jds will handle this return None, None elif (val1.dtype.kind == 'O' and val1.shape == () and isinstance(val1.item(), dict)): # Code gets here for input as a dict. The dict input # can be either scalar values or N-d arrays. # Extract the item (which is a dict) and broadcast values to the # same shape here. names = val1.item().keys() values = val1.item().values() val1_as_dict = {name: value for name, value in zip(names, np.broadcast_arrays(*values))} else: raise ValueError('input must be dict or table-like') # Check that the key names now are good. names = val1_as_dict.keys() required_names = ymdhms[:len(names)] def comma_repr(vals): return ', '.join(repr(val) for val in vals) bad_names = set(names) - set(ymdhms) if bad_names: raise ValueError(f'{comma_repr(bad_names)} not allowed as YMDHMS key name(s)') if set(names) != set(required_names): raise ValueError(f'for {len(names)} input key names ' f'you must supply {comma_repr(required_names)}') return val1_as_dict, val2 def set_jds(self, val1, val2): if val1 is None: # Input was empty list [] jd1 = np.array([], dtype=np.float64) jd2 = np.array([], dtype=np.float64) else: jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), val1['year'], val1.get('month', 1), val1.get('day', 1), val1.get('hour', 0), val1.get('minute', 0), val1.get('second', 0)) self.jd1, self.jd2 = day_frac(jd1, jd2) @property def value(self): scale = self.scale.upper().encode('ascii') iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 9, self.jd1, self.jd2_filled) out = np.empty(self.jd1.shape, dtype=[('year', 'i4'), ('month', 'i4'), ('day', 'i4'), ('hour', 'i4'), ('minute', 'i4'), ('second', 'f8')]) out['year'] = iys out['month'] = ims out['day'] = ids out['hour'] = ihmsfs['h'] out['minute'] = ihmsfs['m'] out['second'] = ihmsfs['s'] + ihmsfs['f'] * 10**(-9) out = out.view(np.recarray) return self.mask_if_needed(out) class TimezoneInfo(datetime.tzinfo): """ Subclass of the `~datetime.tzinfo` object, used in the to_datetime method to specify timezones. It may be safer in most cases to use a timezone database package like pytz rather than defining your own timezones - this class is mainly a workaround for users without pytz. """ @u.quantity_input(utc_offset=u.day, dst=u.day) def __init__(self, utc_offset=0 * u.day, dst=0 * u.day, tzname=None): """ Parameters ---------- utc_offset : `~astropy.units.Quantity`, optional Offset from UTC in days. Defaults to zero. dst : `~astropy.units.Quantity`, optional Daylight Savings Time offset in days. Defaults to zero (no daylight savings). 
tzname : str or None, optional Name of timezone Examples -------- >>> from datetime import datetime >>> from astropy.time import TimezoneInfo # Specifies a timezone >>> import astropy.units as u >>> utc = TimezoneInfo() # Defaults to UTC >>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1 >>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour) >>> print(dt_aware) 2000-01-01 00:00:00+01:00 >>> print(dt_aware.astimezone(utc)) 1999-12-31 23:00:00+00:00 """ if utc_offset == 0 and dst == 0 and tzname is None: tzname = 'UTC' self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day)) self._tzname = tzname self._dst = datetime.timedelta(dst.to_value(u.day)) def utcoffset(self, dt): return self._utcoffset def tzname(self, dt): return str(self._tzname) def dst(self, dt): return self._dst class TimeString(TimeUnique): """ Base class for string-like time representations. This class assumes that anything following the last decimal point to the right is a fraction of a second. **Fast C-based parser** Time format classes can take advantage of a fast C-based parser if the times are represented as fixed-format strings with year, month, day-of-month, hour, minute, second, OR year, day-of-year, hour, minute, second. This can be a factor of 20 or more faster than the pure Python parser. Fixed format means that the components always have the same number of characters. The Python parser will accept ``2001-9-2`` as a date, but the C parser would require ``2001-09-02``. A subclass in this case must define a class attribute ``fast_parser_pars`` which is a `dict` with all of the keys below. An inherited attribute is not checked, only an attribute in the class ``__dict__``. - ``delims`` (tuple of int): ASCII code for character at corresponding ``starts`` position (0 => no character) - ``starts`` (tuple of int): position where component starts (including delimiter if present). Use -1 for the month component for formats that use day of year. - ``stops`` (tuple of int): position where component ends. Use -1 to continue to end of string, or for the month component for formats that use day of year. - ``break_allowed`` (tuple of int): if true (1) then the time string can legally end just before the corresponding component (e.g. "2000-01-01" is a valid time but "2000-01-01 12" is not). - ``has_day_of_year`` (int): 0 if dates have year, month, day; 1 if year, day-of-year """ def __init_subclass__(cls, **kwargs): if 'fast_parser_pars' in cls.__dict__: fpp = cls.fast_parser_pars fpp = np.array(list(zip(map(chr, fpp['delims']), fpp['starts'], fpp['stops'], fpp['break_allowed'])), _parse_times.dt_pars) if cls.fast_parser_pars['has_day_of_year']: fpp['start'][1] = fpp['stop'][1] = -1 cls._fast_parser = _parse_times.create_parser(fpp) super().__init_subclass__(**kwargs) def _check_val_type(self, val1, val2): if val1.dtype.kind not in ('S', 'U') and val1.size: raise TypeError(f'Input values for {self.name} class must be strings') if val2 is not None: raise ValueError( f'{self.name} objects do not accept a val2 but you provided {val2}') return val1, None def parse_string(self, timestr, subfmts): """Read time from a single string, using a set of possible formats.""" # Datetime components required for conversion to JD by ERFA, along # with the default values. components = ('year', 'mon', 'mday', 'hour', 'min', 'sec') defaults = (None, 1, 1, 0, 0, 0) # Assume that anything following "." on the right side is a # floating fraction of a second.
try: idot = timestr.rindex('.') except Exception: fracsec = 0.0 else: timestr, fracsec = timestr[:idot], timestr[idot:] fracsec = float(fracsec) for _, strptime_fmt_or_regex, _ in subfmts: if isinstance(strptime_fmt_or_regex, str): try: tm = time.strptime(timestr, strptime_fmt_or_regex) except ValueError: continue else: vals = [getattr(tm, 'tm_' + component) for component in components] else: tm = re.match(strptime_fmt_or_regex, timestr) if tm is None: continue tm = tm.groupdict() vals = [int(tm.get(component, default)) for component, default in zip(components, defaults)] # Add fractional seconds vals[-1] = vals[-1] + fracsec return vals else: raise ValueError(f'Time {timestr} does not match {self.name} format') def set_jds(self, val1, val2): """Parse the time strings contained in val1 and set jd1, jd2""" # If a specific input subformat is required then use the Python parser. # Also do this if the Time format class does not define a fast parser or # if the fast parser is entirely disabled. Note that `use_fast_parser` # is ignored for format classes that don't have a fast parser. if (self.in_subfmt != '*' or '_fast_parser' not in self.__class__.__dict__ or conf.use_fast_parser == 'False'): jd1, jd2 = self.get_jds_python(val1, val2) else: try: jd1, jd2 = self.get_jds_fast(val1, val2) except Exception: # Fall through to the Python parser unless fast is forced. if conf.use_fast_parser == 'force': raise else: jd1, jd2 = self.get_jds_python(val1, val2) self.jd1 = jd1 self.jd2 = jd2 def get_jds_python(self, val1, val2): """Parse the time strings contained in val1 and get jd1, jd2""" # Select subformats based on current self.in_subfmt subfmts = self._select_subfmts(self.in_subfmt) # Be liberal in what we accept: convert bytes to ascii. # Here .item() is needed for arrays with entries of unequal length, # to strip trailing 0 bytes. to_string = (str if val1.dtype.kind == 'U' else lambda x: str(x.item(), encoding='ascii')) iterator = np.nditer([val1, None, None, None, None, None, None], flags=['zerosize_ok'], op_dtypes=[None] + 5 * [np.intc] + [np.double]) for val, iy, im, id, ihr, imin, dsec in iterator: val = to_string(val) iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = ( self.parse_string(val, subfmts)) jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), *iterator.operands[1:]) jd1, jd2 = day_frac(jd1, jd2) return jd1, jd2 def get_jds_fast(self, val1, val2): """Use fast C parser to parse time strings in val1 and get jd1, jd2""" # Handle bytes or str input and convert to uint8. We need to use the # dtype _parse_times.dt_u1 instead of uint8, since otherwise it is # not possible to create a gufunc with structured dtype output. # See note about ufunc type resolver in pyerfa/erfa/ufunc.c.templ. if val1.dtype.kind == 'U': # Note: val1.astype('S') is *very* slow, so we check ourselves # that the input is pure ASCII. val1_uint32 = val1.view((np.uint32, val1.dtype.itemsize // 4)) if np.any(val1_uint32 > 127): raise ValueError('input is not pure ASCII') # It might be possible to avoid making a copy via astype with # cleverness in parse_times.c but leave that for another day. chars = val1_uint32.astype(_parse_times.dt_u1) else: chars = val1.view((_parse_times.dt_u1, val1.dtype.itemsize)) # Call the fast parsing ufunc.
time_struct = self._fast_parser(chars) jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), time_struct['year'], time_struct['month'], time_struct['day'], time_struct['hour'], time_struct['minute'], time_struct['second']) return day_frac(jd1, jd2) def str_kwargs(self): """ Generator that yields a dict of values corresponding to the calendar date and time for the internal JD values. """ scale = self.scale.upper().encode('ascii'), iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision, self.jd1, self.jd2_filled) # Get the str_fmt element of the first allowed output subformat _, _, str_fmt = self._select_subfmts(self.out_subfmt)[0] yday = None has_yday = '{yday:' in str_fmt ihrs = ihmsfs['h'] imins = ihmsfs['m'] isecs = ihmsfs['s'] ifracs = ihmsfs['f'] for iy, im, id, ihr, imin, isec, ifracsec in np.nditer( [iys, ims, ids, ihrs, imins, isecs, ifracs], flags=['zerosize_ok']): if has_yday: yday = datetime.datetime(iy, im, id).timetuple().tm_yday yield {'year': int(iy), 'mon': int(im), 'day': int(id), 'hour': int(ihr), 'min': int(imin), 'sec': int(isec), 'fracsec': int(ifracsec), 'yday': yday} def format_string(self, str_fmt, **kwargs): """Write time to a string using a given format. By default, just interprets str_fmt as a format string, but subclasses can add to this. """ return str_fmt.format(**kwargs) @property def value(self): # Select the first available subformat based on current # self.out_subfmt subfmts = self._select_subfmts(self.out_subfmt) _, _, str_fmt = subfmts[0] # TODO: fix this ugly hack if self.precision > 0 and str_fmt.endswith('{sec:02d}'): str_fmt += '.{fracsec:0' + str(self.precision) + 'd}' # Try to optimize this later. Can't pre-allocate because length of # output could change, e.g. year rolls from 999 to 1000. outs = [] for kwargs in self.str_kwargs(): outs.append(str(self.format_string(str_fmt, **kwargs))) return np.array(outs).reshape(self.jd1.shape) class TimeISO(TimeString): """ ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...". For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000. The allowed subformats are: - 'date_hms': date + hours, mins, secs (and optional fractional secs) - 'date_hm': date + hours, mins - 'date': date """ name = 'iso' subfmts = (('date_hms', '%Y-%m-%d %H:%M:%S', # XXX To Do - use strftime for output ?? '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'), ('date_hm', '%Y-%m-%d %H:%M', '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'), ('date', '%Y-%m-%d', '{year:d}-{mon:02d}-{day:02d}')) # Define positions and starting delimiter for year, month, day, hour, # minute, seconds components of an ISO time. This is used by the fast # C-parser parse_ymdhms_times() # # "2000-01-12 13:14:15.678" # 01234567890123456789012 # yyyy-mm-dd hh:mm:ss.fff # Parsed as ('yyyy', '-mm', '-dd', ' hh', ':mm', ':ss', '.fff') fast_parser_pars = dict( delims=(0, ord('-'), ord('-'), ord(' '), ord(':'), ord(':'), ord('.')), starts=(0, 4, 7, 10, 13, 16, 19), stops=(3, 6, 9, 12, 15, 18, -1), # Break allowed *before* # y m d h m s f break_allowed=(0, 0, 0, 1, 0, 1, 1), has_day_of_year=0) def parse_string(self, timestr, subfmts): # Handle trailing 'Z' for UTC time if timestr.endswith('Z'): if self.scale != 'utc': raise ValueError("Time input terminating in 'Z' must have " "scale='UTC'") timestr = timestr[:-1] return super().parse_string(timestr, subfmts) class TimeISOT(TimeISO): """ ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...". 
This is the same as TimeISO except for a "T" instead of space between the date and time. For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000. The allowed subformats are: - 'date_hms': date + hours, mins, secs (and optional fractional secs) - 'date_hm': date + hours, mins - 'date': date """ name = 'isot' subfmts = (('date_hms', '%Y-%m-%dT%H:%M:%S', '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'), ('date_hm', '%Y-%m-%dT%H:%M', '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'), ('date', '%Y-%m-%d', '{year:d}-{mon:02d}-{day:02d}')) # See TimeISO for explanation fast_parser_pars = dict( delims=(0, ord('-'), ord('-'), ord('T'), ord(':'), ord(':'), ord('.')), starts=(0, 4, 7, 10, 13, 16, 19), stops=(3, 6, 9, 12, 15, 18, -1), # Break allowed *before* # y m d h m s f break_allowed=(0, 0, 0, 1, 0, 1, 1), has_day_of_year=0) class TimeYearDayTime(TimeISO): """ Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...". The day-of-year (DOY) goes from 001 to 365 (366 in leap years). For example, 2000:001:00:00:00.000 is midnight on January 1, 2000. The allowed subformats are: - 'date_hms': date + hours, mins, secs (and optional fractional secs) - 'date_hm': date + hours, mins - 'date': date """ name = 'yday' subfmts = (('date_hms', '%Y:%j:%H:%M:%S', '{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'), ('date_hm', '%Y:%j:%H:%M', '{year:d}:{yday:03d}:{hour:02d}:{min:02d}'), ('date', '%Y:%j', '{year:d}:{yday:03d}')) # Define positions and starting delimiter for year, month, day, hour, # minute, seconds components of an ISO time. This is used by the fast # C-parser parse_ymdhms_times() # # "2000:123:13:14:15.678" # 012345678901234567890 # yyyy:ddd:hh:mm:ss.fff # Parsed as ('yyyy', ':ddd', ':hh', ':mm', ':ss', '.fff') # # delims: character at corresponding `starts` position (0 => no character) # starts: position where component starts (including delimiter if present) # stops: position where component ends (-1 => continue to end of string) fast_parser_pars = dict( delims=(0, 0, ord(':'), ord(':'), ord(':'), ord(':'), ord('.')), starts=(0, -1, 4, 8, 11, 14, 17), stops=(3, -1, 7, 10, 13, 16, -1), # Break allowed before: # y m d h m s f break_allowed=(0, 0, 0, 1, 0, 1, 1), has_day_of_year=1) class TimeDatetime64(TimeISOT): name = 'datetime64' def _check_val_type(self, val1, val2): if not val1.dtype.kind == 'M': if val1.size > 0: raise TypeError('Input values for {} class must be ' 'datetime64 objects'.format(self.name)) else: val1 = np.array([], 'datetime64[D]') if val2 is not None: raise ValueError( f'{self.name} objects do not accept a val2 but you provided {val2}') return val1, None def set_jds(self, val1, val2): # If there are any masked values in the ``val1`` datetime64 array # ('NaT') then stub them with a valid date so downstream parse_string # will work. The value under the mask is arbitrary but a "modern" date # is good. mask = np.isnat(val1) masked = np.any(mask) if masked: val1 = val1.copy() val1[mask] = '2000' # Make sure M(onth) and Y(ear) dates will parse and convert to bytestring if val1.dtype.name in ['datetime64[M]', 'datetime64[Y]']: val1 = val1.astype('datetime64[D]') val1 = val1.astype('S') # Standard ISO string parsing now super().set_jds(val1, val2) # Finally apply mask if necessary if masked: self.jd2[mask] = np.nan @property def value(self): precision = self.precision self.precision = 9 ret = super().value self.precision = precision return ret.astype('datetime64') class TimeFITS(TimeString): """ FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]". 
ISOT but can give signed five-digit year (mostly for negative years); The allowed subformats are: - 'date_hms': date + hours, mins, secs (and optional fractional secs) - 'date': date - 'longdate_hms': as 'date_hms', but with signed 5-digit year - 'longdate': as 'date', but with signed 5-digit year See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583). """ name = 'fits' subfmts = ( ('date_hms', (r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T' r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'), '{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'), ('date', r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)', '{year:04d}-{mon:02d}-{day:02d}'), ('longdate_hms', (r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T' r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'), '{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'), ('longdate', r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)', '{year:+06d}-{mon:02d}-{day:02d}')) # Add the regex that parses the scale and possible realization. # Support for this is deprecated. Read old style but no longer write # in this style. subfmts = tuple( (subfmt[0], subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?', subfmt[2]) for subfmt in subfmts) def parse_string(self, timestr, subfmts): """Read time and deprecated scale if present""" # Try parsing with any of the allowed sub-formats. for _, regex, _ in subfmts: tm = re.match(regex, timestr) if tm: break else: raise ValueError(f'Time {timestr} does not match {self.name} format') tm = tm.groupdict() # Scale and realization are deprecated and strings in this form # are no longer created. We issue a warning but still use the value. if tm['scale'] is not None: warnings.warn("FITS time strings should no longer have embedded time scale.", AstropyDeprecationWarning) # If a scale was given, translate from a possible deprecated # timescale identifier to the scale used by Time. fits_scale = tm['scale'].upper() scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower()) if scale not in TIME_SCALES: raise ValueError("Scale {!r} is not in the allowed scales {}" .format(scale, sorted(TIME_SCALES))) # If no scale was given in the initialiser, set the scale to # that given in the string. Realization is ignored # and is only supported to allow old-style strings to be # parsed. if self._scale is None: self._scale = scale if scale != self.scale: raise ValueError("Input strings for {} class must all " "have consistent time scales." .format(self.name)) return [int(tm['year']), int(tm['mon']), int(tm['mday']), int(tm.get('hour', 0)), int(tm.get('min', 0)), float(tm.get('sec', 0.))] @property def value(self): """Convert times to strings, using signed 5 digit if necessary.""" if 'long' not in self.out_subfmt: # If we have times before year 0 or after year 9999, we can # output only in a "long" format, using signed 5-digit years. jd = self.jd1 + self.jd2 if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5): self.out_subfmt = 'long' + self.out_subfmt return super().value class TimeEpochDate(TimeNumeric): """ Base class for support floating point Besselian and Julian epoch dates """ _default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'. def set_jds(self, val1, val2): self._check_scale(self._scale) # validate scale. 
epoch_to_jd = getattr(erfa, self.epoch_to_jd) jd1, jd2 = epoch_to_jd(val1 + val2) self.jd1, self.jd2 = day_frac(jd1, jd2) def to_value(self, **kwargs): jd_to_epoch = getattr(erfa, self.jd_to_epoch) value = jd_to_epoch(self.jd1, self.jd2) return super().to_value(jd1=value, jd2=np.float64(0.0), **kwargs) value = property(to_value) class TimeBesselianEpoch(TimeEpochDate): """Besselian Epoch year as floating point value(s) like 1950.0""" name = 'byear' epoch_to_jd = 'epb2jd' jd_to_epoch = 'epb' def _check_val_type(self, val1, val2): """Input value validation, typically overridden by derived classes""" if hasattr(val1, 'to') and hasattr(val1, 'unit'): raise ValueError("Cannot use Quantities for 'byear' format, " "as the interpretation would be ambiguous. " "Use float with Besselian year instead. ") # FIXME: is val2 really okay here? return super()._check_val_type(val1, val2) class TimeJulianEpoch(TimeEpochDate): """Julian Epoch year as floating point value(s) like 2000.0""" name = 'jyear' unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities epoch_to_jd = 'epj2jd' jd_to_epoch = 'epj' class TimeEpochDateString(TimeString): """ Base class to support string Besselian and Julian epoch dates such as 'B1950.0' or 'J2000.0' respectively. """ _default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'. def set_jds(self, val1, val2): epoch_prefix = self.epoch_prefix # Be liberal in what we accept: convert bytes to ascii. to_string = (str if val1.dtype.kind == 'U' else lambda x: str(x.item(), encoding='ascii')) iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double], flags=['zerosize_ok']) for val, years in iterator: try: time_str = to_string(val) epoch_type, year_str = time_str[0], time_str[1:] year = float(year_str) if epoch_type.upper() != epoch_prefix: raise ValueError except (IndexError, ValueError, UnicodeEncodeError): raise ValueError(f'Time {val} does not match {self.name} format') else: years[...] = year self._check_scale(self._scale) # validate scale. epoch_to_jd = getattr(erfa, self.epoch_to_jd) jd1, jd2 = epoch_to_jd(iterator.operands[-1]) self.jd1, self.jd2 = day_frac(jd1, jd2) @property def value(self): jd_to_epoch = getattr(erfa, self.jd_to_epoch) years = jd_to_epoch(self.jd1, self.jd2) # Use old-style format since it is a factor of 2 faster str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f' outs = [str_fmt % year for year in years.flat] return np.array(outs).reshape(self.jd1.shape) class TimeBesselianEpochString(TimeEpochDateString): """Besselian Epoch year as string value(s) like 'B1950.0'""" name = 'byear_str' epoch_to_jd = 'epb2jd' jd_to_epoch = 'epb' epoch_prefix = 'B' class TimeJulianEpochString(TimeEpochDateString): """Julian Epoch year as string value(s) like 'J2000.0'""" name = 'jyear_str' epoch_to_jd = 'epj2jd' jd_to_epoch = 'epj' epoch_prefix = 'J' class TimeDeltaFormat(TimeFormat): """Base class for time delta representations""" _registry = TIME_DELTA_FORMATS def _check_scale(self, scale): """ Check that the scale is in the allowed list of scales, or is `None` """ if scale is not None and scale not in TIME_DELTA_SCALES: raise ScaleValueError("Scale value '{}' not in " "allowed values {}" .format(scale, TIME_DELTA_SCALES)) return scale class TimeDeltaNumeric(TimeDeltaFormat, TimeNumeric): def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. self.jd1, self.jd2 = day_frac(val1, val2, divisor=1. 
/ self.unit) def to_value(self, **kwargs): # Note that 1/unit is always exactly representable, so the # following multiplications are exact. factor = 1. / self.unit jd1 = self.jd1 * factor jd2 = self.jd2 * factor return super().to_value(jd1=jd1, jd2=jd2, **kwargs) value = property(to_value) class TimeDeltaSec(TimeDeltaNumeric): """Time delta in SI seconds""" name = 'sec' unit = 1. / erfa.DAYSEC # for quantity input class TimeDeltaJD(TimeDeltaNumeric): """Time delta in Julian days (86400 SI seconds)""" name = 'jd' unit = 1. class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique): """Time delta in datetime.timedelta""" name = 'datetime' def _check_val_type(self, val1, val2): if not all(isinstance(val, datetime.timedelta) for val in val1.flat): raise TypeError('Input values for {} class must be ' 'datetime.timedelta objects'.format(self.name)) if val2 is not None: raise ValueError( f'{self.name} objects do not accept a val2 but you provided {val2}') return val1, None def set_jds(self, val1, val2): self._check_scale(self._scale) # Validate scale. iterator = np.nditer([val1, None, None], flags=['refs_ok', 'zerosize_ok'], op_dtypes=[None, np.double, np.double]) day = datetime.timedelta(days=1) for val, jd1, jd2 in iterator: jd1[...], other = divmod(val.item(), day) jd2[...] = other / day self.jd1, self.jd2 = day_frac(iterator.operands[-2], iterator.operands[-1]) @property def value(self): iterator = np.nditer([self.jd1, self.jd2, None], flags=['refs_ok', 'zerosize_ok'], op_dtypes=[None, None, object]) for jd1, jd2, out in iterator: jd1_, jd2_ = day_frac(jd1, jd2) out[...] = datetime.timedelta(days=jd1_, microseconds=jd2_ * 86400 * 1e6) return self.mask_if_needed(iterator.operands[-1]) def _validate_jd_for_storage(jd): if isinstance(jd, (float, int)): return np.array(jd, dtype=np.float_) if (isinstance(jd, np.generic) and (jd.dtype.kind == 'f' and jd.dtype.itemsize <= 8 or jd.dtype.kind in 'iu')): return np.array(jd, dtype=np.float_) elif (isinstance(jd, np.ndarray) and jd.dtype.kind == 'f' and jd.dtype.itemsize == 8): return jd else: raise TypeError( f"JD values must be arrays (possibly zero-dimensional) " f"of floats but we got {jd!r} of type {type(jd)}") def _broadcast_writeable(jd1, jd2): if jd1.shape == jd2.shape: return jd1, jd2 # When using broadcast_arrays, *both* are flagged with # warn-on-write, even the one that wasn't modified, and # require "C" only clears the flag if it actually copied # anything. shape = np.broadcast(jd1, jd2).shape if jd1.shape == shape: s_jd1 = jd1 else: s_jd1 = np.require(np.broadcast_to(jd1, shape), requirements=["C", "W"]) if jd2.shape == shape: s_jd2 = jd2 else: s_jd2 = np.require(np.broadcast_to(jd2, shape), requirements=["C", "W"]) return s_jd1, s_jd2 # Import symbols from core.py that are used in this module. This succeeds # because __init__.py imports format.py just before core.py. from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError # noqa
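# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): how the string formats
# above behave through the public `astropy.time.Time` interface.  The
# timestamps are arbitrary examples; `conf.set_temp` is the standard astropy
# config context manager and is assumed to be available here.
if __name__ == '__main__':
    import astropy.time
    from astropy.time import Time

    # 'yday' parses year:day-of-year strings (see TimeYearDayTime above);
    # other string formats are then available as attributes.
    t = Time('2010:001:00:00:00.123', format='yday', scale='utc')
    print(t.iso)     # 2010-01-01 00:00:00.123
    print(t.isot)    # 2010-01-01T00:00:00.123

    # Restricting the input subformat forces the pure-Python parser, as
    # described in TimeString.set_jds above.
    t2 = Time('2010-01-01', format='iso', in_subfmt='date', out_subfmt='date')
    print(t2.value)  # 2010-01-01

    # The fast C parser can be disabled ('False') or made mandatory ('force').
    with astropy.time.conf.set_temp('use_fast_parser', 'force'):
        t3 = Time('2010-01-01 00:00:00.123', format='iso')
        print(t3.jd)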
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst __all__ = ['quantity_input'] import inspect from numbers import Number from collections.abc import Sequence from functools import wraps import numpy as np from . import _typing as T from .core import (Unit, UnitBase, UnitsError, add_enabled_equivalencies, dimensionless_unscaled) from .function.core import FunctionUnitBase from .physical import PhysicalType, get_physical_type from .quantity import Quantity from .structured import StructuredUnit NoneType = type(None) def _get_allowed_units(targets): """ From a list of target units (either as strings or unit objects) and physical types, return a list of Unit objects. """ allowed_units = [] for target in targets: try: unit = Unit(target) except (TypeError, ValueError): try: unit = get_physical_type(target)._unit except (TypeError, ValueError, KeyError): # KeyError for Enum raise ValueError(f"Invalid unit or physical type {target!r}.") from None allowed_units.append(unit) return allowed_units def _validate_arg_value(param_name, func_name, arg, targets, equivalencies, strict_dimensionless=False): """ Validates the object passed in to the wrapped function, ``arg``, with target unit or physical type, ``target``. """ if len(targets) == 0: return allowed_units = _get_allowed_units(targets) # If dimensionless is an allowed unit and the argument is unit-less, # allow numbers or numpy arrays with numeric dtypes if (dimensionless_unscaled in allowed_units and not strict_dimensionless and not hasattr(arg, "unit")): if isinstance(arg, Number): return elif (isinstance(arg, np.ndarray) and np.issubdtype(arg.dtype, np.number)): return for allowed_unit in allowed_units: try: is_equivalent = arg.unit.is_equivalent(allowed_unit, equivalencies=equivalencies) if is_equivalent: break except AttributeError: # Either there is no .unit or no .is_equivalent if hasattr(arg, "unit"): error_msg = ("a 'unit' attribute without an 'is_equivalent' method") else: error_msg = "no 'unit' attribute" raise TypeError(f"Argument '{param_name}' to function '{func_name}'" f" has {error_msg}. You should pass in an astropy " "Quantity instead.") else: error_msg = (f"Argument '{param_name}' to function '{func_name}' must " "be in units convertible to") if len(targets) > 1: targ_names = ", ".join([f"'{str(targ)}'" for targ in targets]) raise UnitsError(f"{error_msg} one of: {targ_names}.") else: raise UnitsError(f"{error_msg} '{str(targets[0])}'.") def _parse_annotation(target): if target in (None, NoneType, inspect._empty): return target # check if unit-like try: unit = Unit(target) except (TypeError, ValueError): try: ptype = get_physical_type(target) except (TypeError, ValueError, KeyError): # KeyError for Enum if isinstance(target, str): raise ValueError(f"invalid unit or physical type {target!r}.") from None else: return ptype else: return unit # could be a type hint origin = T.get_origin(target) if origin is T.Union: return [_parse_annotation(t) for t in T.get_args(target)] elif origin is not T.Annotated: # can't be Quantity[] return False # parse type hint cls, *annotations = T.get_args(target) if not issubclass(cls, Quantity) or not annotations: return False # get unit from type hint unit, *rest = annotations if not isinstance(unit, (UnitBase, PhysicalType)): return False return unit class QuantityInput: @classmethod def as_decorator(cls, func=None, **kwargs): r""" A decorator for validating the units of arguments to functions. 
        Unit specifications can be provided as keyword arguments to the
        decorator, or by using function annotation syntax. Arguments to the
        decorator take precedence over any function annotations present.

        A `~astropy.units.UnitsError` will be raised if the unit attribute of
        the argument is not equivalent to the unit specified to the decorator
        or in the annotation. If the argument has no unit attribute, i.e. it
        is not a Quantity object, a `ValueError` will be raised unless the
        argument is an annotation. This is to allow non-Quantity annotations
        to pass through.

        Where an equivalency is specified in the decorator, the function will
        be executed with that equivalency in force.

        Notes
        -----
        The checking of arguments inside variable arguments to a function is
        not supported (i.e. \*arg or \**kwargs).

        The original function is accessible by the attribute ``__wrapped__``.
        See :func:`functools.wraps` for details.

        Examples
        --------
        .. code-block:: python

            import astropy.units as u

            @u.quantity_input(myangle=u.arcsec)
            def myfunction(myangle):
                return myangle**2

        .. code-block:: python

            import astropy.units as u

            @u.quantity_input
            def myfunction(myangle: u.arcsec):
                return myangle**2

        Or using a unit-aware Quantity annotation.

        .. code-block:: python

            @u.quantity_input
            def myfunction(myangle: u.Quantity[u.arcsec]):
                return myangle**2

        You can also specify a return value annotation, which will cause the
        function to always return a `~astropy.units.Quantity` in that unit.

        .. code-block:: python

            import astropy.units as u

            @u.quantity_input
            def myfunction(myangle: u.arcsec) -> u.deg**2:
                return myangle**2

        Using equivalencies::

            import astropy.units as u

            @u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
            def myfunction(myenergy):
                return myenergy**2

        """
        self = cls(**kwargs)
        if func is not None and not kwargs:
            return self(func)
        else:
            return self

    def __init__(self, func=None, strict_dimensionless=False, **kwargs):
        self.equivalencies = kwargs.pop('equivalencies', [])
        self.decorator_kwargs = kwargs
        self.strict_dimensionless = strict_dimensionless

    def __call__(self, wrapped_function):

        # Extract the function signature for the function we are wrapping.
        wrapped_signature = inspect.signature(wrapped_function)

        # Define a new function to return in place of the wrapped one
        @wraps(wrapped_function)
        def wrapper(*func_args, **func_kwargs):
            # Bind the arguments to our new function to the signature of the original.
            bound_args = wrapped_signature.bind(*func_args, **func_kwargs)

            # Iterate through the parameters of the original signature
            for param in wrapped_signature.parameters.values():
                # We do not support variable arguments (*args, **kwargs)
                if param.kind in (inspect.Parameter.VAR_KEYWORD,
                                  inspect.Parameter.VAR_POSITIONAL):
                    continue

                # Catch the (never triggered) case where bind relied on a default value.
if (param.name not in bound_args.arguments and param.default is not param.empty): bound_args.arguments[param.name] = param.default # Get the value of this parameter (argument to new function) arg = bound_args.arguments[param.name] # Get target unit or physical type, either from decorator kwargs # or annotations if param.name in self.decorator_kwargs: targets = self.decorator_kwargs[param.name] is_annotation = False else: targets = param.annotation is_annotation = True # parses to unit if it's an annotation (or list thereof) targets = _parse_annotation(targets) # If the targets is empty, then no target units or physical # types were specified so we can continue to the next arg if targets is inspect.Parameter.empty: continue # If the argument value is None, and the default value is None, # pass through the None even if there is a target unit if arg is None and param.default is None: continue # Here, we check whether multiple target unit/physical type's # were specified in the decorator/annotation, or whether a # single string (unit or physical type) or a Unit object was # specified if (isinstance(targets, str) or not isinstance(targets, Sequence)): valid_targets = [targets] # Check for None in the supplied list of allowed units and, if # present and the passed value is also None, ignore. elif None in targets or NoneType in targets: if arg is None: continue else: valid_targets = [t for t in targets if t is not None] else: valid_targets = targets # If we're dealing with an annotation, skip all the targets that # are not strings or subclasses of Unit. This is to allow # non unit related annotations to pass through if is_annotation: valid_targets = [t for t in valid_targets if isinstance(t, (str, UnitBase, PhysicalType))] # Now we loop over the allowed units/physical types and validate # the value of the argument: _validate_arg_value(param.name, wrapped_function.__name__, arg, valid_targets, self.equivalencies, self.strict_dimensionless) # Call the original function with any equivalencies in force. with add_enabled_equivalencies(self.equivalencies): return_ = wrapped_function(*func_args, **func_kwargs) # Return ra = wrapped_signature.return_annotation valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn) if ra not in valid_empty: target = (ra if T.get_origin(ra) not in (T.Annotated, T.Union) else _parse_annotation(ra)) if isinstance(target, str) or not isinstance(target, Sequence): target = [target] valid_targets = [t for t in target if isinstance(t, (str, UnitBase, PhysicalType))] _validate_arg_value("return", wrapped_function.__name__, return_, valid_targets, self.equivalencies, self.strict_dimensionless) if len(valid_targets) > 0: return_ <<= valid_targets[0] return return_ return wrapper quantity_input = QuantityInput.as_decorator
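# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): `kinetic_energy` is an
# invented example.  The decorator validates argument units from the
# annotations and, because of the ``-> u.J`` return annotation, converts the
# result to joules before returning it.
if __name__ == '__main__':
    import astropy.units as u

    @u.quantity_input
    def kinetic_energy(mass: u.kg, velocity: u.m / u.s) -> u.J:
        return 0.5 * mass * velocity ** 2

    print(kinetic_energy(2 * u.kg, 3 * u.m / u.s))  # 9.0 J

    try:
        kinetic_energy(2 * u.kg, 3 * u.s)  # seconds are not a speed
    except u.UnitsError as exc:
        print(exc)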
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines colloquially used Imperial units. They are available in the `astropy.units.imperial` namespace, but not in the top-level `astropy.units` namespace, e.g.:: >>> import astropy.units as u >>> mph = u.imperial.mile / u.hour >>> mph Unit("mi / h") To include them in `~astropy.units.UnitBase.compose` and the results of `~astropy.units.UnitBase.find_equivalent_units`, do:: >>> import astropy.units as u >>> u.imperial.enable() # doctest: +SKIP """ from .core import UnitBase, def_unit from . import si _ns = globals() ########################################################################### # LENGTH def_unit(['inch'], 2.54 * si.cm, namespace=_ns, doc="International inch") def_unit(['ft', 'foot'], 12 * inch, namespace=_ns, doc="International foot") def_unit(['yd', 'yard'], 3 * ft, namespace=_ns, doc="International yard") def_unit(['mi', 'mile'], 5280 * ft, namespace=_ns, doc="International mile") def_unit(['mil', 'thou'], 0.001 * inch, namespace=_ns, doc="Thousandth of an inch") def_unit(['nmi', 'nauticalmile', 'NM'], 1852 * si.m, namespace=_ns, doc="Nautical mile") def_unit(['fur', 'furlong'], 660 * ft, namespace=_ns, doc="Furlong") ########################################################################### # AREAS def_unit(['ac', 'acre'], 43560 * ft ** 2, namespace=_ns, doc="International acre") ########################################################################### # VOLUMES def_unit(['gallon'], si.liter / 0.264172052, namespace=_ns, doc="U.S. liquid gallon") def_unit(['quart'], gallon / 4, namespace=_ns, doc="U.S. liquid quart") def_unit(['pint'], quart / 2, namespace=_ns, doc="U.S. liquid pint") def_unit(['cup'], pint / 2, namespace=_ns, doc="U.S. customary cup") def_unit(['foz', 'fluid_oz', 'fluid_ounce'], cup / 8, namespace=_ns, doc="U.S. fluid ounce") def_unit(['tbsp', 'tablespoon'], foz / 2, namespace=_ns, doc="U.S. customary tablespoon") def_unit(['tsp', 'teaspoon'], tbsp / 3, namespace=_ns, doc="U.S. 
customary teaspoon") ########################################################################### # MASS def_unit(['oz', 'ounce'], 28.349523125 * si.g, namespace=_ns, doc="International avoirdupois ounce: mass") def_unit(['lb', 'lbm', 'pound'], 16 * oz, namespace=_ns, doc="International avoirdupois pound: mass") def_unit(['st', 'stone'], 14 * lb, namespace=_ns, doc="International avoirdupois stone: mass") def_unit(['ton'], 2000 * lb, namespace=_ns, doc="International avoirdupois ton: mass") def_unit(['slug'], 32.174049 * lb, namespace=_ns, doc="slug: mass") ########################################################################### # SPEED def_unit(['kn', 'kt', 'knot', 'NMPH'], nmi / si.h, namespace=_ns, doc="nautical unit of speed: 1 nmi per hour") ########################################################################### # FORCE def_unit('lbf', slug * ft * si.s**-2, namespace=_ns, doc="Pound: force") def_unit(['kip', 'kilopound'], 1000 * lbf, namespace=_ns, doc="Kilopound: force") ########################################################################## # ENERGY def_unit(['BTU', 'btu'], 1.05505585 * si.kJ, namespace=_ns, doc="British thermal unit") def_unit(['cal', 'calorie'], 4.184 * si.J, namespace=_ns, doc="Thermochemical calorie: pre-SI metric unit of energy") def_unit(['kcal', 'Cal', 'Calorie', 'kilocal', 'kilocalorie'], 1000 * cal, namespace=_ns, doc="Calorie: colloquial definition of Calorie") ########################################################################## # PRESSURE def_unit('psi', lbf * inch ** -2, namespace=_ns, doc="Pound per square inch: pressure") ########################################################################### # POWER # Imperial units def_unit(['hp', 'horsepower'], si.W / 0.00134102209, namespace=_ns, doc="Electrical horsepower") ########################################################################### # TEMPERATURE def_unit(['deg_F', 'Fahrenheit'], namespace=_ns, doc='Degrees Fahrenheit', format={'latex': r'{}^{\circ}F', 'unicode': '°F'}) def_unit(['deg_R', 'Rankine'], namespace=_ns, doc='Rankine scale: absolute scale of thermodynamic temperature') ########################################################################### # CLEANUP del UnitBase del def_unit ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) def enable(): """ Enable Imperial units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This may be used with the ``with`` statement to enable Imperial units only temporarily. """ # Local import to avoid cyclical import from .core import add_enabled_units # Local import to avoid polluting namespace import inspect return add_enabled_units(inspect.getmodule(enable))
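# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): converting with the
# units defined above, and using `enable` as a context manager as its
# docstring describes.
if __name__ == '__main__':
    import astropy.units as u
    from astropy.units import imperial

    speed = 100 * imperial.mi / u.h
    print(speed.to(u.km / u.h))          # 160.9344 km / h

    # Imperial units appear in equivalent-unit searches only while enabled.
    with imperial.enable():
        print(u.kg.find_equivalent_units())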
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines units used in the CDS format, both the units defined in `Centre de Données astronomiques de Strasbourg <http://cds.u-strasbg.fr/>`_ `Standards for Astronomical Catalogues 2.0 <http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_ format and the `complete set of supported units <https://vizier.u-strasbg.fr/viz-bin/Unit>`_. This format is used by VOTable up to version 1.2. These units are not available in the top-level `astropy.units` namespace. To use these units, you must import the `astropy.units.cds` module:: >>> from astropy.units import cds >>> q = 10. * cds.lyr # doctest: +SKIP To include them in `~astropy.units.UnitBase.compose` and the results of `~astropy.units.UnitBase.find_equivalent_units`, do:: >>> from astropy.units import cds >>> cds.enable() # doctest: +SKIP """ _ns = globals() def _initialize_module(): # Local imports to avoid polluting top-level namespace import numpy as np from . import core from astropy import units as u from astropy.constants import si as _si # The CDS format also supports power-of-2 prefixes as defined here: # http://physics.nist.gov/cuu/Units/binary.html prefixes = core.si_prefixes + core.binary_prefixes # CDS only uses the short prefixes prefixes = [(short, short, factor) for (short, long, factor) in prefixes] # The following units are defined in alphabetical order, directly from # here: https://vizier.u-strasbg.fr/viz-bin/Unit mapping = [ (['A'], u.A, "Ampere"), (['a'], u.a, "year", ['P']), (['a0'], _si.a0, "Bohr radius"), (['al'], u.lyr, "Light year", ['c', 'd']), (['lyr'], u.lyr, "Light year"), (['alpha'], _si.alpha, "Fine structure constant"), ((['AA', 'Å'], ['Angstrom', 'Angstroem']), u.AA, "Angstrom"), (['arcmin', 'arcm'], u.arcminute, "minute of arc"), (['arcsec', 'arcs'], u.arcsecond, "second of arc"), (['atm'], _si.atm, "atmosphere"), (['AU', 'au'], u.au, "astronomical unit"), (['bar'], u.bar, "bar"), (['barn'], u.barn, "barn"), (['bit'], u.bit, "bit"), (['byte'], u.byte, "byte"), (['C'], u.C, "Coulomb"), (['c'], _si.c, "speed of light", ['p']), (['cal'], 4.1854 * u.J, "calorie"), (['cd'], u.cd, "candela"), (['ct'], u.ct, "count"), (['D'], u.D, "Debye (dipole)"), (['d'], u.d, "Julian day", ['c']), ((['deg', '°'], ['degree']), u.degree, "degree"), (['dyn'], u.dyn, "dyne"), (['e'], _si.e, "electron charge", ['m']), (['eps0'], _si.eps0, "electric constant"), (['erg'], u.erg, "erg"), (['eV'], u.eV, "electron volt"), (['F'], u.F, "Farad"), (['G'], _si.G, "Gravitation constant"), (['g'], u.g, "gram"), (['gauss'], u.G, "Gauss"), (['geoMass', 'Mgeo'], u.M_earth, "Earth mass"), (['H'], u.H, "Henry"), (['h'], u.h, "hour", ['p']), (['hr'], u.h, "hour"), (['\\h'], _si.h, "Planck constant"), (['Hz'], u.Hz, "Hertz"), (['inch'], 0.0254 * u.m, "inch"), (['J'], u.J, "Joule"), (['JD'], u.d, "Julian day", ['M']), (['jovMass', 'Mjup'], u.M_jup, "Jupiter mass"), (['Jy'], u.Jy, "Jansky"), (['K'], u.K, "Kelvin"), (['k'], _si.k_B, "Boltzmann"), (['l'], u.l, "litre", ['a']), (['lm'], u.lm, "lumen"), (['Lsun', 'solLum'], u.solLum, "solar luminosity"), (['lx'], u.lx, "lux"), (['m'], u.m, "meter"), (['mag'], u.mag, "magnitude"), (['me'], _si.m_e, "electron mass"), (['min'], u.minute, "minute"), (['MJD'], u.d, "Julian day"), (['mmHg'], 133.322387415 * u.Pa, "millimeter of mercury"), (['mol'], u.mol, "mole"), (['mp'], _si.m_p, "proton mass"), (['Msun', 'solMass'], u.solMass, "solar mass"), ((['mu0', 'µ0'], []), _si.mu0, "magnetic constant"), (['muB'], 
_si.muB, "Bohr magneton"), (['N'], u.N, "Newton"), (['Ohm'], u.Ohm, "Ohm"), (['Pa'], u.Pa, "Pascal"), (['pc'], u.pc, "parsec"), (['ph'], u.ph, "photon"), (['pi'], u.Unit(np.pi), "π"), (['pix'], u.pix, "pixel"), (['ppm'], u.Unit(1e-6), "parts per million"), (['R'], _si.R, "gas constant"), (['rad'], u.radian, "radian"), (['Rgeo'], _si.R_earth, "Earth equatorial radius"), (['Rjup'], _si.R_jup, "Jupiter equatorial radius"), (['Rsun', 'solRad'], u.solRad, "solar radius"), (['Ry'], u.Ry, "Rydberg"), (['S'], u.S, "Siemens"), (['s', 'sec'], u.s, "second"), (['sr'], u.sr, "steradian"), (['Sun'], u.Sun, "solar unit"), (['T'], u.T, "Tesla"), (['t'], 1e3 * u.kg, "metric tonne", ['c']), (['u'], _si.u, "atomic mass", ['da', 'a']), (['V'], u.V, "Volt"), (['W'], u.W, "Watt"), (['Wb'], u.Wb, "Weber"), (['yr'], u.a, "year"), ] for entry in mapping: if len(entry) == 3: names, unit, doc = entry excludes = [] else: names, unit, doc, excludes = entry core.def_unit(names, unit, prefixes=prefixes, namespace=_ns, doc=doc, exclude_prefixes=excludes) core.def_unit(['µas'], u.microarcsecond, doc="microsecond of arc", namespace=_ns) core.def_unit(['mas'], u.milliarcsecond, doc="millisecond of arc", namespace=_ns) core.def_unit(['---', '-'], u.dimensionless_unscaled, doc="dimensionless and unscaled", namespace=_ns) core.def_unit(['%'], u.percent, doc="percent", namespace=_ns) # The Vizier "standard" defines this in units of "kg s-3", but # that may not make a whole lot of sense, so here we just define # it as its own new disconnected unit. core.def_unit(['Crab'], prefixes=prefixes, namespace=_ns, doc="Crab (X-ray) flux") _initialize_module() ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) def enable(): """ Enable CDS units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This will disable all of the "default" `astropy.units` units, since there are some namespace clashes between the two. This may be used with the ``with`` statement to enable CDS units only temporarily. """ # Local import to avoid cyclical import from .core import set_enabled_units # Local import to avoid polluting namespace import inspect return set_enabled_units(inspect.getmodule(enable))
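# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): using the CDS namespace
# directly and via the 'cds' string format.  The 'km/s' catalogue string is
# an arbitrary example.
if __name__ == '__main__':
    import astropy.units as u
    from astropy.units import cds

    dist = 2.5 * cds.pc
    print(dist.to(u.m))

    # Catalogue unit strings can be parsed with the CDS format rules:
    print(u.Unit('km/s', format='cds'))

    # enable() *replaces* the default registry, so keep it temporary:
    with cds.enable():
        print(u.m.find_equivalent_units())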
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines magnitude zero points and related photometric quantities.

The corresponding magnitudes are given in the description of each unit
(the actual definitions are in `~astropy.units.function.logarithmic`).
"""

import numpy as _numpy
from .core import UnitBase, def_unit, Unit

from astropy.constants import si as _si
from . import cgs, si, astrophys

_ns = globals()

def_unit(['Bol', 'L_bol'], _si.L_bol0, namespace=_ns, prefixes=False,
         doc="Luminosity corresponding to absolute bolometric magnitude zero "
         "(magnitude ``M_bol``).")
def_unit(['bol', 'f_bol'], _si.L_bol0 / (4 * _numpy.pi * (10.*astrophys.pc)**2),
         namespace=_ns, prefixes=False, doc="Irradiance corresponding to "
         "apparent bolometric magnitude zero (magnitude ``m_bol``).")
def_unit(['AB', 'ABflux'], 10.**(48.6/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.Hz,
         namespace=_ns, prefixes=False,
         doc="AB magnitude zero flux density (magnitude ``ABmag``).")
def_unit(['ST', 'STflux'], 10.**(21.1/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.AA,
         namespace=_ns, prefixes=False,
         doc="ST magnitude zero flux density (magnitude ``STmag``).")

def_unit(['mgy', 'maggy'],
         namespace=_ns, prefixes=[(['n'], ['nano'], 1e-9)],
         doc="Maggies - a linear flux unit that is the flux for a mag=0 object. "
             "To tie this onto a specific calibrated unit system, the "
             "zero_point_flux equivalency should be used.")


def zero_point_flux(flux0):
    """
    An equivalency for converting linear flux units ("maggies") defined
    relative to a standard source into a standardized system.

    Parameters
    ----------
    flux0 : `~astropy.units.Quantity`
        The flux of a magnitude-0 object in the "maggy" system.
    """
    flux_unit0 = Unit(flux0)
    return [(maggy, flux_unit0)]


###########################################################################
# CLEANUP

del UnitBase
del def_unit
del cgs, si, astrophys

###########################################################################
# DOCSTRING

# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
    __doc__ += _generate_unit_summary(globals())
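# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): tying maggies to a
# calibrated system with `zero_point_flux`.  This assumes the unit names and
# the equivalency defined above are re-exported at the `astropy.units` top
# level (as `u.mgy`, `u.nmgy` and `u.zero_point_flux`); 3631 Jy is the
# conventional AB zero point, used here purely as an example.
if __name__ == '__main__':
    import astropy.units as u

    eq = u.zero_point_flux(3631 * u.Jy)
    print((0.1 * u.mgy).to(u.Jy, equivalencies=eq))  # 363.1 Jy

    # Nanomaggies come predefined through the 'nano' prefix:
    print((1 * u.nmgy).to(u.mgy))                    # 1e-09 mgy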
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Core units classes and functions """ import inspect import operator import textwrap import warnings import numpy as np from astropy.utils.decorators import lazyproperty from astropy.utils.exceptions import AstropyWarning from astropy.utils.misc import isiterable from .utils import (is_effectively_unity, sanitize_scale, validate_power, resolve_fractions) from . import format as unit_format __all__ = [ 'UnitsError', 'UnitsWarning', 'UnitConversionError', 'UnitTypeError', 'UnitBase', 'NamedUnit', 'IrreducibleUnit', 'Unit', 'CompositeUnit', 'PrefixUnit', 'UnrecognizedUnit', 'def_unit', 'get_current_unit_registry', 'set_enabled_units', 'add_enabled_units', 'set_enabled_equivalencies', 'add_enabled_equivalencies', 'set_enabled_aliases', 'add_enabled_aliases', 'dimensionless_unscaled', 'one', ] UNITY = 1.0 def _flatten_units_collection(items): """ Given a list of sequences, modules or dictionaries of units, or single units, return a flat set of all the units found. """ if not isinstance(items, list): items = [items] result = set() for item in items: if isinstance(item, UnitBase): result.add(item) else: if isinstance(item, dict): units = item.values() elif inspect.ismodule(item): units = vars(item).values() elif isiterable(item): units = item else: continue for unit in units: if isinstance(unit, UnitBase): result.add(unit) return result def _normalize_equivalencies(equivalencies): """ Normalizes equivalencies, ensuring each is a 4-tuple of the form:: (from_unit, to_unit, forward_func, backward_func) Parameters ---------- equivalencies : list of equivalency pairs Raises ------ ValueError if an equivalency cannot be interpreted """ if equivalencies is None: return [] normalized = [] for i, equiv in enumerate(equivalencies): if len(equiv) == 2: funit, tunit = equiv a = b = lambda x: x elif len(equiv) == 3: funit, tunit, a = equiv b = a elif len(equiv) == 4: funit, tunit, a, b = equiv else: raise ValueError( f"Invalid equivalence entry {i}: {equiv!r}") if not (funit is Unit(funit) and (tunit is None or tunit is Unit(tunit)) and callable(a) and callable(b)): raise ValueError( f"Invalid equivalence entry {i}: {equiv!r}") normalized.append((funit, tunit, a, b)) return normalized class _UnitRegistry: """ Manages a registry of the enabled units. """ def __init__(self, init=[], equivalencies=[], aliases={}): if isinstance(init, _UnitRegistry): # If passed another registry we don't need to rebuild everything. # but because these are mutable types we don't want to create # conflicts so everything needs to be copied. self._equivalencies = init._equivalencies.copy() self._aliases = init._aliases.copy() self._all_units = init._all_units.copy() self._registry = init._registry.copy() self._non_prefix_units = init._non_prefix_units.copy() # The physical type is a dictionary containing sets as values. # All of these must be copied otherwise we could alter the old # registry. 
self._by_physical_type = {k: v.copy() for k, v in init._by_physical_type.items()} else: self._reset_units() self._reset_equivalencies() self._reset_aliases() self.add_enabled_units(init) self.add_enabled_equivalencies(equivalencies) self.add_enabled_aliases(aliases) def _reset_units(self): self._all_units = set() self._non_prefix_units = set() self._registry = {} self._by_physical_type = {} def _reset_equivalencies(self): self._equivalencies = set() def _reset_aliases(self): self._aliases = {} @property def registry(self): return self._registry @property def all_units(self): return self._all_units @property def non_prefix_units(self): return self._non_prefix_units def set_enabled_units(self, units): """ Sets the units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be "enabled" for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. """ self._reset_units() return self.add_enabled_units(units) def add_enabled_units(self, units): """ Adds to the set of units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be added to the "enabled" set for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. """ units = _flatten_units_collection(units) for unit in units: # Loop through all of the names first, to ensure all of them # are new, then add them all as a single "transaction" below. for st in unit._names: if (st in self._registry and unit != self._registry[st]): raise ValueError( "Object with name {!r} already exists in namespace. " "Filter the set of units to avoid name clashes before " "enabling them.".format(st)) for st in unit._names: self._registry[st] = unit self._all_units.add(unit) if not isinstance(unit, PrefixUnit): self._non_prefix_units.add(unit) hash = unit._get_physical_type_id() self._by_physical_type.setdefault(hash, set()).add(unit) def get_units_with_physical_type(self, unit): """ Get all units in the registry with the same physical type as the given unit. Parameters ---------- unit : UnitBase instance """ return self._by_physical_type.get(unit._get_physical_type_id(), set()) @property def equivalencies(self): return list(self._equivalencies) def set_enabled_equivalencies(self, equivalencies): """ Sets the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Use with care. Parameters ---------- equivalencies : list of tuple List of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. """ self._reset_equivalencies() return self.add_enabled_equivalencies(equivalencies) def add_enabled_equivalencies(self, equivalencies): """ Adds to the set of equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. 
Use with care. Parameters ---------- equivalencies : list of tuple List of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. """ # pre-normalize list to help catch mistakes equivalencies = _normalize_equivalencies(equivalencies) self._equivalencies |= set(equivalencies) @property def aliases(self): return self._aliases def set_enabled_aliases(self, aliases): """ Set aliases for units. Parameters ---------- aliases : dict of str, Unit The aliases to set. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. """ self._reset_aliases() self.add_enabled_aliases(aliases) def add_enabled_aliases(self, aliases): """ Add aliases for units. Parameters ---------- aliases : dict of str, Unit The aliases to add. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. """ for alias, unit in aliases.items(): if alias in self._registry and unit != self._registry[alias]: raise ValueError( f"{alias} already means {self._registry[alias]}, so " f"cannot be used as an alias for {unit}.") if alias in self._aliases and unit != self._aliases[alias]: raise ValueError( f"{alias} already is an alias for {self._aliases[alias]}, so " f"cannot be used as an alias for {unit}.") for alias, unit in aliases.items(): if alias not in self._registry and alias not in self._aliases: self._aliases[alias] = unit class _UnitContext: def __init__(self, init=[], equivalencies=[]): _unit_registries.append( _UnitRegistry(init=init, equivalencies=equivalencies)) def __enter__(self): pass def __exit__(self, type, value, tb): _unit_registries.pop() _unit_registries = [_UnitRegistry()] def get_current_unit_registry(): return _unit_registries[-1] def set_enabled_units(units): """ Sets the units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. This may be used either permanently, or as a context manager using the ``with`` statement (see example below). Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be "enabled" for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. Examples -------- >>> from astropy import units as u >>> with u.set_enabled_units([u.pc]): ... u.m.find_equivalent_units() ... 
Primary name | Unit definition | Aliases [ pc | 3.08568e+16 m | parsec , ] >>> u.m.find_equivalent_units() Primary name | Unit definition | Aliases [ AU | 1.49598e+11 m | au, astronomical_unit , Angstrom | 1e-10 m | AA, angstrom , cm | 0.01 m | centimeter , earthRad | 6.3781e+06 m | R_earth, Rearth , jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter , lsec | 2.99792e+08 m | lightsecond , lyr | 9.46073e+15 m | lightyear , m | irreducible | meter , micron | 1e-06 m | , pc | 3.08568e+16 m | parsec , solRad | 6.957e+08 m | R_sun, Rsun , ] """ # get a context with a new registry, using equivalencies of the current one context = _UnitContext( equivalencies=get_current_unit_registry().equivalencies) # in this new current registry, enable the units requested get_current_unit_registry().set_enabled_units(units) return context def add_enabled_units(units): """ Adds to the set of units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. This may be used either permanently, or as a context manager using the ``with`` statement (see example below). Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be added to the "enabled" set for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. Examples -------- >>> from astropy import units as u >>> from astropy.units import imperial >>> with u.add_enabled_units(imperial): ... u.m.find_equivalent_units() ... Primary name | Unit definition | Aliases [ AU | 1.49598e+11 m | au, astronomical_unit , Angstrom | 1e-10 m | AA, angstrom , cm | 0.01 m | centimeter , earthRad | 6.3781e+06 m | R_earth, Rearth , ft | 0.3048 m | foot , fur | 201.168 m | furlong , inch | 0.0254 m | , jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter , lsec | 2.99792e+08 m | lightsecond , lyr | 9.46073e+15 m | lightyear , m | irreducible | meter , mi | 1609.34 m | mile , micron | 1e-06 m | , mil | 2.54e-05 m | thou , nmi | 1852 m | nauticalmile, NM , pc | 3.08568e+16 m | parsec , solRad | 6.957e+08 m | R_sun, Rsun , yd | 0.9144 m | yard , ] """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further units requested get_current_unit_registry().add_enabled_units(units) return context def set_enabled_equivalencies(equivalencies): """ Sets the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Use with care. Parameters ---------- equivalencies : list of tuple list of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. Examples -------- Exponentiation normally requires dimensionless quantities. To avoid problems with complex phases:: >>> from astropy import units as u >>> with u.set_enabled_equivalencies(u.dimensionless_angles()): ... phase = 0.5 * u.cycle ... 
np.exp(1j*phase) # doctest: +FLOAT_CMP <Quantity -1.+1.2246468e-16j> """ # get a context with a new registry, using all units of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the equivalencies requested get_current_unit_registry().set_enabled_equivalencies(equivalencies) return context def add_enabled_equivalencies(equivalencies): """ Adds to the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Since no equivalencies are enabled by default, generally it is recommended to use `set_enabled_equivalencies`. Parameters ---------- equivalencies : list of tuple list of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further equivalencies requested get_current_unit_registry().add_enabled_equivalencies(equivalencies) return context def set_enabled_aliases(aliases): """ Set aliases for units. This is useful for handling alternate spellings for units, or misspelled units in files one is trying to read. Parameters ---------- aliases : dict of str, Unit The aliases to set. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. Examples -------- To temporarily allow for a misspelled 'Angstroem' unit:: >>> from astropy import units as u >>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}): ... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom) True """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further equivalencies requested get_current_unit_registry().set_enabled_aliases(aliases) return context def add_enabled_aliases(aliases): """ Add aliases for units. This is useful for handling alternate spellings for units, or misspelled units in files one is trying to read. Since no aliases are enabled by default, generally it is recommended to use `set_enabled_aliases`. Parameters ---------- aliases : dict of str, Unit The aliases to add. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. Examples -------- To temporarily allow for a misspelled 'Angstroem' unit:: >>> from astropy import units as u >>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}): ... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom) True """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further equivalencies requested get_current_unit_registry().add_enabled_aliases(aliases) return context class UnitsError(Exception): """ The base class for unit-specific exceptions. """ class UnitScaleError(UnitsError, ValueError): """ Used to catch the errors involving scaled units, which are not recognized by FITS format. 
""" pass class UnitConversionError(UnitsError, ValueError): """ Used specifically for errors related to converting between units or interpreting units in terms of other units. """ class UnitTypeError(UnitsError, TypeError): """ Used specifically for errors in setting to units not allowed by a class. E.g., would be raised if the unit of an `~astropy.coordinates.Angle` instances were set to a non-angular unit. """ class UnitsWarning(AstropyWarning): """ The base class for unit-specific warnings. """ class UnitBase: """ Abstract base class for units. Most of the arithmetic operations on units are defined in this base class. Should not be instantiated by users directly. """ # Make sure that __rmul__ of units gets called over the __mul__ of Numpy # arrays to avoid element-wise multiplication. __array_priority__ = 1000 _hash = None def __deepcopy__(self, memo): # This may look odd, but the units conversion will be very # broken after deep-copying if we don't guarantee that a given # physical unit corresponds to only one instance return self def _repr_latex_(self): """ Generate latex representation of unit name. This is used by the IPython notebook to print a unit with a nice layout. Returns ------- Latex string """ return unit_format.Latex.to_string(self) def __bytes__(self): """Return string representation for unit""" return unit_format.Generic.to_string(self).encode('unicode_escape') def __str__(self): """Return string representation for unit""" return unit_format.Generic.to_string(self) def __repr__(self): string = unit_format.Generic.to_string(self) return f'Unit("{string}")' def _get_physical_type_id(self): """ Returns an identifier that uniquely identifies the physical type of this unit. It is comprised of the bases and powers of this unit, without the scale. Since it is hashable, it is useful as a dictionary key. """ unit = self.decompose() r = zip([x.name for x in unit.bases], unit.powers) # bases and powers are already sorted in a unique way # r.sort() r = tuple(r) return r @property def names(self): """ Returns all of the names associated with this unit. """ raise AttributeError( "Can not get names from unnamed units. " "Perhaps you meant to_string()?") @property def name(self): """ Returns the canonical (short) name associated with this unit. """ raise AttributeError( "Can not get names from unnamed units. " "Perhaps you meant to_string()?") @property def aliases(self): """ Returns the alias (long) names for this unit. """ raise AttributeError( "Can not get aliases from unnamed units. " "Perhaps you meant to_string()?") @property def scale(self): """ Return the scale of the unit. """ return 1.0 @property def bases(self): """ Return the bases of the unit. """ return [self] @property def powers(self): """ Return the powers of the unit. """ return [1] def to_string(self, format=unit_format.Generic): """ Output the unit in the given format as a string. Parameters ---------- format : `astropy.units.format.Base` instance or str The name of a format or a formatter object. If not provided, defaults to the generic format. 
""" f = unit_format.get_format(format) return f.to_string(self) def __format__(self, format_spec): """Try to format units using a formatter.""" try: return self.to_string(format=format_spec) except ValueError: return format(str(self), format_spec) @staticmethod def _normalize_equivalencies(equivalencies): """ Normalizes equivalencies, ensuring each is a 4-tuple of the form:: (from_unit, to_unit, forward_func, backward_func) Parameters ---------- equivalencies : list of equivalency pairs, or None Returns ------- A normalized list, including possible global defaults set by, e.g., `set_enabled_equivalencies`, except when `equivalencies`=`None`, in which case the returned list is always empty. Raises ------ ValueError if an equivalency cannot be interpreted """ normalized = _normalize_equivalencies(equivalencies) if equivalencies is not None: normalized += get_current_unit_registry().equivalencies return normalized def __pow__(self, p): p = validate_power(p) return CompositeUnit(1, [self], [p], _error_check=False) def __truediv__(self, m): if isinstance(m, (bytes, str)): m = Unit(m) if isinstance(m, UnitBase): if m.is_unity(): return self return CompositeUnit(1, [self, m], [1, -1], _error_check=False) try: # Cannot handle this as Unit, re-try as Quantity from .quantity import Quantity return Quantity(1, self) / m except TypeError: return NotImplemented def __rtruediv__(self, m): if isinstance(m, (bytes, str)): return Unit(m) / self try: # Cannot handle this as Unit. Here, m cannot be a Quantity, # so we make it into one, fasttracking when it does not have a # unit, for the common case of <array> / <unit>. from .quantity import Quantity if hasattr(m, 'unit'): result = Quantity(m) result /= self return result else: return Quantity(m, self**(-1)) except TypeError: return NotImplemented def __mul__(self, m): if isinstance(m, (bytes, str)): m = Unit(m) if isinstance(m, UnitBase): if m.is_unity(): return self elif self.is_unity(): return m return CompositeUnit(1, [self, m], [1, 1], _error_check=False) # Cannot handle this as Unit, re-try as Quantity. try: from .quantity import Quantity return Quantity(1, self) * m except TypeError: return NotImplemented def __rmul__(self, m): if isinstance(m, (bytes, str)): return Unit(m) * self # Cannot handle this as Unit. Here, m cannot be a Quantity, # so we make it into one, fasttracking when it does not have a unit # for the common case of <array> * <unit>. try: from .quantity import Quantity if hasattr(m, 'unit'): result = Quantity(m) result *= self return result else: return Quantity(m, self) except TypeError: return NotImplemented def __rlshift__(self, m): try: from .quantity import Quantity return Quantity(m, self, copy=False, subok=True) except Exception: return NotImplemented def __rrshift__(self, m): warnings.warn(">> is not implemented. Did you mean to convert " "to a Quantity with unit {} using '<<'?".format(self), AstropyWarning) return NotImplemented def __hash__(self): if self._hash is None: parts = ([str(self.scale)] + [x.name for x in self.bases] + [str(x) for x in self.powers]) self._hash = hash(tuple(parts)) return self._hash def __getstate__(self): # If we get pickled, we should *not* store the memoized hash since # hashes of strings vary between sessions. 
state = self.__dict__.copy() state.pop('_hash', None) return state def __eq__(self, other): if self is other: return True try: other = Unit(other, parse_strict='silent') except (ValueError, UnitsError, TypeError): return NotImplemented # Other is unit-like, but the test below requires it is a UnitBase # instance; if it is not, give up (so that other can try). if not isinstance(other, UnitBase): return NotImplemented try: return is_effectively_unity(self._to(other)) except UnitsError: return False def __ne__(self, other): return not (self == other) def __le__(self, other): scale = self._to(Unit(other)) return scale <= 1. or is_effectively_unity(scale) def __ge__(self, other): scale = self._to(Unit(other)) return scale >= 1. or is_effectively_unity(scale) def __lt__(self, other): return not (self >= other) def __gt__(self, other): return not (self <= other) def __neg__(self): return self * -1. def is_equivalent(self, other, equivalencies=[]): """ Returns `True` if this unit is equivalent to ``other``. Parameters ---------- other : `~astropy.units.Unit`, str, or tuple The unit to convert to. If a tuple of units is specified, this method returns true if the unit matches any of those in the tuple. equivalencies : list of tuple A list of equivalence pairs to try if the units are not directly convertible. See :ref:`astropy:unit_equivalencies`. This list is in addition to possible global defaults set by, e.g., `set_enabled_equivalencies`. Use `None` to turn off all equivalencies. Returns ------- bool """ equivalencies = self._normalize_equivalencies(equivalencies) if isinstance(other, tuple): return any(self.is_equivalent(u, equivalencies=equivalencies) for u in other) other = Unit(other, parse_strict='silent') return self._is_equivalent(other, equivalencies) def _is_equivalent(self, other, equivalencies=[]): """Returns `True` if this unit is equivalent to `other`. See `is_equivalent`, except that a proper Unit object should be given (i.e., no string) and that the equivalency list should be normalized using `_normalize_equivalencies`. """ if isinstance(other, UnrecognizedUnit): return False if (self._get_physical_type_id() == other._get_physical_type_id()): return True elif len(equivalencies): unit = self.decompose() other = other.decompose() for a, b, forward, backward in equivalencies: if b is None: # after canceling, is what's left convertible # to dimensionless (according to the equivalency)? try: (other/unit).decompose([a]) return True except Exception: pass else: if(a._is_equivalent(unit) and b._is_equivalent(other) or b._is_equivalent(unit) and a._is_equivalent(other)): return True return False def _apply_equivalencies(self, unit, other, equivalencies): """ Internal function (used from `_get_converter`) to apply equivalence pairs. """ def make_converter(scale1, func, scale2): def convert(v): return func(_condition_arg(v) / scale1) * scale2 return convert for funit, tunit, a, b in equivalencies: if tunit is None: try: ratio_in_funit = (other.decompose() / unit.decompose()).decompose([funit]) return make_converter(ratio_in_funit.scale, a, 1.) 
except UnitsError: pass else: try: scale1 = funit._to(unit) scale2 = tunit._to(other) return make_converter(scale1, a, scale2) except UnitsError: pass try: scale1 = tunit._to(unit) scale2 = funit._to(other) return make_converter(scale1, b, scale2) except UnitsError: pass def get_err_str(unit): unit_str = unit.to_string('unscaled') physical_type = unit.physical_type if physical_type != 'unknown': unit_str = f"'{unit_str}' ({physical_type})" else: unit_str = f"'{unit_str}'" return unit_str unit_str = get_err_str(unit) other_str = get_err_str(other) raise UnitConversionError( f"{unit_str} and {other_str} are not convertible") def _get_converter(self, other, equivalencies=[]): """Get a converter for values in ``self`` to ``other``. If no conversion is necessary, returns ``unit_scale_converter`` (which is used as a check in quantity helpers). """ # First see if it is just a scaling. try: scale = self._to(other) except UnitsError: pass else: if scale == 1.: return unit_scale_converter else: return lambda val: scale * _condition_arg(val) # if that doesn't work, maybe we can do it with equivalencies? try: return self._apply_equivalencies( self, other, self._normalize_equivalencies(equivalencies)) except UnitsError as exc: # Last hope: maybe other knows how to do it? # We assume the equivalencies have the unit itself as first item. # TODO: maybe better for other to have a `_back_converter` method? if hasattr(other, 'equivalencies'): for funit, tunit, a, b in other.equivalencies: if other is funit: try: return lambda v: b(self._get_converter( tunit, equivalencies=equivalencies)(v)) except Exception: pass raise exc def _to(self, other): """ Returns the scale to the specified unit. See `to`, except that a Unit object should be given (i.e., no string), and that all defaults are used, i.e., no equivalencies and value=1. """ # There are many cases where we just want to ensure a Quantity is # of a particular unit, without checking whether it's already in # a particular unit. If we're being asked to convert from a unit # to itself, we can short-circuit all of this. if self is other: return 1.0 # Don't presume decomposition is possible; e.g., # conversion to function units is through equivalencies. if isinstance(other, UnitBase): self_decomposed = self.decompose() other_decomposed = other.decompose() # Check quickly whether equivalent. This is faster than # `is_equivalent`, because it doesn't generate the entire # physical type list of both units. In other words it "fails # fast". if(self_decomposed.powers == other_decomposed.powers and all(self_base is other_base for (self_base, other_base) in zip(self_decomposed.bases, other_decomposed.bases))): return self_decomposed.scale / other_decomposed.scale raise UnitConversionError( f"'{self!r}' is not a scaled version of '{other!r}'") def to(self, other, value=UNITY, equivalencies=[]): """ Return the converted values in the specified unit. Parameters ---------- other : unit-like The unit to convert to. value : int, float, or scalar array-like, optional Value(s) in the current unit to be converted to the specified unit. If not provided, defaults to 1.0 equivalencies : list of tuple A list of equivalence pairs to try if the units are not directly convertible. See :ref:`astropy:unit_equivalencies`. This list is in addition to possible global defaults set by, e.g., `set_enabled_equivalencies`. Use `None` to turn off all equivalencies. Returns ------- values : scalar or array Converted value(s). Input value sequences are returned as numpy arrays. 
        Raises
        ------
        UnitsError
            If units are inconsistent
        """
        if other is self and value is UNITY:
            return UNITY
        else:
            return self._get_converter(Unit(other),
                                       equivalencies=equivalencies)(value)

    def in_units(self, other, value=1.0, equivalencies=[]):
        """
        Alias for `to` for backward compatibility with pynbody.
        """
        return self.to(
            other, value=value, equivalencies=equivalencies)

    def decompose(self, bases=set()):
        """
        Return a unit object composed of only irreducible units.

        Parameters
        ----------
        bases : sequence of UnitBase, optional
            The bases to decompose into.  When not provided,
            decomposes down to any irreducible units.  When provided,
            the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it's not possible
            to do so.

        Returns
        -------
        unit : `~astropy.units.CompositeUnit`
            New object containing only irreducible unit objects.
        """
        raise NotImplementedError()

    def _compose(self, equivalencies=[], namespace=[], max_depth=2, depth=0,
                 cached_results=None):
        def is_final_result(unit):
            # Returns True if this result contains only the expected
            # units
            for base in unit.bases:
                if base not in namespace:
                    return False
            return True

        unit = self.decompose()
        key = hash(unit)

        cached = cached_results.get(key)
        if cached is not None:
            if isinstance(cached, Exception):
                raise cached
            return cached

        # Prevent too many levels of recursion
        # And special case for dimensionless unit
        if depth >= max_depth:
            cached_results[key] = [unit]
            return [unit]

        # Make a list including all of the equivalent units
        units = [unit]
        for funit, tunit, a, b in equivalencies:
            if tunit is not None:
                if self._is_equivalent(funit):
                    scale = funit.decompose().scale / unit.scale
                    units.append(Unit(a(1.0 / scale) * tunit).decompose())
                elif self._is_equivalent(tunit):
                    scale = tunit.decompose().scale / unit.scale
                    units.append(Unit(b(1.0 / scale) * funit).decompose())
            else:
                if self._is_equivalent(funit):
                    units.append(Unit(unit.scale))

        # Store partial results
        partial_results = []
        # Store final results that reduce to a single unit or pair of
        # units
        if len(unit.bases) == 0:
            final_results = [set([unit]), set()]
        else:
            final_results = [set(), set()]

        for tunit in namespace:
            tunit_decomposed = tunit.decompose()
            for u in units:
                # If the unit is a base unit, look for an exact match
                # to one of the bases of the target unit.  If found,
                # factor by the same power as the target unit's base.
                # This allows us to factor out fractional powers
                # without needing to do an exhaustive search.
                if len(tunit_decomposed.bases) == 1:
                    for base, power in zip(u.bases, u.powers):
                        if tunit_decomposed._is_equivalent(base):
                            tunit = tunit ** power
                            tunit_decomposed = tunit_decomposed ** power
                            break

                composed = (u / tunit_decomposed).decompose()
                factored = composed * tunit
                len_bases = len(composed.bases)
                if is_final_result(factored) and len_bases <= 1:
                    final_results[len_bases].add(factored)
                else:
                    partial_results.append(
                        (len_bases, composed, tunit))

        # Do we have any minimal results?
for final_result in final_results: if len(final_result): results = final_results[0].union(final_results[1]) cached_results[key] = results return results partial_results.sort(key=operator.itemgetter(0)) # ...we have to recurse and try to further compose results = [] for len_bases, composed, tunit in partial_results: try: composed_list = composed._compose( equivalencies=equivalencies, namespace=namespace, max_depth=max_depth, depth=depth + 1, cached_results=cached_results) except UnitsError: composed_list = [] for subcomposed in composed_list: results.append( (len(subcomposed.bases), subcomposed, tunit)) if len(results): results.sort(key=operator.itemgetter(0)) min_length = results[0][0] subresults = set() for len_bases, composed, tunit in results: if len_bases > min_length: break else: factored = composed * tunit if is_final_result(factored): subresults.add(factored) if len(subresults): cached_results[key] = subresults return subresults if not is_final_result(self): result = UnitsError( f"Cannot represent unit {self} in terms of the given units") cached_results[key] = result raise result cached_results[key] = [self] return [self] def compose(self, equivalencies=[], units=None, max_depth=2, include_prefix_units=None): """ Return the simplest possible composite unit(s) that represent the given unit. Since there may be multiple equally simple compositions of the unit, a list of units is always returned. Parameters ---------- equivalencies : list of tuple A list of equivalence pairs to also list. See :ref:`astropy:unit_equivalencies`. This list is in addition to possible global defaults set by, e.g., `set_enabled_equivalencies`. Use `None` to turn off all equivalencies. units : set of `~astropy.units.Unit`, optional If not provided, any known units may be used to compose into. Otherwise, ``units`` is a dict, module or sequence containing the units to compose into. max_depth : int, optional The maximum recursion depth to use when composing into composite units. include_prefix_units : bool, optional When `True`, include prefixed units in the result. Default is `True` if a sequence is passed in to ``units``, `False` otherwise. Returns ------- units : list of `CompositeUnit` A list of candidate compositions. These will all be equally simple, but it may not be possible to automatically determine which of the candidates are better. """ # if units parameter is specified and is a sequence (list|tuple), # include_prefix_units is turned on by default. Ex: units=[u.kpc] if include_prefix_units is None: include_prefix_units = isinstance(units, (list, tuple)) # Pre-normalize the equivalencies list equivalencies = self._normalize_equivalencies(equivalencies) # The namespace of units to compose into should be filtered to # only include units with bases in common with self, otherwise # they can't possibly provide useful results. Having too many # destination units greatly increases the search space. 
def has_bases_in_common(a, b): if len(a.bases) == 0 and len(b.bases) == 0: return True for ab in a.bases: for bb in b.bases: if ab == bb: return True return False def has_bases_in_common_with_equiv(unit, other): if has_bases_in_common(unit, other): return True for funit, tunit, a, b in equivalencies: if tunit is not None: if unit._is_equivalent(funit): if has_bases_in_common(tunit.decompose(), other): return True elif unit._is_equivalent(tunit): if has_bases_in_common(funit.decompose(), other): return True else: if unit._is_equivalent(funit): if has_bases_in_common(dimensionless_unscaled, other): return True return False def filter_units(units): filtered_namespace = set() for tunit in units: if (isinstance(tunit, UnitBase) and (include_prefix_units or not isinstance(tunit, PrefixUnit)) and has_bases_in_common_with_equiv( decomposed, tunit.decompose())): filtered_namespace.add(tunit) return filtered_namespace decomposed = self.decompose() if units is None: units = filter_units(self._get_units_with_same_physical_type( equivalencies=equivalencies)) if len(units) == 0: units = get_current_unit_registry().non_prefix_units elif isinstance(units, dict): units = set(filter_units(units.values())) elif inspect.ismodule(units): units = filter_units(vars(units).values()) else: units = filter_units(_flatten_units_collection(units)) def sort_results(results): if not len(results): return [] # Sort the results so the simplest ones appear first. # Simplest is defined as "the minimum sum of absolute # powers" (i.e. the fewest bases), and preference should # be given to results where the sum of powers is positive # and the scale is exactly equal to 1.0 results = list(results) results.sort(key=lambda x: np.abs(x.scale)) results.sort(key=lambda x: np.sum(np.abs(x.powers))) results.sort(key=lambda x: np.sum(x.powers) < 0.0) results.sort(key=lambda x: not is_effectively_unity(x.scale)) last_result = results[0] filtered = [last_result] for result in results[1:]: if str(result) != str(last_result): filtered.append(result) last_result = result return filtered return sort_results(self._compose( equivalencies=equivalencies, namespace=units, max_depth=max_depth, depth=0, cached_results={})) def to_system(self, system): """ Converts this unit into ones belonging to the given system. Since more than one result may be possible, a list is always returned. Parameters ---------- system : module The module that defines the unit system. Commonly used ones include `astropy.units.si` and `astropy.units.cgs`. To use your own module it must contain unit objects and a sequence member named ``bases`` containing the base units of the system. Returns ------- units : list of `CompositeUnit` The list is ranked so that units containing only the base units of that system will appear first. """ bases = set(system.bases) def score(compose): # In case that compose._bases has no elements we return # 'np.inf' as 'score value'. It does not really matter which # number we would return. This case occurs for instance for # dimensionless quantities: compose_bases = compose.bases if len(compose_bases) == 0: return np.inf else: sum = 0 for base in compose_bases: if base in bases: sum += 1 return sum / float(len(compose_bases)) x = self.decompose(bases=bases) composed = x.compose(units=system) composed = sorted(composed, key=score, reverse=True) return composed @lazyproperty def si(self): """ Returns a copy of the current `Unit` instance in SI units. """ from . 
import si return self.to_system(si)[0] @lazyproperty def cgs(self): """ Returns a copy of the current `Unit` instance with CGS units. """ from . import cgs return self.to_system(cgs)[0] @property def physical_type(self): """ Physical type(s) dimensionally compatible with the unit. Returns ------- `~astropy.units.physical.PhysicalType` A representation of the physical type(s) of a unit. Examples -------- >>> from astropy import units as u >>> u.m.physical_type PhysicalType('length') >>> (u.m ** 2 / u.s).physical_type PhysicalType({'diffusivity', 'kinematic viscosity'}) Physical types can be compared to other physical types (recommended in packages) or to strings. >>> area = (u.m ** 2).physical_type >>> area == u.m.physical_type ** 2 True >>> area == "area" True `~astropy.units.physical.PhysicalType` objects can be used for dimensional analysis. >>> number_density = u.m.physical_type ** -3 >>> velocity = (u.m / u.s).physical_type >>> number_density * velocity PhysicalType('particle flux') """ from . import physical return physical.get_physical_type(self) def _get_units_with_same_physical_type(self, equivalencies=[]): """ Return a list of registered units with the same physical type as this unit. This function is used by Quantity to add its built-in conversions to equivalent units. This is a private method, since end users should be encouraged to use the more powerful `compose` and `find_equivalent_units` methods (which use this under the hood). Parameters ---------- equivalencies : list of tuple A list of equivalence pairs to also pull options from. See :ref:`astropy:unit_equivalencies`. It must already be normalized using `_normalize_equivalencies`. """ unit_registry = get_current_unit_registry() units = set(unit_registry.get_units_with_physical_type(self)) for funit, tunit, a, b in equivalencies: if tunit is not None: if self.is_equivalent(funit) and tunit not in units: units.update( unit_registry.get_units_with_physical_type(tunit)) if self._is_equivalent(tunit) and funit not in units: units.update( unit_registry.get_units_with_physical_type(funit)) else: if self.is_equivalent(funit): units.add(dimensionless_unscaled) return units class EquivalentUnitsList(list): """ A class to handle pretty-printing the result of `find_equivalent_units`. """ HEADING_NAMES = ('Primary name', 'Unit definition', 'Aliases') ROW_LEN = 3 # len(HEADING_NAMES), but hard-code since it is constant NO_EQUIV_UNITS_MSG = 'There are no equivalent units' def __repr__(self): if len(self) == 0: return self.NO_EQUIV_UNITS_MSG else: lines = self._process_equivalent_units(self) lines.insert(0, self.HEADING_NAMES) widths = [0] * self.ROW_LEN for line in lines: for i, col in enumerate(line): widths[i] = max(widths[i], len(col)) f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths) lines = [f.format(*line) for line in lines] lines = (lines[0:1] + ['['] + [f'{x} ,' for x in lines[1:]] + [']']) return '\n'.join(lines) def _repr_html_(self): """ Outputs a HTML table representation within Jupyter notebooks. """ if len(self) == 0: return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>" else: # HTML tags to use to compose the table in HTML blank_table = '<table style="width:50%">{}</table>' blank_row_container = "<tr>{}</tr>" heading_row_content = "<th>{}</th>" * self.ROW_LEN data_row_content = "<td>{}</td>" * self.ROW_LEN # The HTML will be rendered & the table is simple, so don't # bother to include newlines & indentation for the HTML code. 
heading_row = blank_row_container.format( heading_row_content.format(*self.HEADING_NAMES)) data_rows = self._process_equivalent_units(self) all_rows = heading_row for row in data_rows: html_row = blank_row_container.format( data_row_content.format(*row)) all_rows += html_row return blank_table.format(all_rows) @staticmethod def _process_equivalent_units(equiv_units_data): """ Extract attributes, and sort, the equivalent units pre-formatting. """ processed_equiv_units = [] for u in equiv_units_data: irred = u.decompose().to_string() if irred == u.name: irred = 'irreducible' processed_equiv_units.append( (u.name, irred, ', '.join(u.aliases))) processed_equiv_units.sort() return processed_equiv_units def find_equivalent_units(self, equivalencies=[], units=None, include_prefix_units=False): """ Return a list of all the units that are the same type as ``self``. Parameters ---------- equivalencies : list of tuple A list of equivalence pairs to also list. See :ref:`astropy:unit_equivalencies`. Any list given, including an empty one, supersedes global defaults that may be in effect (as set by `set_enabled_equivalencies`) units : set of `~astropy.units.Unit`, optional If not provided, all defined units will be searched for equivalencies. Otherwise, may be a dict, module or sequence containing the units to search for equivalencies. include_prefix_units : bool, optional When `True`, include prefixed units in the result. Default is `False`. Returns ------- units : list of `UnitBase` A list of unit objects that match ``u``. A subclass of `list` (``EquivalentUnitsList``) is returned that pretty-prints the list of units when output. """ results = self.compose( equivalencies=equivalencies, units=units, max_depth=1, include_prefix_units=include_prefix_units) results = set( x.bases[0] for x in results if len(x.bases) == 1) return self.EquivalentUnitsList(results) def is_unity(self): """ Returns `True` if the unit is unscaled and dimensionless. """ return False class NamedUnit(UnitBase): """ The base class of units that have a name. Parameters ---------- st : str, list of str, 2-tuple The name of the unit. If a list of strings, the first element is the canonical (short) name, and the rest of the elements are aliases. If a tuple of lists, the first element is a list of short names, and the second element is a list of long names; all but the first short name are considered "aliases". Each name *should* be a valid Python identifier to make it easy to access, but this is not required. namespace : dict, optional When provided, inject the unit, and all of its aliases, in the given namespace dictionary. If a unit by the same name is already in the namespace, a ValueError is raised. doc : str, optional A docstring describing the unit. format : dict, optional A mapping to format-specific representations of this unit. For example, for the ``Ohm`` unit, it might be nice to have it displayed as ``\\Omega`` by the ``latex`` formatter. In that case, `format` argument should be set to:: {'latex': r'\\Omega'} Raises ------ ValueError If any of the given unit names are already in the registry. ValueError If any of the given unit names are not valid Python tokens. 
""" def __init__(self, st, doc=None, format=None, namespace=None): UnitBase.__init__(self) if isinstance(st, (bytes, str)): self._names = [st] self._short_names = [st] self._long_names = [] elif isinstance(st, tuple): if not len(st) == 2: raise ValueError("st must be string, list or 2-tuple") self._names = st[0] + [n for n in st[1] if n not in st[0]] if not len(self._names): raise ValueError("must provide at least one name") self._short_names = st[0][:] self._long_names = st[1][:] else: if len(st) == 0: raise ValueError( "st list must have at least one entry") self._names = st[:] self._short_names = [st[0]] self._long_names = st[1:] if format is None: format = {} self._format = format if doc is None: doc = self._generate_doc() else: doc = textwrap.dedent(doc) doc = textwrap.fill(doc) self.__doc__ = doc self._inject(namespace) def _generate_doc(self): """ Generate a docstring for the unit if the user didn't supply one. This is only used from the constructor and may be overridden in subclasses. """ names = self.names if len(self.names) > 1: return "{1} ({0})".format(*names[:2]) else: return names[0] def get_format_name(self, format): """ Get a name for this unit that is specific to a particular format. Uses the dictionary passed into the `format` kwarg in the constructor. Parameters ---------- format : str The name of the format Returns ------- name : str The name of the unit for the given format. """ return self._format.get(format, self.name) @property def names(self): """ Returns all of the names associated with this unit. """ return self._names @property def name(self): """ Returns the canonical (short) name associated with this unit. """ return self._names[0] @property def aliases(self): """ Returns the alias (long) names for this unit. """ return self._names[1:] @property def short_names(self): """ Returns all of the short names associated with this unit. """ return self._short_names @property def long_names(self): """ Returns all of the long names associated with this unit. """ return self._long_names def _inject(self, namespace=None): """ Injects the unit, and all of its aliases, in the given namespace dictionary. """ if namespace is None: return # Loop through all of the names first, to ensure all of them # are new, then add them all as a single "transaction" below. for name in self._names: if name in namespace and self != namespace[name]: raise ValueError( "Object with name {!r} already exists in " "given namespace ({!r}).".format( name, namespace[name])) for name in self._names: namespace[name] = self def _recreate_irreducible_unit(cls, names, registered): """ This is used to reconstruct units when passed around by multiprocessing. """ registry = get_current_unit_registry().registry if names[0] in registry: # If in local registry return that object. return registry[names[0]] else: # otherwise, recreate the unit. unit = cls(names) if registered: # If not in local registry but registered in origin registry, # enable unit in local registry. get_current_unit_registry().add_enabled_units([unit]) return unit class IrreducibleUnit(NamedUnit): """ Irreducible units are the units that all other units are defined in terms of. Examples are meters, seconds, kilograms, amperes, etc. There is only once instance of such a unit per type. """ def __reduce__(self): # When IrreducibleUnit objects are passed to other processes # over multiprocessing, they need to be recreated to be the # ones already in the subprocesses' namespace, not new # objects, or they will be considered "unconvertible". 
# Therefore, we have a custom pickler/unpickler that # understands how to recreate the Unit on the other side. registry = get_current_unit_registry().registry return (_recreate_irreducible_unit, (self.__class__, list(self.names), self.name in registry), self.__getstate__()) @property def represents(self): """The unit that this named unit represents. For an irreducible unit, that is always itself. """ return self def decompose(self, bases=set()): if len(bases) and self not in bases: for base in bases: try: scale = self._to(base) except UnitsError: pass else: if is_effectively_unity(scale): return base else: return CompositeUnit(scale, [base], [1], _error_check=False) raise UnitConversionError( f"Unit {self} can not be decomposed into the requested bases") return self class UnrecognizedUnit(IrreducibleUnit): """ A unit that did not parse correctly. This allows for round-tripping it as a string, but no unit operations actually work on it. Parameters ---------- st : str The name of the unit. """ # For UnrecognizedUnits, we want to use "standard" Python # pickling, not the special case that is used for # IrreducibleUnits. __reduce__ = object.__reduce__ def __repr__(self): return f"UnrecognizedUnit({str(self)})" def __bytes__(self): return self.name.encode('ascii', 'replace') def __str__(self): return self.name def to_string(self, format=None): return self.name def _unrecognized_operator(self, *args, **kwargs): raise ValueError( "The unit {!r} is unrecognized, so all arithmetic operations " "with it are invalid.".format(self.name)) __pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = __lt__ = \ __gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator def __eq__(self, other): try: other = Unit(other, parse_strict='silent') except (ValueError, UnitsError, TypeError): return NotImplemented return isinstance(other, type(self)) and self.name == other.name def __ne__(self, other): return not (self == other) def is_equivalent(self, other, equivalencies=None): self._normalize_equivalencies(equivalencies) return self == other def _get_converter(self, other, equivalencies=None): self._normalize_equivalencies(equivalencies) raise ValueError( "The unit {!r} is unrecognized. It can not be converted " "to other units.".format(self.name)) def get_format_name(self, format): return self.name def is_unity(self): return False class _UnitMetaClass(type): """ This metaclass exists because the Unit constructor should sometimes return instances that already exist. This "overrides" the constructor before the new instance is actually created, so we can return an existing one. """ def __call__(self, s="", represents=None, format=None, namespace=None, doc=None, parse_strict='raise'): # Short-circuit if we're already a unit if hasattr(s, '_get_physical_type_id'): return s # turn possible Quantity input for s or represents into a Unit from .quantity import Quantity if isinstance(represents, Quantity): if is_effectively_unity(represents.value): represents = represents.unit else: represents = CompositeUnit(represents.value * represents.unit.scale, bases=represents.unit.bases, powers=represents.unit.powers, _error_check=False) if isinstance(s, Quantity): if is_effectively_unity(s.value): s = s.unit else: s = CompositeUnit(s.value * s.unit.scale, bases=s.unit.bases, powers=s.unit.powers, _error_check=False) # now decide what we really need to do; define derived Unit? if isinstance(represents, UnitBase): # This has the effect of calling the real __new__ and # __init__ on the Unit class. 
return super().__call__( s, represents, format=format, namespace=namespace, doc=doc) # or interpret a Quantity (now became unit), string or number? if isinstance(s, UnitBase): return s elif isinstance(s, (bytes, str)): if len(s.strip()) == 0: # Return the NULL unit return dimensionless_unscaled if format is None: format = unit_format.Generic f = unit_format.get_format(format) if isinstance(s, bytes): s = s.decode('ascii') try: return f.parse(s) except NotImplementedError: raise except Exception as e: if parse_strict == 'silent': pass else: # Deliberately not issubclass here. Subclasses # should use their name. if f is not unit_format.Generic: format_clause = f.name + ' ' else: format_clause = '' msg = ("'{}' did not parse as {}unit: {} " "If this is meant to be a custom unit, " "define it with 'u.def_unit'. To have it " "recognized inside a file reader or other code, " "enable it with 'u.add_enabled_units'. " "For details, see " "https://docs.astropy.org/en/latest/units/combining_and_defining.html" .format(s, format_clause, str(e))) if parse_strict == 'raise': raise ValueError(msg) elif parse_strict == 'warn': warnings.warn(msg, UnitsWarning) else: raise ValueError("'parse_strict' must be 'warn', " "'raise' or 'silent'") return UnrecognizedUnit(s) elif isinstance(s, (int, float, np.floating, np.integer)): return CompositeUnit(s, [], [], _error_check=False) elif isinstance(s, tuple): from .structured import StructuredUnit return StructuredUnit(s) elif s is None: raise TypeError("None is not a valid Unit") else: raise TypeError(f"{s} can not be converted to a Unit") class Unit(NamedUnit, metaclass=_UnitMetaClass): """ The main unit class. There are a number of different ways to construct a Unit, but always returns a `UnitBase` instance. If the arguments refer to an already-existing unit, that existing unit instance is returned, rather than a new one. - From a string:: Unit(s, format=None, parse_strict='silent') Construct from a string representing a (possibly compound) unit. The optional `format` keyword argument specifies the format the string is in, by default ``"generic"``. For a description of the available formats, see `astropy.units.format`. The optional ``parse_strict`` keyword controls what happens when an unrecognized unit string is passed in. It may be one of the following: - ``'raise'``: (default) raise a ValueError exception. - ``'warn'``: emit a Warning, and return an `UnrecognizedUnit` instance. - ``'silent'``: return an `UnrecognizedUnit` instance. - From a number:: Unit(number) Creates a dimensionless unit. - From a `UnitBase` instance:: Unit(unit) Returns the given unit unchanged. - From no arguments:: Unit() Returns the dimensionless unit. - The last form, which creates a new `Unit` is described in detail below. See also: https://docs.astropy.org/en/stable/units/ Parameters ---------- st : str or list of str The name of the unit. If a list, the first element is the canonical (short) name, and the rest of the elements are aliases. represents : UnitBase instance The unit that this named unit represents. doc : str, optional A docstring describing the unit. format : dict, optional A mapping to format-specific representations of this unit. For example, for the ``Ohm`` unit, it might be nice to have it displayed as ``\\Omega`` by the ``latex`` formatter. In that case, `format` argument should be set to:: {'latex': r'\\Omega'} namespace : dict, optional When provided, inject the unit (and all of its aliases) into the given namespace. 
Raises ------ ValueError If any of the given unit names are already in the registry. ValueError If any of the given unit names are not valid Python tokens. """ def __init__(self, st, represents=None, doc=None, format=None, namespace=None): represents = Unit(represents) self._represents = represents NamedUnit.__init__(self, st, namespace=namespace, doc=doc, format=format) @property def represents(self): """The unit that this named unit represents.""" return self._represents def decompose(self, bases=set()): return self._represents.decompose(bases=bases) def is_unity(self): return self._represents.is_unity() def __hash__(self): if self._hash is None: self._hash = hash((self.name, self._represents)) return self._hash @classmethod def _from_physical_type_id(cls, physical_type_id): # get string bases and powers from the ID tuple bases = [cls(base) for base, _ in physical_type_id] powers = [power for _, power in physical_type_id] if len(physical_type_id) == 1 and powers[0] == 1: unit = bases[0] else: unit = CompositeUnit(1, bases, powers, _error_check=False) return unit class PrefixUnit(Unit): """ A unit that is simply a SI-prefixed version of another unit. For example, ``mm`` is a `PrefixUnit` of ``.001 * m``. The constructor is the same as for `Unit`. """ class CompositeUnit(UnitBase): """ Create a composite unit using expressions of previously defined units. Direct use of this class is not recommended. Instead use the factory function `Unit` and arithmetic operators to compose units. Parameters ---------- scale : number A scaling factor for the unit. bases : sequence of `UnitBase` A sequence of units this unit is composed of. powers : sequence of numbers A sequence of powers (in parallel with ``bases``) for each of the base units. """ _decomposed_cache = None def __init__(self, scale, bases, powers, decompose=False, decompose_bases=set(), _error_check=True): # There are many cases internal to astropy.units where we # already know that all the bases are Unit objects, and the # powers have been validated. In those cases, we can skip the # error checking for performance reasons. When the private # kwarg `_error_check` is False, the error checking is turned # off. if _error_check: for base in bases: if not isinstance(base, UnitBase): raise TypeError( "bases must be sequence of UnitBase instances") powers = [validate_power(p) for p in powers] if not decompose and len(bases) == 1 and powers[0] >= 0: # Short-cut; with one unit there's nothing to expand and gather, # as that has happened already when creating the unit. But do only # positive powers, since for negative powers we need to re-sort. unit = bases[0] power = powers[0] if power == 1: scale *= unit.scale self._bases = unit.bases self._powers = unit.powers elif power == 0: self._bases = [] self._powers = [] else: scale *= unit.scale ** power self._bases = unit.bases self._powers = [operator.mul(*resolve_fractions(p, power)) for p in unit.powers] self._scale = sanitize_scale(scale) else: # Regular case: use inputs as preliminary scale, bases, and powers, # then "expand and gather" identical bases, sanitize the scale, &c. self._scale = scale self._bases = bases self._powers = powers self._expand_and_gather(decompose=decompose, bases=decompose_bases) def __repr__(self): if len(self._bases): return super().__repr__() else: if self._scale != 1.0: return f'Unit(dimensionless with a scale of {self._scale})' else: return 'Unit(dimensionless)' @property def scale(self): """ Return the scale of the composite unit. 
""" return self._scale @property def bases(self): """ Return the bases of the composite unit. """ return self._bases @property def powers(self): """ Return the powers of the composite unit. """ return self._powers def _expand_and_gather(self, decompose=False, bases=set()): def add_unit(unit, power, scale): if bases and unit not in bases: for base in bases: try: scale *= unit._to(base) ** power except UnitsError: pass else: unit = base break if unit in new_parts: a, b = resolve_fractions(new_parts[unit], power) new_parts[unit] = a + b else: new_parts[unit] = power return scale new_parts = {} scale = self._scale for b, p in zip(self._bases, self._powers): if decompose and b not in bases: b = b.decompose(bases=bases) if isinstance(b, CompositeUnit): scale *= b._scale ** p for b_sub, p_sub in zip(b._bases, b._powers): a, b = resolve_fractions(p_sub, p) scale = add_unit(b_sub, a * b, scale) else: scale = add_unit(b, p, scale) new_parts = [x for x in new_parts.items() if x[1] != 0] new_parts.sort(key=lambda x: (-x[1], getattr(x[0], 'name', ''))) self._bases = [x[0] for x in new_parts] self._powers = [x[1] for x in new_parts] self._scale = sanitize_scale(scale) def __copy__(self): """ For compatibility with python copy module. """ return CompositeUnit(self._scale, self._bases[:], self._powers[:]) def decompose(self, bases=set()): if len(bases) == 0 and self._decomposed_cache is not None: return self._decomposed_cache for base in self.bases: if (not isinstance(base, IrreducibleUnit) or (len(bases) and base not in bases)): break else: if len(bases) == 0: self._decomposed_cache = self return self x = CompositeUnit(self.scale, self.bases, self.powers, decompose=True, decompose_bases=bases) if len(bases) == 0: self._decomposed_cache = x return x def is_unity(self): unit = self.decompose() return len(unit.bases) == 0 and unit.scale == 1.0 si_prefixes = [ (['Y'], ['yotta'], 1e24), (['Z'], ['zetta'], 1e21), (['E'], ['exa'], 1e18), (['P'], ['peta'], 1e15), (['T'], ['tera'], 1e12), (['G'], ['giga'], 1e9), (['M'], ['mega'], 1e6), (['k'], ['kilo'], 1e3), (['h'], ['hecto'], 1e2), (['da'], ['deka', 'deca'], 1e1), (['d'], ['deci'], 1e-1), (['c'], ['centi'], 1e-2), (['m'], ['milli'], 1e-3), (['u'], ['micro'], 1e-6), (['n'], ['nano'], 1e-9), (['p'], ['pico'], 1e-12), (['f'], ['femto'], 1e-15), (['a'], ['atto'], 1e-18), (['z'], ['zepto'], 1e-21), (['y'], ['yocto'], 1e-24) ] binary_prefixes = [ (['Ki'], ['kibi'], 2. ** 10), (['Mi'], ['mebi'], 2. ** 20), (['Gi'], ['gibi'], 2. ** 30), (['Ti'], ['tebi'], 2. ** 40), (['Pi'], ['pebi'], 2. ** 50), (['Ei'], ['exbi'], 2. ** 60) ] def _add_prefixes(u, excludes=[], namespace=None, prefixes=False): """ Set up all of the standard metric prefixes for a unit. This function should not be used directly, but instead use the `prefixes` kwarg on `def_unit`. Parameters ---------- excludes : list of str, optional Any prefixes to exclude from creation to avoid namespace collisions. namespace : dict, optional When provided, inject the unit (and all of its aliases) into the given namespace dictionary. prefixes : list, optional When provided, it is a list of prefix definitions of the form: (short_names, long_tables, factor) """ if prefixes is True: prefixes = si_prefixes elif prefixes is False: prefixes = [] for short, full, factor in prefixes: names = [] format = {} for prefix in short: if prefix in excludes: continue for alias in u.short_names: names.append(prefix + alias) # This is a hack to use Greek mu as a prefix # for some formatters. 
if prefix == 'u': format['latex'] = r'\mu ' + u.get_format_name('latex') format['unicode'] = '\N{MICRO SIGN}' + u.get_format_name('unicode') for key, val in u._format.items(): format.setdefault(key, prefix + val) for prefix in full: if prefix in excludes: continue for alias in u.long_names: names.append(prefix + alias) if len(names): PrefixUnit(names, CompositeUnit(factor, [u], [1], _error_check=False), namespace=namespace, format=format) def def_unit(s, represents=None, doc=None, format=None, prefixes=False, exclude_prefixes=[], namespace=None): """ Factory function for defining new units. Parameters ---------- s : str or list of str The name of the unit. If a list, the first element is the canonical (short) name, and the rest of the elements are aliases. represents : UnitBase instance, optional The unit that this named unit represents. If not provided, a new `IrreducibleUnit` is created. doc : str, optional A docstring describing the unit. format : dict, optional A mapping to format-specific representations of this unit. For example, for the ``Ohm`` unit, it might be nice to have it displayed as ``\\Omega`` by the ``latex`` formatter. In that case, `format` argument should be set to:: {'latex': r'\\Omega'} prefixes : bool or list, optional When `True`, generate all of the SI prefixed versions of the unit as well. For example, for a given unit ``m``, will generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of prefix definitions of the form: (short_names, long_tables, factor) Default is `False`. This function always returns the base unit object, even if multiple scaled versions of the unit were created. exclude_prefixes : list of str, optional If any of the SI prefixes need to be excluded, they may be listed here. For example, ``Pa`` can be interpreted either as "petaannum" or "Pascal". Therefore, when defining the prefixes for ``a``, ``exclude_prefixes`` should be set to ``["P"]``. namespace : dict, optional When provided, inject the unit (and all of its aliases and prefixes), into the given namespace dictionary. Returns ------- unit : `~astropy.units.UnitBase` The newly-defined unit, or a matching unit that was already defined. """ if represents is not None: result = Unit(s, represents, namespace=namespace, doc=doc, format=format) else: result = IrreducibleUnit( s, namespace=namespace, doc=doc, format=format) if prefixes: _add_prefixes(result, excludes=exclude_prefixes, namespace=namespace, prefixes=prefixes) return result def _condition_arg(value): """ Validate value is acceptable for conversion purposes. Will convert into an array if not a scalar, and can be converted into an array Parameters ---------- value : int or float value, or sequence of such values Returns ------- Scalar value or numpy array Raises ------ ValueError If value is not as expected """ if isinstance(value, (np.ndarray, float, int, complex, np.void)): return value avalue = np.array(value) if avalue.dtype.kind not in ['i', 'f', 'c']: raise ValueError("Value not scalar compatible or convertible to " "an int, float, or complex array") return avalue def unit_scale_converter(val): """Function that just multiplies the value by unity. This is a separate function so it can be recognized and discarded in unit conversion. """ return 1. * _condition_arg(val) dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False) # Abbreviation of the above, see #1980 one = dimensionless_unscaled # Maintain error in old location for backward compatibility # TODO: Is this still needed? 
# Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
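

# ---------------------------------------------------------------------------
# Illustrative usage sketch (an editor's addition, not part of the module's
# API): the machinery above -- ``Unit`` parsing, ``to()`` conversion, and
# equivalence checks -- is normally exercised through the top-level
# ``astropy.units`` namespace.  ``_usage_example`` is a hypothetical helper
# that only carries doctests and is never called at import time.
def _usage_example():  # pragma: no cover
    """
    Minimal sanity checks of the conversion machinery defined above.

    >>> import astropy.units as u
    >>> (u.km / u.s).to(u.m / u.s)    # scale-only conversion via _to()
    1000.0
    >>> u.m.is_equivalent(u.km)       # same physical type id
    True
    >>> u.m.is_equivalent(u.s)        # different bases, no equivalency
    False
    >>> u.Unit('m/s') == u.m / u.s    # string parsing through Unit()
    True
    """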
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines SI prefixed units that are required by the VOUnit standard but that are rarely used in practice and liable to lead to confusion (such as ``msolMass`` for milli-solar mass). They are in a separate module from `astropy.units.deprecated` because they need to be enabled by default for `astropy.units` to parse compliant VOUnit strings. As a result, e.g., ``Unit('msolMass')`` will just work, but to access the unit directly, use ``astropy.units.required_by_vounit.msolMass`` instead of the more typical idiom possible for the non-prefixed unit, ``astropy.units.solMass``. """ _ns = globals() def _initialize_module(): # Local imports to avoid polluting top-level namespace from . import cgs from . import astrophys from .core import def_unit, _add_prefixes _add_prefixes(astrophys.solMass, namespace=_ns, prefixes=True) _add_prefixes(astrophys.solRad, namespace=_ns, prefixes=True) _add_prefixes(astrophys.solLum, namespace=_ns, prefixes=True) _initialize_module() ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import (generate_unit_summary as _generate_unit_summary, generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary) if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) __doc__ += _generate_prefixonly_unit_summary(globals()) def _enable(): """ Enable the VOUnit-required extra units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`, and are recognized in the ``Unit('...')`` idiom. """ # Local import to avoid cyclical import from .core import add_enabled_units # Local import to avoid polluting namespace import inspect return add_enabled_units(inspect.getmodule(_enable)) # Because these are VOUnit mandated units, they start enabled (which is why the # function is hidden). _enable()
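

# ---------------------------------------------------------------------------
# Illustrative sketch (an editor's addition): because ``_enable()`` above runs
# at import time, the prefixed units defined here parse out of the box.
# ``_usage_example`` is a hypothetical helper holding doctests only.
def _usage_example():  # pragma: no cover
    """
    >>> import astropy.units as u
    >>> u.Unit('msolMass')                  # parses because _enable() ran
    Unit("msolMass")
    >>> from astropy.units import required_by_vounit as rbv
    >>> rbv.msolMass.represents.scale       # the milli- prefix factor
    0.001
    """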
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains classes and functions for defining and converting between different physical units. This code is adapted from the `pynbody <https://github.com/pynbody/pynbody>`_ units module written by Andrew Pontzen, who has granted the Astropy project permission to use the code under a BSD license. """ # Lots of things to import - go from more basic to advanced, so that # whatever advanced ones need generally has been imported already; # this helps prevent circular imports and makes it easier to understand # where most time is spent (e.g., using python -X importtime). from .core import * from .quantity import * from . import si from . import cgs from . import astrophys from . import photometric from . import misc from .function import units as function_units from .si import * from .astrophys import * from .photometric import * from .cgs import * from .physical import * from .function.units import * from .misc import * from .equivalencies import * from .function.core import * from .function.logarithmic import * from .structured import * from .decorators import * del bases # Enable the set of default units. This notably does *not* include # Imperial units. set_enabled_units([si, cgs, astrophys, function_units, misc, photometric]) # ------------------------------------------------------------------------- def __getattr__(attr): if attr == "littleh": from astropy.units.astrophys import littleh return littleh elif attr == "with_H0": from astropy.units.equivalencies import with_H0 return with_H0 raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
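

# ---------------------------------------------------------------------------
# Illustrative sketch (an editor's addition): the module-level ``__getattr__``
# above is a PEP 562 lazy attribute hook, so ``littleh`` and ``with_H0`` are
# imported only on first access.  ``_usage_example`` is a hypothetical helper
# holding doctests only; note that accessing ``littleh`` may also emit a
# deprecation warning from its home module.
def _usage_example():  # pragma: no cover
    """
    >>> import astropy.units as u
    >>> u.littleh                     # doctest: +SKIP
    Unit("littleh")
    >>> u.not_a_unit                  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    AttributeError: module 'astropy.units' has no attribute 'not_a_unit'.
    """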
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines the astrophysics-specific units. They are also available in the `astropy.units` namespace. """ from . import si from astropy.constants import si as _si from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes, set_enabled_units) # To ensure si units of the constants can be interpreted. set_enabled_units([si]) import numpy as _numpy _ns = globals() ########################################################################### # LENGTH def_unit((['AU', 'au'], ['astronomical_unit']), _si.au, namespace=_ns, prefixes=True, doc="astronomical unit: approximately the mean Earth--Sun " "distance.") def_unit(['pc', 'parsec'], _si.pc, namespace=_ns, prefixes=True, doc="parsec: approximately 3.26 light-years.") def_unit(['solRad', 'R_sun', 'Rsun'], _si.R_sun, namespace=_ns, doc="Solar radius", prefixes=False, format={'latex': r'R_{\odot}', 'unicode': 'R\N{SUN}'}) def_unit(['jupiterRad', 'R_jup', 'Rjup', 'R_jupiter', 'Rjupiter'], _si.R_jup, namespace=_ns, prefixes=False, doc="Jupiter radius", # LaTeX jupiter symbol requires wasysym format={'latex': r'R_{\rm J}', 'unicode': 'R\N{JUPITER}'}) def_unit(['earthRad', 'R_earth', 'Rearth'], _si.R_earth, namespace=_ns, prefixes=False, doc="Earth radius", # LaTeX earth symbol requires wasysym format={'latex': r'R_{\oplus}', 'unicode': 'R⊕'}) def_unit(['lyr', 'lightyear'], (_si.c * si.yr).to(si.m), namespace=_ns, prefixes=True, doc="Light year") def_unit(['lsec', 'lightsecond'], (_si.c * si.s).to(si.m), namespace=_ns, prefixes=False, doc="Light second") ########################################################################### # MASS def_unit(['solMass', 'M_sun', 'Msun'], _si.M_sun, namespace=_ns, prefixes=False, doc="Solar mass", format={'latex': r'M_{\odot}', 'unicode': 'M\N{SUN}'}) def_unit(['jupiterMass', 'M_jup', 'Mjup', 'M_jupiter', 'Mjupiter'], _si.M_jup, namespace=_ns, prefixes=False, doc="Jupiter mass", # LaTeX jupiter symbol requires wasysym format={'latex': r'M_{\rm J}', 'unicode': 'M\N{JUPITER}'}) def_unit(['earthMass', 'M_earth', 'Mearth'], _si.M_earth, namespace=_ns, prefixes=False, doc="Earth mass", # LaTeX earth symbol requires wasysym format={'latex': r'M_{\oplus}', 'unicode': 'M⊕'}) ########################################################################## # ENERGY # Here, explicitly convert the planck constant to 'eV s' since the constant # can override that to give a more precise value that takes into account # covariances between e and h. Eventually, this may also be replaced with # just `_si.Ryd.to(eV)`. 
def_unit(['Ry', 'rydberg'], (_si.Ryd * _si.c * _si.h.to(si.eV * si.s)).to(si.eV), namespace=_ns, prefixes=True, doc="Rydberg: Energy of a photon whose wavenumber is the Rydberg " "constant", format={'latex': r'R_{\infty}', 'unicode': 'R∞'}) ########################################################################### # ILLUMINATION def_unit(['solLum', 'L_sun', 'Lsun'], _si.L_sun, namespace=_ns, prefixes=False, doc="Solar luminance", format={'latex': r'L_{\odot}', 'unicode': 'L\N{SUN}'}) ########################################################################### # SPECTRAL DENSITY def_unit((['ph', 'photon'], ['photon']), format={'ogip': 'photon', 'vounit': 'photon'}, namespace=_ns, prefixes=True) def_unit(['Jy', 'Jansky', 'jansky'], 1e-26 * si.W / si.m ** 2 / si.Hz, namespace=_ns, prefixes=True, doc="Jansky: spectral flux density") def_unit(['R', 'Rayleigh', 'rayleigh'], (1e10 / (4 * _numpy.pi)) * ph * si.m ** -2 * si.s ** -1 * si.sr ** -1, namespace=_ns, prefixes=True, doc="Rayleigh: photon flux") ########################################################################### # EVENTS def_unit((['ct', 'count'], ['count']), format={'fits': 'count', 'ogip': 'count', 'vounit': 'count'}, namespace=_ns, prefixes=True, exclude_prefixes=['p']) def_unit(['adu'], namespace=_ns, prefixes=True) def_unit(['DN', 'dn'], namespace=_ns, prefixes=False) ########################################################################### # MISCELLANEOUS # Some of these are very FITS-specific and perhaps considered a mistake. # Maybe they should be moved into the FITS format class? # TODO: This is defined by the FITS standard as "relative to the sun". # Is that mass, volume, what? def_unit(['Sun'], namespace=_ns) def_unit(['chan'], namespace=_ns, prefixes=True) def_unit(['bin'], namespace=_ns, prefixes=True) def_unit(['beam'], namespace=_ns, prefixes=True) def_unit(['electron'], doc="Number of electrons", namespace=_ns, format={'latex': r'e^{-}', 'unicode': 'e⁻'}) ########################################################################### # CLEANUP del UnitBase del def_unit del si ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) # ------------------------------------------------------------------------- def __getattr__(attr): if attr == "littleh": import warnings from astropy.cosmology.units import littleh from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( ("`littleh` is deprecated from module `astropy.units.astrophys` " "since astropy 5.0 and may be removed in a future version. " "Use `astropy.cosmology.units.littleh` instead."), AstropyDeprecationWarning) return littleh raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
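

# ---------------------------------------------------------------------------
# Illustrative sketch (an editor's addition): a few spot checks of the
# definitions above.  The Jansky conversion is exact by definition.
# ``_usage_example`` is a hypothetical helper holding doctests only.
def _usage_example():  # pragma: no cover
    """
    >>> import astropy.units as u
    >>> u.Jy.to(u.W / u.m ** 2 / u.Hz)    # 1 Jy == 1e-26 W m^-2 Hz^-1
    1e-26
    >>> u.solMass.physical_type
    PhysicalType('mass')
    >>> u.AU.is_equivalent(u.pc)          # both decompose to length
    True
    """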
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""Defines the physical types that correspond to different units."""

import numbers
import warnings

from . import core
from . import si
from . import astrophys
from . import cgs
from . import imperial  # Need this for backward namespace compat, see issues 11975 and 11977  # noqa
from . import misc
from . import quantity

from astropy.utils.exceptions import AstropyDeprecationWarning

__all__ = ["def_physical_type", "get_physical_type", "PhysicalType"]

_units_and_physical_types = [
    (core.dimensionless_unscaled, "dimensionless"),
    (si.m, "length"),
    (si.m ** 2, "area"),
    (si.m ** 3, "volume"),
    (si.s, "time"),
    (si.rad, "angle"),
    (si.sr, "solid angle"),
    (si.m / si.s, {"speed", "velocity"}),
    (si.m / si.s ** 2, "acceleration"),
    (si.Hz, "frequency"),
    (si.g, "mass"),
    (si.mol, "amount of substance"),
    (si.K, "temperature"),
    (si.W * si.m ** -1 * si.K ** -1, "thermal conductivity"),
    (si.J * si.K ** -1, {"heat capacity", "entropy"}),
    (si.J * si.K ** -1 * si.kg ** -1, {"specific heat capacity",
                                       "specific entropy"}),
    (si.N, "force"),
    (si.J, {"energy", "work", "torque"}),
    (si.J * si.m ** -2 * si.s ** -1, {"energy flux", "irradiance"}),
    (si.Pa, {"pressure", "energy density", "stress"}),
    (si.W, {"power", "radiant flux"}),
    (si.kg * si.m ** -3, "mass density"),
    (si.m ** 3 / si.kg, "specific volume"),
    (si.mol / si.m ** 3, "molar concentration"),
    (si.m ** 3 / si.mol, "molar volume"),
    (si.kg * si.m / si.s, {"momentum", "impulse"}),
    (si.kg * si.m ** 2 / si.s, {"angular momentum", "action"}),
    (si.rad / si.s, {"angular speed", "angular velocity", "angular frequency"}),
    (si.rad / si.s ** 2, "angular acceleration"),
    (si.rad / si.m, "plate scale"),
    (si.g / (si.m * si.s), "dynamic viscosity"),
    (si.m ** 2 / si.s, {"diffusivity", "kinematic viscosity"}),
    (si.m ** -1, "wavenumber"),
    (si.m ** -2, "column density"),
    (si.A, "electrical current"),
    (si.C, "electrical charge"),
    (si.V, "electrical potential"),
    (si.Ohm, {"electrical resistance",
              "electrical impedance",
              "electrical reactance"}),
    (si.Ohm * si.m, "electrical resistivity"),
    (si.S, "electrical conductance"),
    (si.S / si.m, "electrical conductivity"),
    (si.F, "electrical capacitance"),
    (si.C * si.m, "electrical dipole moment"),
    (si.A / si.m ** 2, "electrical current density"),
    (si.V / si.m, "electrical field strength"),
    (si.C / si.m ** 2,
     {"electrical flux density", "surface charge density",
      "polarization density"},
     ),
    (si.C / si.m ** 3, "electrical charge density"),
    (si.F / si.m, "permittivity"),
    (si.Wb, "magnetic flux"),
    (si.T, "magnetic flux density"),
    (si.A / si.m, "magnetic field strength"),
    (si.m ** 2 * si.A, "magnetic moment"),
    (si.H / si.m, {"electromagnetic field strength", "permeability"}),
    (si.H, "inductance"),
    (si.cd, "luminous intensity"),
    (si.lm, "luminous flux"),
    (si.lx, {"luminous emittance", "illuminance"}),
    (si.W / si.sr, "radiant intensity"),
    (si.cd / si.m ** 2, "luminance"),
    (si.m ** -3 * si.s ** -1, "volumetric rate"),
    (astrophys.Jy, "spectral flux density"),
    (si.N / si.m, "surface tension"),  # force per unit length
    (si.J * si.m ** -3 * si.s ** -1, {"spectral flux density wav",
                                      "power density"}),
    (astrophys.photon / si.Hz / si.cm ** 2 / si.s, "photon flux density"),
    (astrophys.photon / si.AA / si.cm ** 2 / si.s, "photon flux density wav"),
    (astrophys.R, "photon flux"),
    (misc.bit, "data quantity"),
    (misc.bit / si.s, "bandwidth"),
    (cgs.Franklin, "electrical charge (ESU)"),
    (cgs.statampere, "electrical current (ESU)"),
    (cgs.Biot, "electrical current (EMU)"),
    (cgs.abcoulomb, "electrical charge (EMU)"),
    (si.m * si.s ** -3, {"jerk", "jolt"}),
    (si.m * si.s ** -4, {"snap", "jounce"}),
    (si.m * si.s ** -5, "crackle"),
    (si.m * si.s ** -6, {"pop", "pounce"}),
    (si.K / si.m, "temperature gradient"),
    (si.J / si.kg, "specific energy"),
    (si.mol * si.m ** -3 * si.s ** -1, "reaction rate"),
    (si.kg * si.m ** 2, "moment of inertia"),
    (si.mol / si.s, "catalytic activity"),
    (si.J * si.K ** -1 * si.mol ** -1, "molar heat capacity"),
    (si.mol / si.kg, "molality"),
    (si.m * si.s, "absement"),
    (si.m * si.s ** 2, "absity"),
    (si.m ** 3 / si.s, "volumetric flow rate"),
    (si.s ** -2, "frequency drift"),
    (si.Pa ** -1, "compressibility"),
    (astrophys.electron * si.m ** -3, "electron density"),
    (astrophys.electron * si.m ** -2 * si.s ** -1, "electron flux"),
    (si.kg / si.m ** 2, "surface mass density"),
    (si.W / si.m ** 2 / si.sr, "radiance"),
    (si.J / si.mol, "chemical potential"),
    (si.kg / si.m, "linear density"),
    (si.H ** -1, "magnetic reluctance"),
    (si.W / si.K, "thermal conductance"),
    (si.K / si.W, "thermal resistance"),
    (si.K * si.m / si.W, "thermal resistivity"),
    (si.N / si.s, "yank"),
    (si.S * si.m ** 2 / si.mol, "molar conductivity"),
    (si.m ** 2 / si.V / si.s, "electrical mobility"),
    (si.lumen / si.W, "luminous efficacy"),
    (si.m ** 2 / si.kg, {"opacity", "mass attenuation coefficient"}),
    (si.kg * si.m ** -2 * si.s ** -1, {"mass flux", "momentum density"}),
    (si.m ** -3, "number density"),
    (si.m ** -2 * si.s ** -1, "particle flux"),
]

_physical_unit_mapping = {}
_unit_physical_mapping = {}
_name_physical_mapping = {}
# mapping from attribute-accessible name (no spaces, etc.) to the actual name.
_attrname_physical_mapping = {}


def _physical_type_from_str(name):
    """
    Return the `PhysicalType` instance associated with the name of a
    physical type.
    """
    if name == "unknown":
        raise ValueError("cannot uniquely identify an 'unknown' physical type.")

    elif name in _attrname_physical_mapping:
        return _attrname_physical_mapping[name]  # convert attribute-accessible
    elif name in _name_physical_mapping:
        return _name_physical_mapping[name]
    else:
        raise ValueError(f"{name!r} is not a known physical type.")


def _replace_temperatures_with_kelvin(unit):
    """
    If a unit contains a temperature unit besides kelvin, then replace
    that unit with kelvin.

    Temperatures cannot be converted directly between K, °F, °C, and
    °Ra, in particular since there would be different conversions for
    T and ΔT.  However, each of these temperature units represents the
    same physical type.  Replacing the different temperature units with
    kelvin allows the physical type to be treated consistently.
    """
    physical_type_id = unit._get_physical_type_id()

    physical_type_id_components = []
    substitution_was_made = False

    for base, power in physical_type_id:
        if base in ["deg_F", "deg_C", "deg_R"]:
            base = "K"
            substitution_was_made = True
        physical_type_id_components.append((base, power))

    if substitution_was_made:
        return core.Unit._from_physical_type_id(tuple(physical_type_id_components))
    else:
        return unit


def _standardize_physical_type_names(physical_type_input):
    """
    Convert a string or `set` of strings into a `set` containing
    string representations of physical types.

    The strings provided in ``physical_type_input`` can each contain
    multiple physical types that are separated by a slash (``/``).
    Underscores are treated as spaces so that variable names can be
    identical to physical type names.
""" if isinstance(physical_type_input, str): physical_type_input = {physical_type_input} standardized_physical_types = set() for ptype_input in physical_type_input: if not isinstance(ptype_input, str): raise ValueError(f"expecting a string, but got {ptype_input}") input_set = set(ptype_input.split("/")) processed_set = {s.strip().replace("_", " ") for s in input_set} standardized_physical_types |= processed_set return standardized_physical_types class PhysicalType: """ Represents the physical type(s) that are dimensionally compatible with a set of units. Instances of this class should be accessed through either `get_physical_type` or by using the `~astropy.units.core.UnitBase.physical_type` attribute of units. This class is not intended to be instantiated directly in user code. Parameters ---------- unit : `~astropy.units.Unit` The unit to be represented by the physical type. physical_types : `str` or `set` of `str` A `str` representing the name of the physical type of the unit, or a `set` containing strings that represent one or more names of physical types. Notes ----- A physical type will be considered equal to an equivalent `PhysicalType` instance (recommended) or a string that contains a name of the physical type. The latter method is not recommended in packages, as the names of some physical types may change in the future. To maintain backwards compatibility, two physical type names may be included in one string if they are separated with a slash (e.g., ``"momentum/impulse"``). String representations of physical types may include underscores instead of spaces. Examples -------- `PhysicalType` instances may be accessed via the `~astropy.units.core.UnitBase.physical_type` attribute of units. >>> import astropy.units as u >>> u.meter.physical_type PhysicalType('length') `PhysicalType` instances may also be accessed by calling `get_physical_type`. This function will accept a unit, a string containing the name of a physical type, or the number one. >>> u.get_physical_type(u.m ** -3) PhysicalType('number density') >>> u.get_physical_type("volume") PhysicalType('volume') >>> u.get_physical_type(1) PhysicalType('dimensionless') Some units are dimensionally compatible with multiple physical types. A pascal is intended to represent pressure and stress, but the unit decomposition is equivalent to that of energy density. >>> pressure = u.get_physical_type("pressure") >>> pressure PhysicalType({'energy density', 'pressure', 'stress'}) >>> 'energy density' in pressure True Physical types can be tested for equality against other physical type objects or against strings that may contain the name of a physical type. >>> area = (u.m ** 2).physical_type >>> area == u.barn.physical_type True >>> area == "area" True Multiplication, division, and exponentiation are enabled so that physical types may be used for dimensional analysis. >>> length = u.pc.physical_type >>> area = (u.cm ** 2).physical_type >>> length * area PhysicalType('volume') >>> area / length PhysicalType('length') >>> length ** 3 PhysicalType('volume') may also be performed using a string that contains the name of a physical type. >>> "length" * area PhysicalType('volume') >>> "area" / length PhysicalType('length') Unknown physical types are labelled as ``"unknown"``. >>> (u.s ** 13).physical_type PhysicalType('unknown') Dimensional analysis may be performed for unknown physical types too. 
>>> length_to_19th_power = (u.m ** 19).physical_type >>> length_to_20th_power = (u.m ** 20).physical_type >>> length_to_20th_power / length_to_19th_power PhysicalType('length') """ def __init__(self, unit, physical_types): self._unit = _replace_temperatures_with_kelvin(unit) self._physical_type_id = self._unit._get_physical_type_id() self._physical_type = _standardize_physical_type_names(physical_types) self._physical_type_list = sorted(self._physical_type) def __iter__(self): yield from self._physical_type_list def __getattr__(self, attr): # TODO: remove this whole method when accessing str attributes from # physical types is no longer supported # short circuit attribute accessed in __str__ to prevent recursion if attr == '_physical_type_list': super().__getattribute__(attr) self_str_attr = getattr(str(self), attr, None) if hasattr(str(self), attr): warning_message = ( f"support for accessing str attributes such as {attr!r} " "from PhysicalType instances is deprecated since 4.3 " "and will be removed in a subsequent release.") warnings.warn(warning_message, AstropyDeprecationWarning) return self_str_attr else: super().__getattribute__(attr) # to get standard error message def __eq__(self, other): """ Return `True` if ``other`` represents a physical type that is consistent with the physical type of the `PhysicalType` instance. """ if isinstance(other, PhysicalType): return self._physical_type_id == other._physical_type_id elif isinstance(other, str): other = _standardize_physical_type_names(other) return other.issubset(self._physical_type) else: return NotImplemented def __ne__(self, other): equality = self.__eq__(other) return not equality if isinstance(equality, bool) else NotImplemented def _name_string_as_ordered_set(self): return "{" + str(self._physical_type_list)[1:-1] + "}" def __repr__(self): if len(self._physical_type) == 1: names = "'" + self._physical_type_list[0] + "'" else: names = self._name_string_as_ordered_set() return f"PhysicalType({names})" def __str__(self): return "/".join(self._physical_type_list) @staticmethod def _dimensionally_compatible_unit(obj): """ Return a unit that corresponds to the provided argument. If a unit is passed in, return that unit. If a physical type (or a `str` with the name of a physical type) is passed in, return a unit that corresponds to that physical type. If the number equal to ``1`` is passed in, return a dimensionless unit. Otherwise, return `NotImplemented`. 
""" if isinstance(obj, core.UnitBase): return _replace_temperatures_with_kelvin(obj) elif isinstance(obj, PhysicalType): return obj._unit elif isinstance(obj, numbers.Real) and obj == 1: return core.dimensionless_unscaled elif isinstance(obj, str): return _physical_type_from_str(obj)._unit else: return NotImplemented def _dimensional_analysis(self, other, operation): other_unit = self._dimensionally_compatible_unit(other) if other_unit is NotImplemented: return NotImplemented other_unit = _replace_temperatures_with_kelvin(other_unit) new_unit = getattr(self._unit, operation)(other_unit) return new_unit.physical_type def __mul__(self, other): return self._dimensional_analysis(other, "__mul__") def __rmul__(self, other): return self.__mul__(other) def __truediv__(self, other): return self._dimensional_analysis(other, "__truediv__") def __rtruediv__(self, other): other = self._dimensionally_compatible_unit(other) if other is NotImplemented: return NotImplemented return other.physical_type._dimensional_analysis(self, "__truediv__") def __pow__(self, power): return (self._unit ** power).physical_type def __hash__(self): return hash(self._physical_type_id) def __len__(self): return len(self._physical_type) # We need to prevent operations like where a Unit instance left # multiplies a PhysicalType instance from returning a `Quantity` # instance with a PhysicalType as the value. We can do this by # preventing np.array from casting a PhysicalType instance as # an object array. __array__ = None def def_physical_type(unit, name): """ Add a mapping between a unit and the corresponding physical type(s). If a physical type already exists for a unit, add new physical type names so long as those names are not already in use for other physical types. Parameters ---------- unit : `~astropy.units.Unit` The unit to be represented by the physical type. name : `str` or `set` of `str` A `str` representing the name of the physical type of the unit, or a `set` containing strings that represent one or more names of physical types. Raises ------ ValueError If a physical type name is already in use for another unit, or if attempting to name a unit as ``"unknown"``. """ physical_type_id = unit._get_physical_type_id() physical_type_names = _standardize_physical_type_names(name) if "unknown" in physical_type_names: raise ValueError("cannot uniquely define an unknown physical type") names_for_other_units = set(_unit_physical_mapping.keys()).difference( _physical_unit_mapping.get(physical_type_id, {})) names_already_in_use = physical_type_names & names_for_other_units if names_already_in_use: raise ValueError( f"the following physical type names are already in use: " f"{names_already_in_use}.") unit_already_in_use = physical_type_id in _physical_unit_mapping if unit_already_in_use: physical_type = _physical_unit_mapping[physical_type_id] physical_type_names |= set(physical_type) physical_type.__init__(unit, physical_type_names) else: physical_type = PhysicalType(unit, physical_type_names) _physical_unit_mapping[physical_type_id] = physical_type for ptype in physical_type: _unit_physical_mapping[ptype] = physical_type_id for ptype_name in physical_type_names: _name_physical_mapping[ptype_name] = physical_type # attribute-accessible name attr_name = ptype_name.replace(' ', '_').replace('(', '').replace(')', '') _attrname_physical_mapping[attr_name] = physical_type def get_physical_type(obj): """ Return the physical type that corresponds to a unit (or another physical type representation). 
Parameters ---------- obj : quantity-like or `~astropy.units.PhysicalType`-like An object that (implicitly or explicitly) has a corresponding physical type. This object may be a unit, a `~astropy.units.Quantity`, an object that can be converted to a `~astropy.units.Quantity` (such as a number or array), a string that contains a name of a physical type, or a `~astropy.units.PhysicalType` instance. Returns ------- `~astropy.units.PhysicalType` A representation of the physical type(s) of the unit. Examples -------- The physical type may be retrieved from a unit or a `~astropy.units.Quantity`. >>> import astropy.units as u >>> u.get_physical_type(u.meter ** -2) PhysicalType('column density') >>> u.get_physical_type(0.62 * u.barn * u.Mpc) PhysicalType('volume') The physical type may also be retrieved by providing a `str` that contains the name of a physical type. >>> u.get_physical_type("energy") PhysicalType({'energy', 'torque', 'work'}) Numbers and arrays of numbers correspond to a dimensionless physical type. >>> u.get_physical_type(1) PhysicalType('dimensionless') """ if isinstance(obj, PhysicalType): return obj if isinstance(obj, str): return _physical_type_from_str(obj) try: unit = obj if isinstance(obj, core.UnitBase) else quantity.Quantity(obj, copy=False).unit except TypeError as exc: raise TypeError(f"{obj} does not correspond to a physical type.") from exc unit = _replace_temperatures_with_kelvin(unit) physical_type_id = unit._get_physical_type_id() unit_has_known_physical_type = physical_type_id in _physical_unit_mapping if unit_has_known_physical_type: return _physical_unit_mapping[physical_type_id] else: return PhysicalType(unit, "unknown") # ------------------------------------------------------------------------------ # Script section creating the physical types and the documentation # define the physical types for unit, physical_type in _units_and_physical_types: def_physical_type(unit, physical_type) # For getting the physical types. def __getattr__(name): """Checks for physical types using lazy import. This also allows user-defined physical types to be accessible from the :mod:`astropy.units.physical` module. See `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_ Parameters ---------- name : str The name of the attribute in this module. If it is already defined, then this function is not called. Returns ------- ptype : `~astropy.units.physical.PhysicalType` Raises ------ AttributeError If the ``name`` does not correspond to a physical type """ if name in _attrname_physical_mapping: return _attrname_physical_mapping[name] raise AttributeError(f"module {__name__!r} has no attribute {name!r}") def __dir__(): """Return contents directory (__all__ + all physical type names).""" return list(set(__all__) | set(_attrname_physical_mapping.keys())) # This generates a docstring addition for this module that describes all of the # standard physical types defined here. if __doc__ is not None: doclines = [ ".. list-table:: Defined Physical Types", " :header-rows: 1", " :widths: 30 10 50", "", " * - Physical type", " - Unit", " - Other physical type(s) with same unit"] for name in sorted(_name_physical_mapping.keys()): physical_type = _name_physical_mapping[name] doclines.extend([ f" * - _`{name}`", f" - :math:`{physical_type._unit.to_string('latex')[1:-1]}`", f" - {', '.join([n for n in physical_type if n != name])}"]) __doc__ += '\n\n' + '\n'.join(doclines) del unit, physical_type
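# A minimal usage sketch for the public API defined above, assuming this
# module is reachable through the top-level `astropy.units` namespace, as in
# a standard installation.  It runs only when the module is executed directly.
if __name__ == '__main__':  # pragma: no cover
    import astropy.units as u

    # Physical types may be looked up from units, strings, or the number one.
    entropy = u.get_physical_type(u.J / u.K)
    assert entropy == 'entropy'  # equality against a name string is supported

    # PhysicalType instances support dimensional analysis.
    speed = u.get_physical_type('speed')
    time = u.get_physical_type('time')
    assert speed * time == 'length'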
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Miscellaneous utilities for `astropy.units`. None of the functions in the module are meant for use outside of the package. """ import io import re from fractions import Fraction import numpy as np from numpy import finfo _float_finfo = finfo(float) # take float here to ensure comparison with another float is fast # give a little margin since often multiple calculations happened _JUST_BELOW_UNITY = float(1.-4.*_float_finfo.epsneg) _JUST_ABOVE_UNITY = float(1.+4.*_float_finfo.eps) def _get_first_sentence(s): """ Get the first sentence from a string and remove any carriage returns. """ x = re.match(r".*?\S\.\s", s) if x is not None: s = x.group(0) return s.replace('\n', ' ') def _iter_unit_summary(namespace): """ Generates the ``(unit, doc, represents, aliases, prefixes)`` tuple used to format the unit summary docs in `generate_unit_summary`. """ from . import core # Get all of the units, and keep track of which ones have SI # prefixes units = [] has_prefixes = set() for key, val in namespace.items(): # Skip non-unit items if not isinstance(val, core.UnitBase): continue # Skip aliases if key != val.name: continue if isinstance(val, core.PrefixUnit): # This will return the root unit that is scaled by the prefix # attached to it has_prefixes.add(val._represents.bases[0].name) else: units.append(val) # Sort alphabetically, case insensitive units.sort(key=lambda x: x.name.lower()) for unit in units: doc = _get_first_sentence(unit.__doc__).strip() represents = '' if isinstance(unit, core.Unit): represents = f":math:`{unit._represents.to_string('latex')[1:-1]}`" aliases = ', '.join(f'``{x}``' for x in unit.aliases) yield (unit, doc, represents, aliases, 'Yes' if unit.name in has_prefixes else 'No') def generate_unit_summary(namespace): """ Generates a summary of units from a given namespace. This is used to generate the docstring for the modules that define the actual units. Parameters ---------- namespace : dict A namespace containing units. Returns ------- docstring : str A docstring containing a summary table of the units. """ docstring = io.StringIO() docstring.write(""" .. list-table:: Available Units :header-rows: 1 :widths: 10 20 20 20 1 * - Unit - Description - Represents - Aliases - SI Prefixes """) for unit_summary in _iter_unit_summary(namespace): docstring.write(""" * - ``{}`` - {} - {} - {} - {} """.format(*unit_summary)) return docstring.getvalue() def generate_prefixonly_unit_summary(namespace): """ Generates table entries for units in a namespace that are just prefixes without the base unit. Note that this is intended to be used *after* `generate_unit_summary` and therefore does not include the table header. Parameters ---------- namespace : dict A namespace containing units that are prefixes but do *not* have the base unit in their namespace. Returns ------- docstring : str A docstring containing a summary table of the units. """ from . import PrefixUnit faux_namespace = {} for nm, unit in namespace.items(): if isinstance(unit, PrefixUnit): base_unit = unit.represents.bases[0] faux_namespace[base_unit.name] = base_unit docstring = io.StringIO() for unit_summary in _iter_unit_summary(faux_namespace): docstring.write(""" * - Prefixes for ``{}`` - {} prefixes - {} - {} - Only """.format(*unit_summary)) return docstring.getvalue() def is_effectively_unity(value): # value is *almost* always real, except, e.g., for u.mag**0.5, when # it will be complex. 
    # Use try/except to ensure normal case is fast
    try:
        return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
    except TypeError:  # value is complex
        return (_JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY and
                _JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY)


def sanitize_scale(scale):
    if is_effectively_unity(scale):
        return 1.0

    # Maximum speed for regular case where scale is a float.
    if scale.__class__ is float:
        return scale

    # We cannot have numpy scalars, since they don't autoconvert to
    # complex if necessary.  They are also slower.
    if hasattr(scale, 'dtype'):
        scale = scale.item()

    # All classes that scale can be (int, float, complex, Fraction)
    # have an "imag" attribute.
    if scale.imag:
        if abs(scale.real) > abs(scale.imag):
            if is_effectively_unity(scale.imag/scale.real + 1):
                return scale.real

        elif is_effectively_unity(scale.real/scale.imag + 1):
            return complex(0., scale.imag)

        return scale

    else:
        return scale.real


def maybe_simple_fraction(p, max_denominator=100):
    """Fraction very close to x with denominator at most max_denominator.

    The fraction has to be such that fraction/x is unity to within 4 ulp.
    If such a fraction does not exist, returns the float number.

    The algorithm is that of `fractions.Fraction.limit_denominator`, but
    sped up by not creating a fraction to start with.
    """
    if p == 0 or p.__class__ is int:
        return p
    n, d = p.as_integer_ratio()
    a = n // d
    # Normally, start with 0,1 and 1,0; here we have applied first iteration.
    n0, d0 = 1, 0
    n1, d1 = a, 1
    while d1 <= max_denominator:
        if _JUST_BELOW_UNITY <= n1/(d1*p) <= _JUST_ABOVE_UNITY:
            return Fraction(n1, d1)
        n, d = d, n-a*d
        a = n // d
        n0, n1 = n1, n0+a*n1
        d0, d1 = d1, d0+a*d1

    return p


def validate_power(p):
    """Convert a power to a floating point value, an integer, or a Fraction.

    If a fractional power can be represented exactly as a floating point
    number, convert it to a float, to make the math much faster; otherwise,
    retain it as a `fractions.Fraction` object to avoid losing precision.
    Conversely, if the value is indistinguishable from a rational number with a
    low-numbered denominator, convert to a Fraction object.

    Parameters
    ----------
    p : float, int, Rational, Fraction
        Power to be converted
    """
    denom = getattr(p, 'denominator', None)
    if denom is None:
        try:
            p = float(p)
        except Exception:
            if not np.isscalar(p):
                raise ValueError("Quantities and Units may only be raised "
                                 "to a scalar power")
            else:
                raise

        # This returns either a (simple) Fraction or the same float.
        p = maybe_simple_fraction(p)

        # If still a float, nothing more to be done.
        if isinstance(p, float):
            return p

        # Otherwise, check for simplifications.
        denom = p.denominator

    if denom == 1:
        p = p.numerator

    elif (denom & (denom - 1)) == 0:
        # Above is a bit-twiddling hack to see if denom is a power of two.
        # If so, float does not lose precision and will speed things up.
        p = float(p)

    return p


def resolve_fractions(a, b):
    """
    If either input is a Fraction, convert the other to a Fraction
    (at least if it does not have a ridiculous denominator).
    This ensures that any operation involving a Fraction will use
    rational arithmetic and preserve precision.
    """
    # We short-circuit on the most common cases of int and float, since
    # isinstance(a, Fraction) is very slow for any non-Fraction instances.
a_is_fraction = (a.__class__ is not int and a.__class__ is not float and isinstance(a, Fraction)) b_is_fraction = (b.__class__ is not int and b.__class__ is not float and isinstance(b, Fraction)) if a_is_fraction and not b_is_fraction: b = maybe_simple_fraction(b) elif not a_is_fraction and b_is_fraction: a = maybe_simple_fraction(a) return a, b def quantity_asanyarray(a, dtype=None): from .quantity import Quantity if not isinstance(a, np.ndarray) and not np.isscalar(a) and any(isinstance(x, Quantity) for x in a): return Quantity(a, dtype=dtype) else: return np.asanyarray(a, dtype=dtype)
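# A short sketch of how the power- and scale-handling helpers above behave.
# This is illustrative only; these are private utilities, so the exact
# behaviour may change between versions.  Runs only when executed directly.
if __name__ == '__main__':  # pragma: no cover
    # Powers exactly representable as floats are kept as (fast) floats,
    # while something like 1/3 stays a Fraction to preserve precision.
    assert validate_power(0.5) == 0.5
    assert validate_power(Fraction(1, 3)) == Fraction(1, 3)

    # A float indistinguishable from a simple rational becomes a Fraction.
    assert maybe_simple_fraction(1. / 3.) == Fraction(1, 3)

    # Scales within a few ulp of one collapse to exactly 1.0.
    assert sanitize_scale(1. + 2e-16) == 1.0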
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines the CGS units. They are also available in the top-level `astropy.units` namespace. """ from fractions import Fraction from . import si from .core import UnitBase, def_unit _ns = globals() def_unit(['cm', 'centimeter'], si.cm, namespace=_ns, prefixes=False) g = si.g s = si.s C = si.C rad = si.rad sr = si.sr cd = si.cd K = si.K deg_C = si.deg_C mol = si.mol ########################################################################## # ACCELERATION def_unit(['Gal', 'gal'], cm / s ** 2, namespace=_ns, prefixes=True, doc="Gal: CGS unit of acceleration") ########################################################################## # ENERGY # Use CGS definition of erg def_unit(['erg'], g * cm ** 2 / s ** 2, namespace=_ns, prefixes=True, doc="erg: CGS unit of energy") ########################################################################## # FORCE def_unit(['dyn', 'dyne'], g * cm / s ** 2, namespace=_ns, prefixes=True, doc="dyne: CGS unit of force") ########################################################################## # PRESSURE def_unit(['Ba', 'Barye', 'barye'], g / (cm * s ** 2), namespace=_ns, prefixes=True, doc="Barye: CGS unit of pressure") ########################################################################## # DYNAMIC VISCOSITY def_unit(['P', 'poise'], g / (cm * s), namespace=_ns, prefixes=True, doc="poise: CGS unit of dynamic viscosity") ########################################################################## # KINEMATIC VISCOSITY def_unit(['St', 'stokes'], cm ** 2 / s, namespace=_ns, prefixes=True, doc="stokes: CGS unit of kinematic viscosity") ########################################################################## # WAVENUMBER def_unit(['k', 'Kayser', 'kayser'], cm ** -1, namespace=_ns, prefixes=True, doc="kayser: CGS unit of wavenumber") ########################################################################### # ELECTRICAL def_unit(['D', 'Debye', 'debye'], Fraction(1, 3) * 1e-29 * C * si.m, namespace=_ns, prefixes=True, doc="Debye: CGS unit of electric dipole moment") def_unit(['Fr', 'Franklin', 'statcoulomb', 'statC', 'esu'], g ** Fraction(1, 2) * cm ** Fraction(3, 2) * s ** -1, namespace=_ns, doc='Franklin: CGS (ESU) unit of charge') def_unit(['statA', 'statampere'], Fr * s ** -1, namespace=_ns, doc='statampere: CGS (ESU) unit of current') def_unit(['Bi', 'Biot', 'abA', 'abampere'], g ** Fraction(1, 2) * cm ** Fraction(1, 2) * s ** -1, namespace=_ns, doc='Biot: CGS (EMU) unit of current') def_unit(['abC', 'abcoulomb'], Bi * s, namespace=_ns, doc='abcoulomb: CGS (EMU) of charge') ########################################################################### # MAGNETIC def_unit(['G', 'Gauss', 'gauss'], 1e-4 * si.T, namespace=_ns, prefixes=True, doc="Gauss: CGS unit for magnetic field") ########################################################################### # BASES bases = set([cm, g, s, rad, cd, K, mol]) ########################################################################### # CLEANUP del UnitBase del def_unit del si del Fraction ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals())
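# A brief usage sketch for the CGS units defined above, assuming the standard
# top-level import path (these units are re-exported by `astropy.units`).
# Runs only when the module is executed directly.
if __name__ == '__main__':  # pragma: no cover
    from math import isclose

    import astropy.units as u

    # CGS units convert to SI with the familiar factors...
    assert isclose((1 * u.erg).to_value(u.J), 1e-7)
    assert isclose((1 * u.dyn).to_value(u.N), 1e-5)
    # ...and SI quantities convert back, e.g. 1 Pa == 10 Ba.
    assert isclose((1 * u.Pa).to_value(u.Ba), 10.0)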
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Support for ``typing`` py3.9+ features while min version is py3.8. """ from typing import * try: # py 3.9+ from typing import Annotated except (ImportError, ModuleNotFoundError): # optional dependency try: from typing_extensions import Annotated except (ImportError, ModuleNotFoundError): Annotated = NotImplemented else: from typing_extensions import * # override typing HAS_ANNOTATED = Annotated is not NotImplemented
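# A minimal sketch of the intended use of this shim; the fallback branch is
# only reached on python 3.8 without `typing_extensions` installed.  Runs
# only when the module is executed directly.
if __name__ == '__main__':  # pragma: no cover
    import astropy.units as u

    if HAS_ANNOTATED:
        # Unit-aware annotations, as used by `Quantity.__class_getitem__`.
        print(Annotated[u.Quantity, u.s])
    else:
        print("Annotated is unavailable; unit annotations degrade to units.")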
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines deprecated units. These units are not available in the top-level `astropy.units` namespace. To use these units, you must import the `astropy.units.deprecated` module:: >>> from astropy.units import deprecated >>> q = 10. * deprecated.emu # doctest: +SKIP To include them in `~astropy.units.UnitBase.compose` and the results of `~astropy.units.UnitBase.find_equivalent_units`, do:: >>> from astropy.units import deprecated >>> deprecated.enable() # doctest: +SKIP """ _ns = globals() def _initialize_module(): # Local imports to avoid polluting top-level namespace from . import cgs from . import astrophys from .core import def_unit, _add_prefixes def_unit(['emu'], cgs.Bi, namespace=_ns, doc='Biot: CGS (EMU) unit of current') # Add only some *prefixes* as deprecated units. _add_prefixes(astrophys.jupiterMass, namespace=_ns, prefixes=True) _add_prefixes(astrophys.earthMass, namespace=_ns, prefixes=True) _add_prefixes(astrophys.jupiterRad, namespace=_ns, prefixes=True) _add_prefixes(astrophys.earthRad, namespace=_ns, prefixes=True) _initialize_module() ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import (generate_unit_summary as _generate_unit_summary, generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary) if __doc__ is not None: __doc__ += _generate_unit_summary(globals()) __doc__ += _generate_prefixonly_unit_summary(globals()) def enable(): """ Enable deprecated units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This may be used with the ``with`` statement to enable deprecated units only temporarily. """ # Local import to avoid cyclical import from .core import add_enabled_units # Local import to avoid polluting namespace import inspect return add_enabled_units(inspect.getmodule(enable))
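# A short usage sketch: this module is not imported by default, and the
# deprecated units appear in composition and search results only while
# `enable()` is in effect.  Runs only when the module is executed directly.
if __name__ == '__main__':  # pragma: no cover
    import astropy.units as u
    from astropy.units import deprecated

    q = 10. * deprecated.emu            # direct use works without enable()
    with deprecated.enable():
        print(u.A.find_equivalent_units())  # now includes 'emu'
    print(q)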
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines structured units and quantities.
"""

# Standard library
import operator

import numpy as np

from .core import Unit, UnitBase, UNITY

__all__ = ['StructuredUnit']


DTYPE_OBJECT = np.dtype('O')


def _names_from_dtype(dtype):
    """Recursively extract field names from a dtype."""
    names = []
    for name in dtype.names:
        subdtype = dtype.fields[name][0]
        if subdtype.names:
            names.append([name, _names_from_dtype(subdtype)])
        else:
            names.append(name)
    return tuple(names)


def _normalize_names(names):
    """Recursively normalize, inferring upper level names for unadorned tuples.

    Generally, we want the field names to be organized like dtypes, as in
    ``(['pv', ('p', 'v')], 't')``.  But we automatically infer upper
    field names if the list is absent from items like ``(('p', 'v'), 't')``,
    by concatenating the names inside the tuple.
    """
    result = []
    for name in names:
        if isinstance(name, str) and len(name) > 0:
            result.append(name)
        elif (isinstance(name, list) and len(name) == 2
              and isinstance(name[0], str) and len(name[0]) > 0
              and isinstance(name[1], tuple) and len(name[1]) > 0):
            result.append([name[0], _normalize_names(name[1])])
        elif isinstance(name, tuple) and len(name) > 0:
            new_tuple = _normalize_names(name)
            result.append([''.join([(i[0] if isinstance(i, list) else i)
                                    for i in new_tuple]), new_tuple])
        else:
            raise ValueError(f'invalid entry {name!r}. Should be a name, '
                             'tuple of names, or 2-element list of the '
                             'form [name, tuple of names].')

    return tuple(result)


class StructuredUnit:
    """Container for units for a structured Quantity.

    Parameters
    ----------
    units : unit-like, tuple of unit-like, or `~astropy.units.StructuredUnit`
        Tuples can be nested.  If a `~astropy.units.StructuredUnit` is passed
        in, it will be returned unchanged unless different names are requested.
    names : tuple of str, tuple or list; `~numpy.dtype`; or `~astropy.units.StructuredUnit`, optional
        Field names for the units, possibly nested.  Can be inferred from a
        structured `~numpy.dtype` or another `~astropy.units.StructuredUnit`.
        For nested tuples, by default the name of the upper entry will be the
        concatenation of the names of the lower levels.  One can pass in a
        list with the upper-level name and a tuple of lower-level names to
        avoid this.  For tuples, not all levels have to be given; for any
        level not passed in, default field names of 'f0', 'f1', etc., will be
        used.

    Notes
    -----
    It is recommended to initialize the class indirectly, using
    `~astropy.units.Unit`.  E.g., ``u.Unit('AU,AU/day')``.

    When combined with a structured array to produce a structured
    `~astropy.units.Quantity`, array field names will take precedence.
    Generally, passing in ``names`` is needed only if the unit is used
    unattached to a `~astropy.units.Quantity` and one needs to access its
    fields.
Examples -------- Various ways to initialize a `~astropy.units.StructuredUnit`:: >>> import astropy.units as u >>> su = u.Unit('(AU,AU/day),yr') >>> su Unit("((AU, AU / d), yr)") >>> su.field_names (['f0', ('f0', 'f1')], 'f1') >>> su['f1'] Unit("yr") >>> su2 = u.StructuredUnit(((u.AU, u.AU/u.day), u.yr), names=(('p', 'v'), 't')) >>> su2 == su True >>> su2.field_names (['pv', ('p', 'v')], 't') >>> su3 = u.StructuredUnit((su2['pv'], u.day), names=(['p_v', ('p', 'v')], 't')) >>> su3.field_names (['p_v', ('p', 'v')], 't') >>> su3.keys() ('p_v', 't') >>> su3.values() (Unit("(AU, AU / d)"), Unit("d")) Structured units share most methods with regular units:: >>> su.physical_type ((PhysicalType('length'), PhysicalType({'speed', 'velocity'})), PhysicalType('time')) >>> su.si Unit("((1.49598e+11 m, 1.73146e+06 m / s), 3.15576e+07 s)") """ def __new__(cls, units, names=None): dtype = None if names is not None: if isinstance(names, StructuredUnit): dtype = names._units.dtype names = names.field_names elif isinstance(names, np.dtype): if not names.fields: raise ValueError('dtype should be structured, with fields.') dtype = np.dtype([(name, DTYPE_OBJECT) for name in names.names]) names = _names_from_dtype(names) else: if not isinstance(names, tuple): names = (names,) names = _normalize_names(names) if not isinstance(units, tuple): units = Unit(units) if isinstance(units, StructuredUnit): # Avoid constructing a new StructuredUnit if no field names # are given, or if all field names are the same already anyway. if names is None or units.field_names == names: return units # Otherwise, turn (the upper level) into a tuple, for renaming. units = units.values() else: # Single regular unit: make a tuple for iteration below. units = (units,) if names is None: names = tuple(f'f{i}' for i in range(len(units))) elif len(units) != len(names): raise ValueError("lengths of units and field names must match.") converted = [] for unit, name in zip(units, names): if isinstance(name, list): # For list, the first item is the name of our level, # and the second another tuple of names, i.e., we recurse. unit = cls(unit, name[1]) name = name[0] else: # We are at the lowest level. Check unit. unit = Unit(unit) if dtype is not None and isinstance(unit, StructuredUnit): raise ValueError("units do not match in depth with field " "names from dtype or structured unit.") converted.append(unit) self = super().__new__(cls) if dtype is None: dtype = np.dtype([((name[0] if isinstance(name, list) else name), DTYPE_OBJECT) for name in names]) # Decay array to void so we can access by field name and number. self._units = np.array(tuple(converted), dtype)[()] return self def __getnewargs__(self): """When de-serializing, e.g. pickle, start with a blank structure.""" return (), None @property def field_names(self): """Possibly nested tuple of the field names of the parts.""" return tuple(([name, unit.field_names] if isinstance(unit, StructuredUnit) else name) for name, unit in self.items()) # Allow StructuredUnit to be treated as an (ordered) mapping. def __len__(self): return len(self._units.dtype.names) def __getitem__(self, item): # Since we are based on np.void, indexing by field number works too. return self._units[item] def values(self): return self._units.item() def keys(self): return self._units.dtype.names def items(self): return tuple(zip(self._units.dtype.names, self._units.item())) def __iter__(self): yield from self._units.dtype.names # Helpers for methods below. 
    def _recursively_apply(self, func, cls=None):
        """Apply func recursively.

        Parameters
        ----------
        func : callable
            Function to apply to all parts of the structured unit,
            recursing as needed.
        cls : type, optional
            If given, should be a subclass of `~numpy.void`.  By default,
            will return a new `~astropy.units.StructuredUnit` instance.
        """
        results = np.array(tuple([func(part) for part in self.values()]),
                           self._units.dtype)[()]
        if cls is not None:
            return results.view((cls, results.dtype))

        # Short-cut; no need to interpret field names, etc.
        result = super().__new__(self.__class__)
        result._units = results
        return result

    def _recursively_get_dtype(self, value, enter_lists=True):
        """Get structured dtype according to value, using our field names.

        This is useful since ``np.array(value)`` would treat tuples as lower
        levels of the array, rather than as elements of a structured array.
        The routine does presume that the type of the first tuple is
        representative of the rest.  Used in ``_get_converter``.

        For the special value of ``UNITY``, all fields are assumed to be 1.0,
        and hence this will return an all-float dtype.
        """
        if enter_lists:
            while isinstance(value, list):
                value = value[0]
        if value is UNITY:
            value = (UNITY,) * len(self)
        elif not isinstance(value, tuple) or len(self) != len(value):
            raise ValueError(f"cannot interpret value {value} for unit {self}.")
        descr = []
        for (name, unit), part in zip(self.items(), value):
            if isinstance(unit, StructuredUnit):
                descr.append(
                    (name, unit._recursively_get_dtype(part,
                                                       enter_lists=False)))
            else:
                # Got a part associated with a regular unit.  Get its dtype.
                # Like for Quantity, we cast integers to float.
                part = np.array(part)
                part_dtype = part.dtype
                if part_dtype.kind in 'iu':
                    part_dtype = np.dtype(float)
                descr.append((name, part_dtype, part.shape))
        return np.dtype(descr)

    @property
    def si(self):
        """The `StructuredUnit` instance in SI units."""
        return self._recursively_apply(operator.attrgetter('si'))

    @property
    def cgs(self):
        """The `StructuredUnit` instance in cgs units."""
        return self._recursively_apply(operator.attrgetter('cgs'))

    # Needed to pass through Unit initializer, so might as well use it.
    def _get_physical_type_id(self):
        return self._recursively_apply(
            operator.methodcaller('_get_physical_type_id'), cls=Structure)

    @property
    def physical_type(self):
        """Physical types of all the fields."""
        return self._recursively_apply(
            operator.attrgetter('physical_type'), cls=Structure)

    def decompose(self, bases=set()):
        """The `StructuredUnit` composed of only irreducible units.

        Parameters
        ----------
        bases : sequence of `~astropy.units.UnitBase`, optional
            The bases to decompose into.  When not provided,
            decomposes down to any irreducible units.  When provided,
            the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it's not possible to do so.

        Returns
        -------
        `~astropy.units.StructuredUnit`
            With the unit for each field containing only irreducible units.
        """
        return self._recursively_apply(
            operator.methodcaller('decompose', bases=bases))

    def is_equivalent(self, other, equivalencies=[]):
        """`True` if all fields are equivalent to the other's fields.

        Parameters
        ----------
        other : `~astropy.units.StructuredUnit`
            The structured unit to compare with, or what can initialize one.
        equivalencies : list of tuple, optional
            A list of equivalence pairs to try if the units are not
            directly convertible.  See :ref:`unit_equivalencies`.
            The list will be applied to all fields.
Returns ------- bool """ try: other = StructuredUnit(other) except Exception: return False if len(self) != len(other): return False for self_part, other_part in zip(self.values(), other.values()): if not self_part.is_equivalent(other_part, equivalencies=equivalencies): return False return True def _get_converter(self, other, equivalencies=[]): if not isinstance(other, type(self)): other = self.__class__(other, names=self) converters = [self_part._get_converter(other_part, equivalencies=equivalencies) for (self_part, other_part) in zip(self.values(), other.values())] def converter(value): if not hasattr(value, 'dtype'): value = np.array(value, self._recursively_get_dtype(value)) result = np.empty_like(value) for name, converter_ in zip(result.dtype.names, converters): result[name] = converter_(value[name]) # Index with empty tuple to decay array scalars to numpy void. return result if result.shape else result[()] return converter def to(self, other, value=np._NoValue, equivalencies=[]): """Return values converted to the specified unit. Parameters ---------- other : `~astropy.units.StructuredUnit` The unit to convert to. If necessary, will be converted to a `~astropy.units.StructuredUnit` using the dtype of ``value``. value : array-like, optional Value(s) in the current unit to be converted to the specified unit. If a sequence, the first element must have entries of the correct type to represent all elements (i.e., not have, e.g., a ``float`` where other elements have ``complex``). If not given, assumed to have 1. in all fields. equivalencies : list of tuple, optional A list of equivalence pairs to try if the units are not directly convertible. See :ref:`unit_equivalencies`. This list is in addition to possible global defaults set by, e.g., `set_enabled_equivalencies`. Use `None` to turn off all equivalencies. Returns ------- values : scalar or array Converted value(s). Raises ------ UnitsError If units are inconsistent """ if value is np._NoValue: # We do not have UNITY as a default, since then the docstring # would list 1.0 as default, yet one could not pass that in. value = UNITY return self._get_converter(other, equivalencies=equivalencies)(value) def to_string(self, format='generic'): """Output the unit in the given format as a string. Units are separated by commas. Parameters ---------- format : `astropy.units.format.Base` instance or str The name of a format or a formatter object. If not provided, defaults to the generic format. Notes ----- Structured units can be written to all formats, but can be re-read only with 'generic'. """ parts = [part.to_string(format) for part in self.values()] out_fmt = '({})' if len(self) > 1 else '({},)' if format == 'latex': # Strip $ from parts and add them on the outside. parts = [part[1:-1] for part in parts] out_fmt = '$' + out_fmt + '$' return out_fmt.format(', '.join(parts)) def _repr_latex_(self): return self.to_string('latex') __array_ufunc__ = None def __mul__(self, other): if isinstance(other, str): try: other = Unit(other, parse_strict='silent') except Exception: return NotImplemented if isinstance(other, UnitBase): new_units = tuple(part * other for part in self.values()) return self.__class__(new_units, names=self) if isinstance(other, StructuredUnit): return NotImplemented # Anything not like a unit, try initialising as a structured quantity. 
try: from .quantity import Quantity return Quantity(other, unit=self) except Exception: return NotImplemented def __rmul__(self, other): return self.__mul__(other) def __truediv__(self, other): if isinstance(other, str): try: other = Unit(other, parse_strict='silent') except Exception: return NotImplemented if isinstance(other, UnitBase): new_units = tuple(part / other for part in self.values()) return self.__class__(new_units, names=self) return NotImplemented def __rlshift__(self, m): try: from .quantity import Quantity return Quantity(m, self, copy=False, subok=True) except Exception: return NotImplemented def __str__(self): return self.to_string() def __repr__(self): return f'Unit("{self.to_string()}")' def __eq__(self, other): try: other = StructuredUnit(other) except Exception: return NotImplemented return self.values() == other.values() def __ne__(self, other): if not isinstance(other, type(self)): try: other = StructuredUnit(other) except Exception: return NotImplemented return self.values() != other.values() class Structure(np.void): """Single element structure for physical type IDs, etc. Behaves like a `~numpy.void` and thus mostly like a tuple which can also be indexed with field names, but overrides ``__eq__`` and ``__ne__`` to compare only the contents, not the field names. Furthermore, this way no `FutureWarning` about comparisons is given. """ # Note that it is important for physical type IDs to not be stored in a # tuple, since then the physical types would be treated as alternatives in # :meth:`~astropy.units.UnitBase.is_equivalent`. (Of course, in that # case, they could also not be indexed by name.) def __eq__(self, other): if isinstance(other, np.void): other = other.item() return self.item() == other def __ne__(self, other): if isinstance(other, np.void): other = other.item() return self.item() != other
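# A minimal usage sketch for StructuredUnit, mirroring the Examples section
# of the class docstring (assumes the public `astropy.units` namespace).
# Runs only when the module is executed directly.
if __name__ == '__main__':  # pragma: no cover
    import numpy as np
    import astropy.units as u

    # Nested structured unit for a position/velocity pair plus a time.
    su = u.StructuredUnit(((u.AU, u.AU / u.day), u.yr),
                          names=(('p', 'v'), 't'))

    # Convert a matching structured value to SI.  The value's field names
    # ('pos', 'vel', ... here) only need to match in structure, not in name.
    value = np.array(((1.0, 0.1), 10.0),
                     dtype=[('pv', [('pos', 'f8'), ('vel', 'f8')]),
                            ('t', 'f8')])
    print(su.to(su.si, value))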
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module defines the `Quantity` object, which represents a number with some associated units. `Quantity` objects support operations like ordinary numbers, but will deal with unit conversions internally. """ # Standard library import re import numbers from fractions import Fraction import operator import warnings import numpy as np # AstroPy from .core import (Unit, dimensionless_unscaled, get_current_unit_registry, UnitBase, UnitsError, UnitConversionError, UnitTypeError) from .structured import StructuredUnit from .utils import is_effectively_unity from .format.latex import Latex from astropy.utils.compat.misc import override__dir__ from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning from astropy.utils.misc import isiterable from astropy.utils.data_info import ParentDtypeInfo from astropy import config as _config from .quantity_helper import (converters_and_unit, can_have_arbitrary_unit, check_output) from .quantity_helper.function_helpers import ( SUBCLASS_SAFE_FUNCTIONS, FUNCTION_HELPERS, DISPATCHED_FUNCTIONS, UNSUPPORTED_FUNCTIONS) __all__ = ["Quantity", "SpecificTypeQuantity", "QuantityInfoBase", "QuantityInfo", "allclose", "isclose"] # We don't want to run doctests in the docstrings we inherit from Numpy __doctest_skip__ = ['Quantity.*'] _UNIT_NOT_INITIALISED = "(Unit not initialised)" _UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh} class Conf(_config.ConfigNamespace): """ Configuration parameters for Quantity """ latex_array_threshold = _config.ConfigItem(100, 'The maximum size an array Quantity can be before its LaTeX ' 'representation for IPython gets "summarized" (meaning only the first ' 'and last few elements are shown with "..." between). Setting this to a ' 'negative number means that the value will instead be whatever numpy ' 'gets from get_printoptions.') conf = Conf() class QuantityIterator: """ Flat iterator object to iterate over Quantities A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity ``q``. It allows iterating over the array as if it were a 1-D array, either in a for-loop or by calling its `next` method. Iteration is done in C-contiguous style, with the last index varying the fastest. The iterator can also be indexed using basic slicing or advanced indexing. See Also -------- Quantity.flatten : Returns a flattened copy of an array. Notes ----- `QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It is not exported by the `~astropy.units` module. Instead of instantiating a `QuantityIterator` directly, use `Quantity.flat`. """ def __init__(self, q): self._quantity = q self._dataiter = q.view(np.ndarray).flat def __iter__(self): return self def __getitem__(self, indx): out = self._dataiter.__getitem__(indx) # For single elements, ndarray.flat.__getitem__ returns scalars; these # need a new view as a Quantity. if isinstance(out, type(self._quantity)): return out else: return self._quantity._new_view(out) def __setitem__(self, index, value): self._dataiter[index] = self._quantity._to_own_unit(value) def __next__(self): """ Return the next value, or raise StopIteration. """ out = next(self._dataiter) # ndarray.flat._dataiter returns scalars, so need a view as a Quantity. 
return self._quantity._new_view(out) next = __next__ def __len__(self): return len(self._dataiter) #### properties and methods to match `numpy.ndarray.flatiter` #### @property def base(self): """A reference to the array that is iterated over.""" return self._quantity @property def coords(self): """An N-dimensional tuple of current coordinates.""" return self._dataiter.coords @property def index(self): """Current flat index into the array.""" return self._dataiter.index def copy(self): """Get a copy of the iterator as a 1-D array.""" return self._quantity.flatten() class QuantityInfoBase(ParentDtypeInfo): # This is on a base class rather than QuantityInfo directly, so that # it can be used for EarthLocationInfo yet make clear that that class # should not be considered a typical Quantity subclass by Table. attrs_from_parent = {'dtype', 'unit'} # dtype and unit taken from parent _supports_indexing = True @staticmethod def default_format(val): return f'{val.value}' @staticmethod def possible_string_format_functions(format_): """Iterate through possible string-derived format functions. A string can either be a format specifier for the format built-in, a new-style format string, or an old-style format string. This method is overridden in order to suppress printing the unit in each row since it is already at the top in the column header. """ yield lambda format_, val: format(val.value, format_) yield lambda format_, val: format_.format(val.value) yield lambda format_, val: format_ % val.value class QuantityInfo(QuantityInfoBase): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ _represent_as_dict_attrs = ('value', 'unit') _construct_from_dict_args = ['value'] _represent_as_dict_primary_data = 'value' def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new Quantity instance which is consistent with the input ``cols`` and has ``length`` rows. This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- cols : list List of input columns length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : `~astropy.units.Quantity` (or subclass) Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'format', 'description')) # Make an empty quantity using the unit of the last one. shape = (length,) + attrs.pop('shape') dtype = attrs.pop('dtype') # Use zeros so we do not get problems for Quantity subclasses such # as Longitude and Latitude, which cannot take arbitrary values. data = np.zeros(shape=shape, dtype=dtype) # Get arguments needed to reconstruct class map = {key: (data if key == 'value' else getattr(cols[-1], key)) for key in self._represent_as_dict_attrs} map['copy'] = False out = self._construct_from_dict(map) # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. For Quantity this is just the quantity itself. 
Returns ------- arrays : list of ndarray """ return [self._parent] class Quantity(np.ndarray): """A `~astropy.units.Quantity` represents a number with some associated unit. See also: https://docs.astropy.org/en/stable/units/quantity.html Parameters ---------- value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str The numerical value of this quantity in the units given by unit. If a `Quantity` or sequence of them (or any other valid object with a ``unit`` attribute), creates a new `Quantity` object, converting to `unit` units as needed. If a string, it is converted to a number or `Quantity`, depending on whether a unit is present. unit : unit-like An object that represents the unit associated with the input value. Must be an `~astropy.units.UnitBase` object or a string parseable by the :mod:`~astropy.units` package. dtype : ~numpy.dtype, optional The dtype of the resulting Numpy array or scalar that will hold the value. If not provided, it is determined from the input, except that any integer and (non-Quantity) object inputs are converted to float by default. copy : bool, optional If `True` (default), then the value is copied. Otherwise, a copy will only be made if ``__array__`` returns a copy, if value is a nested sequence, or if a copy is needed to satisfy an explicitly given ``dtype``. (The `False` option is intended mostly for internal use, to speed up initialization where a copy is known to have been made. Use with care.) order : {'C', 'F', 'A'}, optional Specify the order of the array. As in `~numpy.array`. This parameter is ignored if the input is a `Quantity` and ``copy=False``. subok : bool, optional If `False` (default), the returned array will be forced to be a `Quantity`. Otherwise, `Quantity` subclasses will be passed through, or a subclass appropriate for the unit will be used (such as `~astropy.units.Dex` for ``u.dex(u.AA)``). ndmin : int, optional Specifies the minimum number of dimensions that the resulting array should have. Ones will be pre-pended to the shape as needed to meet this requirement. This parameter is ignored if the input is a `Quantity` and ``copy=False``. Raises ------ TypeError If the value provided is not a Python numeric type. TypeError If the unit provided is not either a :class:`~astropy.units.Unit` object or a parseable string unit. Notes ----- Quantities can also be created by multiplying a number or array with a :class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/ Unless the ``dtype`` argument is explicitly specified, integer or (non-Quantity) object inputs are converted to `float` by default. """ # Need to set a class-level default for _equivalencies, or # Constants can not initialize properly _equivalencies = [] # Default unit for initialization; can be overridden by subclasses, # possibly to `None` to indicate there is no default unit. _default_unit = dimensionless_unscaled # Ensures views have an undefined unit. _unit = None __array_priority__ = 10000 def __class_getitem__(cls, unit_shape_dtype): """Quantity Type Hints. Unit-aware type hints are ``Annotated`` objects that encode the class, the unit, and possibly shape and dtype information, depending on the python and :mod:`numpy` versions. Schematically, ``Annotated[cls[shape, dtype], unit]`` As a classmethod, the type is the class, ie ``Quantity`` produces an ``Annotated[Quantity, ...]`` while a subclass like :class:`~astropy.coordinates.Angle` returns ``Annotated[Angle, ...]``. 
Parameters ---------- unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple Unit specification, can be the physical type (ie str or class). If tuple, then the first element is the unit specification and all other elements are for `numpy.ndarray` type annotations. Whether they are included depends on the python and :mod:`numpy` versions. Returns ------- `typing.Annotated`, `typing_extensions.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType` Return type in this preference order: * if python v3.9+ : `typing.Annotated` * if :mod:`typing_extensions` is installed : `typing_extensions.Annotated` * `astropy.units.Unit` or `astropy.units.PhysicalType` Raises ------ TypeError If the unit/physical_type annotation is not Unit-like or PhysicalType-like. Examples -------- Create a unit-aware Quantity type annotation >>> Quantity[Unit("s")] Annotated[Quantity, Unit("s")] See Also -------- `~astropy.units.quantity_input` Use annotations for unit checks on function arguments and results. Notes ----- With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also static-type compatible. """ # LOCAL from ._typing import HAS_ANNOTATED, Annotated # process whether [unit] or [unit, shape, ptype] if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype target = unit_shape_dtype[0] shape_dtype = unit_shape_dtype[1:] else: # just unit target = unit_shape_dtype shape_dtype = () # Allowed unit/physical types. Errors if neither. try: unit = Unit(target) except (TypeError, ValueError): from astropy.units.physical import get_physical_type try: unit = get_physical_type(target) except (TypeError, ValueError, KeyError): # KeyError for Enum raise TypeError("unit annotation is not a Unit or PhysicalType") from None # Allow to sort of work for python 3.8- / no typing_extensions # instead of bailing out, return the unit for `quantity_input` if not HAS_ANNOTATED: warnings.warn("Quantity annotations are valid static type annotations only" " if Python is v3.9+ or `typing_extensions` is installed.") return unit # Quantity does not (yet) properly extend the NumPy generics types, # introduced in numpy v1.22+, instead just including the unit info as # metadata using Annotated. # TODO: ensure we do interact with NDArray.__class_getitem__. return Annotated.__class_getitem__((cls, unit)) def __new__(cls, value, unit=None, dtype=None, copy=True, order=None, subok=False, ndmin=0): if unit is not None: # convert unit first, to avoid multiple string->unit conversions unit = Unit(unit) # optimize speed for Quantity with no dtype given, copy=False if isinstance(value, Quantity): if unit is not None and unit is not value.unit: value = value.to(unit) # the above already makes a copy (with float dtype) copy = False if type(value) is not cls and not (subok and isinstance(value, cls)): value = value.view(cls) if dtype is None and value.dtype.kind in 'iu': dtype = float return np.array(value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin) # Maybe str, or list/tuple of Quantity? If so, this may set value_unit. # To ensure array remains fast, we short-circuit it. value_unit = None if not isinstance(value, np.ndarray): if isinstance(value, str): # The first part of the regex string matches any integer/float; # the second parts adds possible trailing .+-, which will break # the float function below and ensure things like 1.2.3deg # will not work. pattern = (r'\s*[+-]?' 
r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|' r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))' r'([eE][+-]?\d+)?' r'[.+-]?') v = re.match(pattern, value) unit_string = None try: value = float(v.group()) except Exception: raise TypeError('Cannot parse "{}" as a {}. It does not ' 'start with a number.' .format(value, cls.__name__)) unit_string = v.string[v.end():].strip() if unit_string: value_unit = Unit(unit_string) if unit is None: unit = value_unit # signal no conversion needed below. elif isiterable(value) and len(value) > 0: # Iterables like lists and tuples. if all(isinstance(v, Quantity) for v in value): # If a list/tuple containing only quantities, convert all # to the same unit. if unit is None: unit = value[0].unit value = [q.to_value(unit) for q in value] value_unit = unit # signal below that conversion has been done elif (dtype is None and not hasattr(value, 'dtype') and isinstance(unit, StructuredUnit)): # Special case for list/tuple of values and a structured unit: # ``np.array(value, dtype=None)`` would treat tuples as lower # levels of the array, rather than as elements of a structured # array, so we use the structure of the unit to help infer the # structured dtype of the value. dtype = unit._recursively_get_dtype(value) if value_unit is None: # If the value has a `unit` attribute and if not None # (for Columns with uninitialized unit), treat it like a quantity. value_unit = getattr(value, 'unit', None) if value_unit is None: # Default to dimensionless for no (initialized) unit attribute. if unit is None: unit = cls._default_unit value_unit = unit # signal below that no conversion is needed else: try: value_unit = Unit(value_unit) except Exception as exc: raise TypeError("The unit attribute {!r} of the input could " "not be parsed as an astropy Unit, raising " "the following exception:\n{}" .format(value.unit, exc)) if unit is None: unit = value_unit elif unit is not value_unit: copy = False # copy will be made in conversion at end value = np.array(value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin) # check that array contains numbers or long int objects if (value.dtype.kind in 'OSU' and not (value.dtype.kind == 'O' and isinstance(value.item(0), numbers.Number))): raise TypeError("The value must be a valid Python or " "Numpy numeric type.") # by default, cast any integer, boolean, etc., to float if dtype is None and value.dtype.kind in 'iuO': value = value.astype(float) # if we allow subclasses, allow a class from the unit. if subok: qcls = getattr(unit, '_quantity_class', cls) if issubclass(qcls, cls): cls = qcls value = value.view(cls) value._set_unit(value_unit) if unit is value_unit: return value else: # here we had non-Quantity input that had a "unit" attribute # with a unit different from the desired one. So, convert. return value.to(unit) def __array_finalize__(self, obj): # Check whether super().__array_finalize should be called # (sadly, ndarray.__array_finalize__ is None; we cannot be sure # what is above us). super_array_finalize = super().__array_finalize__ if super_array_finalize is not None: super_array_finalize(obj) # If we're a new object or viewing an ndarray, nothing has to be done. if obj is None or obj.__class__ is np.ndarray: return # If our unit is not set and obj has a valid one, use it. if self._unit is None: unit = getattr(obj, '_unit', None) if unit is not None: self._set_unit(unit) # Copy info if the original had `info` defined. 
Because of the way the # DataInfo works, `'info' in obj.__dict__` is False until the # `info` attribute is accessed or set. if 'info' in obj.__dict__: self.info = obj.info def __array_wrap__(self, obj, context=None): if context is None: # Methods like .squeeze() create a new `ndarray` and then call # __array_wrap__ to turn the array into self's subclass. return self._new_view(obj) raise NotImplementedError('__array_wrap__ should not be used ' 'with a context any more since all use ' 'should go through array_function. ' 'Please raise an issue on ' 'https://github.com/astropy/astropy') def __array_ufunc__(self, function, method, *inputs, **kwargs): """Wrap numpy ufuncs, taking care of units. Parameters ---------- function : callable ufunc to wrap. method : str Ufunc method: ``__call__``, ``at``, ``reduce``, etc. inputs : tuple Input arrays. kwargs : keyword arguments As passed on, with ``out`` containing possible quantity output. Returns ------- result : `~astropy.units.Quantity` Results of the ufunc, with the unit set properly. """ # Determine required conversion functions -- to bring the unit of the # input to that expected (e.g., radian for np.sin), or to get # consistent units between two inputs (e.g., in np.add) -- # and the unit of the result (or tuple of units for nout > 1). converters, unit = converters_and_unit(function, method, *inputs) out = kwargs.get('out', None) # Avoid looping back by turning any Quantity output into array views. if out is not None: # If pre-allocated output is used, check it is suitable. # This also returns array view, to ensure we don't loop back. if function.nout == 1: out = out[0] out_array = check_output(out, unit, inputs, function=function) # Ensure output argument remains a tuple. kwargs['out'] = (out_array,) if function.nout == 1 else out_array # Same for inputs, but here also convert if necessary. arrays = [] for input_, converter in zip(inputs, converters): input_ = getattr(input_, 'value', input_) arrays.append(converter(input_) if converter else input_) # Call our superclass's __array_ufunc__ result = super().__array_ufunc__(function, method, *arrays, **kwargs) # If unit is None, a plain array is expected (e.g., comparisons), which # means we're done. # We're also done if the result was None (for method 'at') or # NotImplemented, which can happen if other inputs/outputs override # __array_ufunc__; hopefully, they can then deal with us. if unit is None or result is None or result is NotImplemented: return result return self._result_as_quantity(result, unit, out) def _result_as_quantity(self, result, unit, out): """Turn result into a quantity with the given unit. If no output is given, it will take a view of the array as a quantity, and set the unit. If output is given, those should be quantity views of the result arrays, and the function will just set the unit. Parameters ---------- result : ndarray or tuple thereof Array(s) which need to be turned into quantity. unit : `~astropy.units.Unit` Unit for the quantities to be returned (or `None` if the result should not be a quantity). Should be tuple if result is a tuple. out : `~astropy.units.Quantity` or None Possible output quantity. Should be `None` or a tuple if result is a tuple. Returns ------- out : `~astropy.units.Quantity` With units set.
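Examples
--------
A minimal illustration (an editor's sketch, not part of the upstream
docstring), using only the public `Quantity` API: with no pre-allocated
output, a plain array result is wrapped in a view with the given unit.

>>> import numpy as np
>>> import astropy.units as u
>>> q = u.Quantity([1., 2.], u.m)
>>> q._result_as_quantity(np.arange(2.), u.s, None)
<Quantity [0., 1.] s>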
""" if isinstance(result, (tuple, list)): if out is None: out = (None,) * len(result) return result.__class__( self._result_as_quantity(result_, unit_, out_) for (result_, unit_, out_) in zip(result, unit, out)) if out is None: # View the result array as a Quantity with the proper unit. return result if unit is None else self._new_view(result, unit) # For given output, just set the unit. We know the unit is not None and # the output is of the correct Quantity subclass, as it was passed # through check_output. out._set_unit(unit) return out def __quantity_subclass__(self, unit): """ Overridden by subclasses to change what kind of view is created based on the output unit of an operation. Parameters ---------- unit : UnitBase The unit for which the appropriate class should be returned Returns ------- tuple : - `~astropy.units.Quantity` subclass - bool: True if subclasses of the given class are ok """ return Quantity, True def _new_view(self, obj=None, unit=None): """ Create a Quantity view of some array-like input, and set the unit By default, return a view of ``obj`` of the same class as ``self`` and with the same unit. Subclasses can override the type of class for a given unit using ``__quantity_subclass__``, and can ensure properties other than the unit are copied using ``__array_finalize__``. If the given unit defines a ``_quantity_class`` of which ``self`` is not an instance, a view using this class is taken. Parameters ---------- obj : ndarray or scalar, optional The array to create a view of. If obj is a numpy or python scalar, it will be converted to an array scalar. By default, ``self`` is converted. unit : unit-like, optional The unit of the resulting object. It is used to select a subclass, and explicitly assigned to the view if given. If not given, the subclass and unit will be that of ``self``. Returns ------- view : `~astropy.units.Quantity` subclass """ # Determine the unit and quantity subclass that we need for the view. if unit is None: unit = self.unit quantity_subclass = self.__class__ elif unit is self.unit and self.__class__ is Quantity: # The second part is because we should not presume what other # classes want to do for the same unit. E.g., Constant will # always want to fall back to Quantity, and relies on going # through `__quantity_subclass__`. quantity_subclass = Quantity else: unit = Unit(unit) quantity_subclass = getattr(unit, '_quantity_class', Quantity) if isinstance(self, quantity_subclass): quantity_subclass, subok = self.__quantity_subclass__(unit) if subok: quantity_subclass = self.__class__ # We only want to propagate information from ``self`` to our new view, # so obj should be a regular array. By using ``np.array``, we also # convert python and numpy scalars, which cannot be viewed as arrays # and thus not as Quantity either, to zero-dimensional arrays. # (These are turned back into scalar in `.value`) # Note that for an ndarray input, the np.array call takes only double # ``obj.__class is np.ndarray``. So, not worth special-casing. if obj is None: obj = self.view(np.ndarray) else: obj = np.array(obj, copy=False, subok=True) # Take the view, set the unit, and update possible other properties # such as ``info``, ``wrap_angle`` in `Longitude`, etc. view = obj.view(quantity_subclass) view._set_unit(unit) view.__array_finalize__(self) return view def _set_unit(self, unit): """Set the unit. 
This is used anywhere the unit is set or modified, i.e., in the initializer, in ``__imul__`` and ``__itruediv__`` for in-place multiplication and division by another unit, as well as in ``__array_finalize__`` for wrapping up views. For Quantity, it just sets the unit, but subclasses can override it to check that, e.g., a unit is consistent. """ if not isinstance(unit, UnitBase): if (isinstance(self._unit, StructuredUnit) or isinstance(unit, StructuredUnit)): unit = StructuredUnit(unit, self.dtype) else: # Trying to go through a string ensures that, e.g., Magnitudes with # dimensionless physical unit become Quantity with units of mag. unit = Unit(str(unit), parse_strict='silent') if not isinstance(unit, (UnitBase, StructuredUnit)): raise UnitTypeError( "{} instances require normal units, not {} instances." .format(type(self).__name__, type(unit))) self._unit = unit def __deepcopy__(self, memo): # If we don't define this, ``copy.deepcopy(quantity)`` will # return a bare Numpy array. return self.copy() def __reduce__(self): # patch to pickle Quantity objects (ndarray subclasses), see # http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html object_state = list(super().__reduce__()) object_state[2] = (object_state[2], self.__dict__) return tuple(object_state) def __setstate__(self, state): # patch to unpickle Quantity objects (ndarray subclasses), see # http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html nd_state, own_state = state super().__setstate__(nd_state) self.__dict__.update(own_state) info = QuantityInfo() def _to_value(self, unit, equivalencies=[]): """Helper method for to and to_value.""" if equivalencies == []: equivalencies = self._equivalencies if not self.dtype.names or isinstance(self.unit, StructuredUnit): # Standard path: let the unit do the work. return self.unit.to(unit, self.view(np.ndarray), equivalencies=equivalencies) else: # The .to() method of a simple unit cannot convert a structured # dtype, so we work around it by recursing. # TODO: deprecate this? # Convert simple to Structured on initialization? result = np.empty_like(self.view(np.ndarray)) for name in self.dtype.names: result[name] = self[name]._to_value(unit, equivalencies) return result def to(self, unit, equivalencies=[], copy=True): """ Return a new `~astropy.units.Quantity` object with the specified unit. Parameters ---------- unit : unit-like An object that represents the unit to convert to. Must be an `~astropy.units.UnitBase` object or a string parseable by the `~astropy.units` package. equivalencies : list of tuple A list of equivalence pairs to try if the units are not directly convertible. See :ref:`astropy:unit_equivalencies`. If not provided or ``[]``, class default equivalencies will be used (none for `~astropy.units.Quantity`, but may be set for subclasses). If `None`, no equivalencies will be applied at all, not even any set globally or within a context. copy : bool, optional If `True` (default), then the value is copied. Otherwise, a copy will only be made if necessary. See also -------- to_value : get the numerical value in a given unit. """ # We don't use `to_value` below since we always want to make a copy # and don't want to slow down this method (esp. the scalar case). unit = Unit(unit) if copy: # Avoid using to_value to ensure that we make a copy. We also # don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies) else: # to_value only copies if necessary value = self.to_value(unit, equivalencies) return self._new_view(value, unit) def to_value(self, unit=None, equivalencies=[]): """ The numerical value, possibly in a different unit. Parameters ---------- unit : unit-like, optional The unit in which the value should be given. If not given or `None`, use the current unit. equivalencies : list of tuple, optional A list of equivalence pairs to try if the units are not directly convertible (see :ref:`astropy:unit_equivalencies`). If not provided or ``[]``, class default equivalencies will be used (none for `~astropy.units.Quantity`, but may be set for subclasses). If `None`, no equivalencies will be applied at all, not even any set globally or within a context. Returns ------- value : ndarray or scalar The value in the units specified. For arrays, this will be a view of the data if no unit conversion was necessary. See also -------- to : Get a new instance in a different unit. """ if unit is None or unit is self.unit: value = self.view(np.ndarray) elif not self.dtype.names: # For non-structured, we attempt a short-cut, where we just get # the scale. If that is 1, we do not have to do anything. unit = Unit(unit) # We want a view if the unit does not change. One could check # with "==", but that calculates the scale that we need anyway. # TODO: would be better for `unit.to` to have an in-place flag. try: scale = self.unit._to(unit) except Exception: # Short-cut failed; try default (maybe equivalencies help). value = self._to_value(unit, equivalencies) else: value = self.view(np.ndarray) if not is_effectively_unity(scale): # not in-place! value = value * scale else: # For structured arrays, we go the default route. value = self._to_value(unit, equivalencies) # Index with empty tuple to decay array scalars in to numpy scalars. return value if value.shape else value[()] value = property(to_value, doc="""The numerical value of this instance. See also -------- to_value : Get the numerical value in a given unit. """) @property def unit(self): """ A `~astropy.units.UnitBase` object representing the unit of this quantity. """ return self._unit @property def equivalencies(self): """ A list of equivalencies that will be applied by default during unit conversions. """ return self._equivalencies def _recursively_apply(self, func): """Apply function recursively to every field. Returns a copy with the result. """ result = np.empty_like(self) result_value = result.view(np.ndarray) result_unit = () for name in self.dtype.names: part = func(self[name]) result_value[name] = part.value result_unit += (part.unit,) result._set_unit(result_unit) return result @property def si(self): """ Returns a copy of the current `Quantity` instance with SI units. The value of the resulting object will be scaled. """ if self.dtype.names: return self._recursively_apply(operator.attrgetter('si')) si_unit = self.unit.si return self._new_view(self.value * si_unit.scale, si_unit / si_unit.scale) @property def cgs(self): """ Returns a copy of the current `Quantity` instance with CGS units. The value of the resulting object will be scaled. """ if self.dtype.names: return self._recursively_apply(operator.attrgetter('cgs')) cgs_unit = self.unit.cgs return self._new_view(self.value * cgs_unit.scale, cgs_unit / cgs_unit.scale) @property def isscalar(self): """ True if the `value` of this quantity is a scalar, or False if it is an array-like object. .. 
note:: This is subtly different from `numpy.isscalar` in that `numpy.isscalar` returns False for a zero-dimensional array (e.g. ``np.array(1)``), while this is True for quantities, since quantities cannot represent true numpy scalars. """ return not self.shape # This flag controls whether convenience conversion members, such # as `q.m` equivalent to `q.to_value(u.m)` are available. This is # not turned on on Quantity itself, but is on some subclasses of # Quantity, such as `astropy.coordinates.Angle`. _include_easy_conversion_members = False @override__dir__ def __dir__(self): """ Quantities are able to directly convert to other units that have the same physical type. This function is implemented in order to make autocompletion still work correctly in IPython. """ if not self._include_easy_conversion_members: return [] extra_members = set() equivalencies = Unit._normalize_equivalencies(self.equivalencies) for equivalent in self.unit._get_units_with_same_physical_type( equivalencies): extra_members.update(equivalent.names) return extra_members def __getattr__(self, attr): """ Quantities are able to directly convert to other units that have the same physical type. """ if not self._include_easy_conversion_members: raise AttributeError( f"'{self.__class__.__name__}' object has no '{attr}' member") def get_virtual_unit_attribute(): registry = get_current_unit_registry().registry to_unit = registry.get(attr, None) if to_unit is None: return None try: return self.unit.to( to_unit, self.value, equivalencies=self.equivalencies) except UnitsError: return None value = get_virtual_unit_attribute() if value is None: raise AttributeError( f"{self.__class__.__name__} instance has no attribute '{attr}'") else: return value # Equality needs to be handled explicitly as ndarray.__eq__ gives # DeprecationWarnings on any error, which is distracting, and does not # deal well with structured arrays (nor does the ufunc). def __eq__(self, other): try: other_value = self._to_own_unit(other) except UnitsError: return False except Exception: return NotImplemented return self.value.__eq__(other_value) def __ne__(self, other): try: other_value = self._to_own_unit(other) except UnitsError: return True except Exception: return NotImplemented return self.value.__ne__(other_value) # Unit conversion operator (<<). def __lshift__(self, other): try: other = Unit(other, parse_strict='silent') except UnitTypeError: return NotImplemented return self.__class__(self, other, copy=False, subok=True) def __ilshift__(self, other): try: other = Unit(other, parse_strict='silent') except UnitTypeError: return NotImplemented try: factor = self.unit._to(other) except Exception: # Maybe via equivalencies? Now we do make a temporary copy. try: value = self._to_value(other) except UnitConversionError: return NotImplemented self.view(np.ndarray)[...] = value else: self.view(np.ndarray)[...] *= factor self._set_unit(other) return self def __rlshift__(self, other): if not self.isscalar: return NotImplemented return Unit(self).__rlshift__(other) # Give warning for other >> self, since probably other << self was meant. def __rrshift__(self, other): warnings.warn(">> is not implemented. Did you mean to convert " "something to this quantity as a unit using '<<'?", AstropyWarning) return NotImplemented # Also define __rshift__ and __irshift__ so we override default ndarray # behaviour, but instead of emitting a warning here, let it be done by # other (which likely is a unit if this was a mistake). 
def __rshift__(self, other): return NotImplemented def __irshift__(self, other): return NotImplemented # Arithmetic operations def __mul__(self, other): """ Multiplication between `Quantity` objects and other objects.""" if isinstance(other, (UnitBase, str)): try: return self._new_view(self.copy(), other * self.unit) except UnitsError: # let other try to deal with it return NotImplemented return super().__mul__(other) def __imul__(self, other): """In-place multiplication between `Quantity` objects and others.""" if isinstance(other, (UnitBase, str)): self._set_unit(other * self.unit) return self return super().__imul__(other) def __rmul__(self, other): """ Right multiplication between `Quantity` objects and other objects. """ return self.__mul__(other) def __truediv__(self, other): """ Division between `Quantity` objects and other objects.""" if isinstance(other, (UnitBase, str)): try: return self._new_view(self.copy(), self.unit / other) except UnitsError: # let other try to deal with it return NotImplemented return super().__truediv__(other) def __itruediv__(self, other): """In-place division between `Quantity` objects and other objects.""" if isinstance(other, (UnitBase, str)): self._set_unit(self.unit / other) return self return super().__itruediv__(other) def __rtruediv__(self, other): """ Right division between `Quantity` objects and other objects.""" if isinstance(other, (UnitBase, str)): return self._new_view(1. / self.value, other / self.unit) return super().__rtruediv__(other) def __pow__(self, other): if isinstance(other, Fraction): # Avoid getting object arrays by raising the value to a Fraction. return self._new_view(self.value ** float(other), self.unit ** other) return super().__pow__(other) # Other overrides of special functions def __hash__(self): return hash(self.value) ^ hash(self.unit) def __iter__(self): if self.isscalar: raise TypeError( "'{cls}' object with a scalar value is not iterable" .format(cls=self.__class__.__name__)) # Otherwise return a generator def quantity_iter(): for val in self.value: yield self._new_view(val) return quantity_iter() def __getitem__(self, key): if isinstance(key, str) and isinstance(self.unit, StructuredUnit): return self._new_view(self.view(np.ndarray)[key], self.unit[key]) try: out = super().__getitem__(key) except IndexError: # We want zero-dimensional Quantity objects to behave like scalars, # so they should raise a TypeError rather than an IndexError. if self.isscalar: raise TypeError( "'{cls}' object with a scalar value does not support " "indexing".format(cls=self.__class__.__name__)) else: raise # For single elements, ndarray.__getitem__ returns scalars; these # need a new view as a Quantity. if not isinstance(out, np.ndarray): out = self._new_view(out) return out def __setitem__(self, i, value): if isinstance(i, str): # Indexing will cause a different unit, so by doing this in # two steps we effectively try with the right unit. self[i][...] = value return # update indices in info if the info property has been accessed # (in which case 'info' in self.__dict__ is True; this is guaranteed # to be the case if we're part of a table). if not self.isscalar and 'info' in self.__dict__: self.info.adjust_indices(i, value, len(self)) self.view(np.ndarray).__setitem__(i, self._to_own_unit(value)) # __contains__ is OK def __bool__(self): """Quantities should always be treated as non-False; there is too much potential for ambiguity otherwise. """ warnings.warn('The truth value of a Quantity is ambiguous. 
' 'In the future this will raise a ValueError.', AstropyDeprecationWarning) return True def __len__(self): if self.isscalar: raise TypeError("'{cls}' object with a scalar value has no " "len()".format(cls=self.__class__.__name__)) else: return len(self.value) # Numerical types def __float__(self): try: return float(self.to_value(dimensionless_unscaled)) except (UnitsError, TypeError): raise TypeError('only dimensionless scalar quantities can be ' 'converted to Python scalars') def __int__(self): try: return int(self.to_value(dimensionless_unscaled)) except (UnitsError, TypeError): raise TypeError('only dimensionless scalar quantities can be ' 'converted to Python scalars') def __index__(self): # for indices, we do not want to mess around with scaling at all, # so unlike for float, int, we insist here on unscaled dimensionless try: assert self.unit.is_unity() return self.value.__index__() except Exception: raise TypeError('only integer dimensionless scalar quantities ' 'can be converted to a Python index') # TODO: we may want to add a hook for dimensionless quantities? @property def _unitstr(self): if self.unit is None: unitstr = _UNIT_NOT_INITIALISED else: unitstr = str(self.unit) if unitstr: unitstr = ' ' + unitstr return unitstr def to_string(self, unit=None, precision=None, format=None, subfmt=None): """ Generate a string representation of the quantity and its unit. The behavior of this function can be altered via the `numpy.set_printoptions` function and its various keywords. The exception to this is the ``threshold`` keyword, which is controlled via the ``[units.quantity]`` configuration item ``latex_array_threshold``. This is treated separately because the numpy default of 1000 is too big for most browsers to handle. Parameters ---------- unit : unit-like, optional Specifies the unit. If not provided, the unit used to initialize the quantity will be used. precision : number, optional The level of decimal precision. If `None`, or not provided, it will be determined from NumPy print options. format : str, optional The format of the result. If not provided, an unadorned string is returned. Supported values are: - 'latex': Return a LaTeX-formatted string subfmt : str, optional Subformat of the result. For the moment, only used for format="latex". Supported values are: - 'inline': Use ``$ ... $`` as delimiters. - 'display': Use ``$\\displaystyle ... $`` as delimiters. 
Returns ------- str A string with the contents of this Quantity. """ if unit is not None and unit != self.unit: return self.to(unit).to_string( unit=None, precision=precision, format=format, subfmt=subfmt) formats = { None: None, "latex": { None: ("$", "$"), "inline": ("$", "$"), "display": (r"$\displaystyle ", r"$"), }, } if format not in formats: raise ValueError(f"Unknown format '{format}'") elif format is None: if precision is None: # Use default formatting settings return f'{self.value}{self._unitstr:s}' else: # np.array2string properly formats arrays as well as scalars return np.array2string(self.value, precision=precision, floatmode="fixed") + self._unitstr # else, for the moment we assume format="latex" # Set the precision if set, otherwise use numpy default pops = np.get_printoptions() format_spec = f".{precision if precision is not None else pops['precision']}g" def float_formatter(value): return Latex.format_exponential_notation(value, format_spec=format_spec) def complex_formatter(value): return '({}{}i)'.format( Latex.format_exponential_notation(value.real, format_spec=format_spec), Latex.format_exponential_notation(value.imag, format_spec='+' + format_spec)) # The view is needed for the scalar case - self.value might be float. latex_value = np.array2string( self.view(np.ndarray), threshold=(conf.latex_array_threshold if conf.latex_array_threshold > -1 else pops['threshold']), formatter={'float_kind': float_formatter, 'complex_kind': complex_formatter}, max_line_width=np.inf, separator=',~') latex_value = latex_value.replace('...', r'\dots') # Format unit # [1:-1] strips the '$' on either side needed for math mode latex_unit = (self.unit._repr_latex_()[1:-1] # note this is unicode if self.unit is not None else _UNIT_NOT_INITIALISED) delimiter_left, delimiter_right = formats[format][subfmt] return rf'{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}' def __str__(self): return self.to_string() def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=prefixstr) return f'{prefixstr}{arrstr}{self._unitstr:s}>' def _repr_latex_(self): """ Generate a LaTeX representation of the quantity and its unit. Returns ------- lstr A LaTeX string with the contents of this Quantity. """ # NOTE: This should change to display format in a future release return self.to_string(format='latex', subfmt='inline') def __format__(self, format_spec): """ Format quantities using the new-style python formatting codes as specifiers for the number. If the format specifier correctly applies itself to the value, then it is used to format only the value. If it cannot be applied to the value, then it is applied to the whole string. """ try: value = format(self.value, format_spec) full_format_spec = "s" except ValueError: value = self.value full_format_spec = format_spec return format(f"{value}{self._unitstr:s}", full_format_spec) def decompose(self, bases=[]): """ Generates a new `Quantity` with the units decomposed. Decomposed units have only irreducible units in them (see `astropy.units.UnitBase.decompose`). Parameters ---------- bases : sequence of `~astropy.units.UnitBase`, optional The bases to decompose into. When not provided, decomposes down to any irreducible units. When provided, the decomposed result will only contain the given units. This will raise a `~astropy.units.UnitsError` if it's not possible to do so.
Returns ------- newq : `~astropy.units.Quantity` A new object equal to this quantity with units decomposed. """ return self._decompose(False, bases=bases) def _decompose(self, allowscaledunits=False, bases=[]): """ Generates a new `Quantity` with the units decomposed. Decomposed units have only irreducible units in them (see `astropy.units.UnitBase.decompose`). Parameters ---------- allowscaledunits : bool If True, the resulting `Quantity` may have a scale factor associated with it. If False, any scaling in the unit will be subsumed into the value of the resulting `Quantity`. bases : sequence of UnitBase, optional The bases to decompose into. When not provided, decomposes down to any irreducible units. When provided, the decomposed result will only contain the given units. This will raise a `~astropy.units.UnitsError` if it's not possible to do so. Returns ------- newq : `~astropy.units.Quantity` A new object equal to this quantity with units decomposed. """ new_unit = self.unit.decompose(bases=bases) # Be careful here because self.value usually is a view of self; # be sure that the original value is not being modified. if not allowscaledunits and hasattr(new_unit, 'scale'): new_value = self.value * new_unit.scale new_unit = new_unit / new_unit.scale return self._new_view(new_value, new_unit) else: return self._new_view(self.copy(), new_unit) # These functions need to be overridden to take into account the units # Array conversion # https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion def item(self, *args): """Copy an element of an array to a scalar Quantity and return it. Like :meth:`~numpy.ndarray.item` except that it always returns a `Quantity`, not a Python scalar. """ return self._new_view(super().item(*args)) def tolist(self): raise NotImplementedError("cannot make a list of Quantities. Get " "list of values with q.value.tolist()") def _to_own_unit(self, value, check_precision=True): try: _value = value.to_value(self.unit) except AttributeError: # We're not a Quantity. # First remove two special cases (with a fast test): # 1) Maybe masked printing? MaskedArray with quantities does not # work very well, but no reason to break even repr and str. # 2) np.ma.masked? useful if we're a MaskedQuantity. if (value is np.ma.masked or (value is np.ma.masked_print_option and self.dtype.kind == 'O')): return value # Now, let's try a more general conversion. # Plain arrays will be converted to dimensionless in the process, # but anything with a unit attribute will use that. try: as_quantity = Quantity(value) _value = as_quantity.to_value(self.unit) except UnitsError: # last chance: if this was not something with a unit # and is all 0, inf, or nan, we treat it as arbitrary unit. if (not hasattr(value, 'unit') and can_have_arbitrary_unit(as_quantity.value)): _value = as_quantity.value else: raise if self.dtype.kind == 'i' and check_precision: # If, e.g., we are casting float to int, we want to fail if # precision is lost, but let things pass if it works. _value = np.array(_value, copy=False, subok=True) if not np.can_cast(_value.dtype, self.dtype): self_dtype_array = np.array(_value, self.dtype, subok=True) if not np.all(np.logical_or(self_dtype_array == _value, np.isnan(_value))): raise TypeError("cannot convert value type to array type " "without precision loss") # Setting names to ensure things like equality work (note that # above will have failed already if units did not match).
if self.dtype.names: _value.dtype.names = self.dtype.names return _value def itemset(self, *args): if len(args) == 0: raise ValueError("itemset must have at least one argument") self.view(np.ndarray).itemset(*(args[:-1] + (self._to_own_unit(args[-1]),))) def tostring(self, order='C'): raise NotImplementedError("cannot write Quantities to string. Write " "array with q.value.tostring(...).") def tobytes(self, order='C'): raise NotImplementedError("cannot write Quantities to string. Write " "array with q.value.tobytes(...).") def tofile(self, fid, sep="", format="%s"): raise NotImplementedError("cannot write Quantities to file. Write " "array with q.value.tofile(...)") def dump(self, file): raise NotImplementedError("cannot dump Quantities to file. Write " "array with q.value.dump()") def dumps(self): raise NotImplementedError("cannot dump Quantities to string. Write " "array with q.value.dumps()") # astype, byteswap, copy, view, getfield, setflags OK as is def fill(self, value): self.view(np.ndarray).fill(self._to_own_unit(value)) # Shape manipulation: resize cannot be done (does not own data), but # shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only # the flat iterator needs to be overwritten, otherwise single items are # returned as numbers. @property def flat(self): """A 1-D iterator over the Quantity array. This returns a ``QuantityIterator`` instance, which behaves the same as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`, and is similar to, but not a subclass of, Python's built-in iterator object. """ return QuantityIterator(self) @flat.setter def flat(self, value): y = self.ravel() y[:] = value # Item selection and manipulation # repeat, sort, compress, diagonal OK def take(self, indices, axis=None, out=None, mode='raise'): out = super().take(indices, axis=axis, out=out, mode=mode) # For single elements, ndarray.take returns scalars; these # need a new view as a Quantity. if type(out) is not type(self): out = self._new_view(out) return out def put(self, indices, values, mode='raise'): self.view(np.ndarray).put(indices, self._to_own_unit(values), mode) def choose(self, choices, out=None, mode='raise'): raise NotImplementedError("cannot choose based on quantity. Choose " "using array with q.value.choose(...)") # ensure we do not return indices as quantities def argsort(self, axis=-1, kind='quicksort', order=None): return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order) def searchsorted(self, v, *args, **kwargs): return np.searchsorted(np.array(self), self._to_own_unit(v, check_precision=False), *args, **kwargs) # avoid numpy 1.6 problem def argmax(self, axis=None, out=None): return self.view(np.ndarray).argmax(axis, out=out) def argmin(self, axis=None, out=None): return self.view(np.ndarray).argmin(axis, out=out) def __array_function__(self, function, types, args, kwargs): """Wrap numpy functions, taking care of units. Parameters ---------- function : callable Numpy function to wrap types : iterable of classes Classes that provide an ``__array_function__`` override. Can in principle be used to interact with other classes. Below, mostly passed on to `~numpy.ndarray`, which can only interact with subclasses. args : tuple Positional arguments provided in the function call. kwargs : dict Keyword arguments provided in the function call. Returns ------- result: `~astropy.units.Quantity`, `~numpy.ndarray` As appropriate for the function. 
If the function is not supported, `NotImplemented` is returned, which will lead to a `TypeError` unless another argument overrode the function. Raises ------ ~astropy.units.UnitsError If operands have incompatible units. """ # A function should be in one of the following sets or dicts: # 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation # supports Quantity; we pass on to ndarray.__array_function__. # 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable # after converting quantities to arrays with suitable units, # and possibly setting units on the result. # 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but # requires a Quantity-specific implementation. # 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense. # For now, since we may not yet have complete coverage, if a # function is in none of the above, we simply call the numpy # implementation. if function in SUBCLASS_SAFE_FUNCTIONS: return super().__array_function__(function, types, args, kwargs) elif function in FUNCTION_HELPERS: function_helper = FUNCTION_HELPERS[function] try: args, kwargs, unit, out = function_helper(*args, **kwargs) except NotImplementedError: return self._not_implemented_or_raise(function, types) result = super().__array_function__(function, types, args, kwargs) # Fall through to return section elif function in DISPATCHED_FUNCTIONS: dispatched_function = DISPATCHED_FUNCTIONS[function] try: result, unit, out = dispatched_function(*args, **kwargs) except NotImplementedError: return self._not_implemented_or_raise(function, types) # Fall through to return section elif function in UNSUPPORTED_FUNCTIONS: return NotImplemented else: warnings.warn("function '{}' is not known to astropy's Quantity. " "Will run it anyway, hoping it will treat ndarray " "subclasses correctly. Please raise an issue at " "https://github.com/astropy/astropy/issues. " .format(function.__name__), AstropyWarning) return super().__array_function__(function, types, args, kwargs) # If unit is None, a plain array is expected (e.g., boolean), which # means we're done. # We're also done if the result was NotImplemented, which can happen # if other inputs/outputs override __array_function__; # hopefully, they can then deal with us. if unit is None or result is NotImplemented: return result return self._result_as_quantity(result, unit, out=out) def _not_implemented_or_raise(self, function, types): # Our function helper or dispatcher found that the function does not # work with Quantity. In principle, there may be another class that # knows what to do with us, for which we should return NotImplemented. # But if there is ndarray (or a non-Quantity subclass of it) around, # it quite likely coerces, so we should just break. if any(issubclass(t, np.ndarray) and not issubclass(t, Quantity) for t in types): raise TypeError("the Quantity implementation cannot handle {} " "with the given arguments." .format(function)) from None else: return NotImplemented # Calculation -- override ndarray methods to take into account units. # We use the corresponding numpy functions to evaluate the results, since # the methods do not always allow calling with keyword arguments. # For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives # TypeError: 'a_max' is an invalid keyword argument for this function. def _wrap_function(self, function, *args, unit=None, out=None, **kwargs): """Wrap a numpy function that processes self, returning a Quantity. Parameters ---------- function : callable Numpy function to wrap. 
args : positional arguments Any positional arguments to the function beyond the first argument (which will be set to ``self``). kwargs : keyword arguments Keyword arguments to the function. If present, the following arguments are treated specially: unit : `~astropy.units.Unit` Unit of the output result. If not given, the unit of ``self``. out : `~astropy.units.Quantity` A Quantity instance in which to store the output. Notes ----- Output should always be assigned via a keyword argument, otherwise no proper account of the unit is taken. Returns ------- out : `~astropy.units.Quantity` Result of the function call, with the unit set properly. """ if unit is None: unit = self.unit # Ensure we don't loop back by turning any Quantity into array views. args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity) else arg) for arg in args) if out is not None: # If pre-allocated output is used, check it is suitable. # This also returns array view, to ensure we don't loop back. arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray)) kwargs['out'] = check_output(out, unit, arrays, function=function) # Apply the function and turn it back into a Quantity. result = function(*args, **kwargs) return self._result_as_quantity(result, unit, out) def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): return self._wrap_function(np.trace, offset, axis1, axis2, dtype, out=out) def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, unit=self.unit**2) def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims) def mean(self, axis=None, dtype=None, out=None, keepdims=False): return self._wrap_function(np.mean, axis, dtype, out=out, keepdims=keepdims) def round(self, decimals=0, out=None): return self._wrap_function(np.round, decimals, out=out) def dot(self, b, out=None): result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled) return self._wrap_function(np.dot, b, out=out, unit=result_unit) # Calculation: override methods that do not make sense. def all(self, axis=None, out=None): raise TypeError("cannot evaluate truth value of quantities. " "Evaluate array with q.value.all(...)") def any(self, axis=None, out=None): raise TypeError("cannot evaluate truth value of quantities. " "Evaluate array with q.value.any(...)") # Calculation: numpy functions that can be overridden with methods. def diff(self, n=1, axis=-1): return self._wrap_function(np.diff, n, axis) def ediff1d(self, to_end=None, to_begin=None): return self._wrap_function(np.ediff1d, to_end, to_begin) def nansum(self, axis=None, out=None, keepdims=False): return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims) def insert(self, obj, values, axis=None): """ Insert values along the given axis before the given indices and return a new `~astropy.units.Quantity` object. This is a thin wrapper around the `numpy.insert` function. Parameters ---------- obj : int, slice or sequence of int Object that defines the index or indices before which ``values`` is inserted. values : array-like Values to insert. If the type of ``values`` is different from that of the quantity, ``values`` is converted to the matching type. ``values`` should be shaped so that it can be broadcast appropriately. The unit of ``values`` must be consistent with this quantity. axis : int, optional Axis along which to insert ``values``.
If ``axis`` is None then the quantity array is flattened before insertion. Returns ------- out : `~astropy.units.Quantity` A copy of quantity with ``values`` inserted. Note that the insertion does not occur in-place: a new quantity array is returned. Examples -------- >>> import astropy.units as u >>> q = [1, 2] * u.m >>> q.insert(0, 50 * u.cm) <Quantity [ 0.5, 1., 2.] m> >>> q = [[1, 2], [3, 4]] * u.m >>> q.insert(1, [10, 20] * u.m, axis=0) <Quantity [[ 1., 2.], [ 10., 20.], [ 3., 4.]] m> >>> q.insert(1, 10 * u.m, axis=1) <Quantity [[ 1., 10., 2.], [ 3., 10., 4.]] m> """ out_array = np.insert(self.value, obj, self._to_own_unit(values), axis) return self._new_view(out_array) class SpecificTypeQuantity(Quantity): """Superclass for Quantities of specific physical type. Subclasses of these work just like :class:`~astropy.units.Quantity`, except that they are for specific physical types (and may have methods that are only appropriate for that type). Astropy examples are :class:`~astropy.coordinates.Angle` and :class:`~astropy.coordinates.Distance`. At a minimum, subclasses should set ``_equivalent_unit`` to the unit associated with the physical type. """ # The unit for the specific physical type. Instances can only be created # with units that are equivalent to this. _equivalent_unit = None # The default unit used for views. Even with `None`, views of arrays # without units are possible, but will have an uninitialized unit. _unit = None # Default unit for initialization through the constructor. _default_unit = None # Ensure that we get precedence over our superclass. __array_priority__ = Quantity.__array_priority__ + 10 def __quantity_subclass__(self, unit): if unit.is_equivalent(self._equivalent_unit): return type(self), True else: return super().__quantity_subclass__(unit)[0], False def _set_unit(self, unit): if unit is None or not unit.is_equivalent(self._equivalent_unit): raise UnitTypeError( "{} instances require units equivalent to '{}'" .format(type(self).__name__, self._equivalent_unit) + (", but no unit was given." if unit is None else f", so cannot set it to '{unit}'.")) super()._set_unit(unit) def isclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs): """ Return a boolean array where two arrays are element-wise equal within a tolerance. Parameters ---------- a, b : array-like or `~astropy.units.Quantity` Input values or arrays to compare. rtol : array-like or `~astropy.units.Quantity` The relative tolerance for the comparison, which defaults to ``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`, then it must be dimensionless. atol : number or `~astropy.units.Quantity` The absolute tolerance for the comparison. The units (or lack thereof) of ``a``, ``b``, and ``atol`` must be consistent with each other. If `None`, ``atol`` defaults to zero in the appropriate units. equal_nan : `bool` Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will be considered equal to NaNs in ``b``. Notes ----- This is a :class:`~astropy.units.Quantity`-aware version of :func:`numpy.isclose`. However, this differs from the `numpy` function in that the default for the absolute tolerance here is zero instead of ``atol=1e-8`` in `numpy`, as there is no natural way to set a default *absolute* tolerance given two inputs that may have differently scaled units. Raises ------ `~astropy.units.UnitsError` If the dimensions of ``a``, ``b``, or ``atol`` are incompatible, or if ``rtol`` is not dimensionless.
See also -------- allclose """ unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol) return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs) def allclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs) -> bool: """ Whether two arrays are element-wise equal within a tolerance. Parameters ---------- a, b : array-like or `~astropy.units.Quantity` Input values or arrays to compare. rtol : array-like or `~astropy.units.Quantity` The relative tolerance for the comparison, which defaults to ``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`, then it must be dimensionless. atol : number or `~astropy.units.Quantity` The absolute tolerance for the comparison. The units (or lack thereof) of ``a``, ``b``, and ``atol`` must be consistent with each other. If `None`, ``atol`` defaults to zero in the appropriate units. equal_nan : `bool` Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will be considered equal to NaNs in ``b``. Notes ----- This is a :class:`~astropy.units.Quantity`-aware version of :func:`numpy.allclose`. However, this differs from the `numpy` function in that the default for the absolute tolerance here is zero instead of ``atol=1e-8`` in `numpy`, as there is no natural way to set a default *absolute* tolerance given two inputs that may have differently scaled units. Raises ------ `~astropy.units.UnitsError` If the dimensions of ``a``, ``b``, or ``atol`` are incompatible, or if ``rtol`` is not dimensionless. See also -------- isclose """ unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol) return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs) def _unquantify_allclose_arguments(actual, desired, rtol, atol): actual = Quantity(actual, subok=True, copy=False) desired = Quantity(desired, subok=True, copy=False) try: desired = desired.to(actual.unit) except UnitsError: raise UnitsError( f"Units for 'desired' ({desired.unit}) and 'actual' " f"({actual.unit}) are not convertible" ) if atol is None: # By default, we assume an absolute tolerance of zero in the # appropriate units. The default value of None for atol is # needed because the units of atol must be consistent with the # units for a and b. atol = Quantity(0) else: atol = Quantity(atol, subok=True, copy=False) try: atol = atol.to(actual.unit) except UnitsError: raise UnitsError( f"Units for 'atol' ({atol.unit}) and 'actual' " f"({actual.unit}) are not convertible" ) rtol = Quantity(rtol, subok=True, copy=False) try: rtol = rtol.to(dimensionless_unscaled) except Exception: raise UnitsError("'rtol' should be dimensionless") return actual.value, desired.value, rtol.value, atol.value
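# Example (an editor's illustrative sketch, not part of the module above):
# behaviour of the Quantity-aware ``isclose`` and ``allclose`` defined here,
# assuming they are re-exported in the public ``astropy.units`` namespace as
# in released astropy. Unlike their numpy counterparts, the default absolute
# tolerance is zero, so ``atol`` must be given explicitly (in units
# consistent with the inputs) whenever it matters:
#
# >>> import astropy.units as u
# >>> u.isclose(100.001 * u.m, 0.1 * u.km)  # rtol=1e-5 covers the difference
# True
# >>> u.allclose([1., 2.] * u.m, [100., 201.] * u.cm, atol=2 * u.cm)
# True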
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines the SI units. They are also available in the `astropy.units` namespace. """ from astropy.constants import si as _si from .core import UnitBase, Unit, def_unit import numpy as _numpy _ns = globals() ########################################################################### # DIMENSIONLESS def_unit(['percent', 'pct'], Unit(0.01), namespace=_ns, prefixes=False, doc="percent: one hundredth of unity, factor 0.01", format={'generic': '%', 'console': '%', 'cds': '%', 'latex': r'\%', 'unicode': '%'}) ########################################################################### # LENGTH def_unit(['m', 'meter'], namespace=_ns, prefixes=True, doc="meter: base unit of length in SI") def_unit(['micron'], um, namespace=_ns, doc="micron: alias for micrometer (um)", format={'latex': r'\mu m', 'unicode': '\N{MICRO SIGN}m'}) def_unit(['Angstrom', 'AA', 'angstrom'], 0.1 * nm, namespace=_ns, doc="ångström: 10 ** -10 m", prefixes=[(['m', 'milli'], ['milli', 'm'], 1.e-3)], format={'latex': r'\mathring{A}', 'unicode': 'Å', 'vounit': 'Angstrom'}) ########################################################################### # VOLUMES def_unit((['l', 'L'], ['liter']), 1000 * cm ** 3.0, namespace=_ns, prefixes=True, format={'latex': r'\mathcal{l}', 'unicode': 'ℓ'}, doc="liter: metric unit of volume") ########################################################################### # ANGULAR MEASUREMENTS def_unit(['rad', 'radian'], namespace=_ns, prefixes=True, doc="radian: angular measurement of the ratio between the length " "on an arc and its radius") def_unit(['deg', 'degree'], _numpy.pi / 180.0 * rad, namespace=_ns, prefixes=True, doc="degree: angular measurement 1/360 of full rotation", format={'latex': r'{}^{\circ}', 'unicode': '°'}) def_unit(['hourangle'], 15.0 * deg, namespace=_ns, prefixes=False, doc="hour angle: angular measurement with 24 in a full circle", format={'latex': r'{}^{h}', 'unicode': 'ʰ'}) def_unit(['arcmin', 'arcminute'], 1.0 / 60.0 * deg, namespace=_ns, prefixes=True, doc="arc minute: angular measurement", format={'latex': r'{}^{\prime}', 'unicode': '′'}) def_unit(['arcsec', 'arcsecond'], 1.0 / 3600.0 * deg, namespace=_ns, prefixes=True, doc="arc second: angular measurement") # These special formats should only be used for the non-prefix versions arcsec._format = {'latex': r'{}^{\prime\prime}', 'unicode': '″'} def_unit(['mas'], 0.001 * arcsec, namespace=_ns, doc="milli arc second: angular measurement") def_unit(['uas'], 0.000001 * arcsec, namespace=_ns, doc="micro arc second: angular measurement", format={'latex': r'\mu as', 'unicode': 'μas'}) def_unit(['sr', 'steradian'], rad ** 2, namespace=_ns, prefixes=True, doc="steradian: unit of solid angle in SI") ########################################################################### # TIME def_unit(['s', 'second'], namespace=_ns, prefixes=True, exclude_prefixes=['a'], doc="second: base unit of time in SI.") def_unit(['min', 'minute'], 60 * s, prefixes=True, namespace=_ns) def_unit(['h', 'hour', 'hr'], 3600 * s, namespace=_ns, prefixes=True, exclude_prefixes=['p']) def_unit(['d', 'day'], 24 * h, namespace=_ns, prefixes=True, exclude_prefixes=['c', 'y']) def_unit(['sday'], 86164.09053 * s, namespace=_ns, doc="Sidereal day (sday) is the time of one rotation of the Earth.") def_unit(['wk', 'week'], 7 * day, namespace=_ns) def_unit(['fortnight'], 2 * wk, namespace=_ns) def_unit(['a', 'annum'], 365.25 * d, namespace=_ns, prefixes=True, 
exclude_prefixes=['P']) def_unit(['yr', 'year'], 365.25 * d, namespace=_ns, prefixes=True) ########################################################################### # FREQUENCY def_unit(['Hz', 'Hertz', 'hertz'], 1 / s, namespace=_ns, prefixes=True, doc="Frequency") ########################################################################### # MASS def_unit(['kg', 'kilogram'], namespace=_ns, doc="kilogram: base unit of mass in SI.") def_unit(['g', 'gram'], 1.0e-3 * kg, namespace=_ns, prefixes=True, exclude_prefixes=['k', 'kilo']) def_unit(['t', 'tonne'], 1000 * kg, namespace=_ns, doc="Metric tonne") ########################################################################### # AMOUNT OF SUBSTANCE def_unit(['mol', 'mole'], namespace=_ns, prefixes=True, doc="mole: amount of a chemical substance in SI.") ########################################################################### # TEMPERATURE def_unit( ['K', 'Kelvin'], namespace=_ns, prefixes=True, doc="Kelvin: temperature with a null point at absolute zero.") def_unit( ['deg_C', 'Celsius'], namespace=_ns, doc='Degrees Celsius', format={'latex': r'{}^{\circ}C', 'unicode': '°C'}) ########################################################################### # FORCE def_unit(['N', 'Newton', 'newton'], kg * m * s ** -2, namespace=_ns, prefixes=True, doc="Newton: force") ########################################################################## # ENERGY def_unit(['J', 'Joule', 'joule'], N * m, namespace=_ns, prefixes=True, doc="Joule: energy") def_unit(['eV', 'electronvolt'], _si.e.value * J, namespace=_ns, prefixes=True, doc="Electron Volt") ########################################################################## # PRESSURE def_unit(['Pa', 'Pascal', 'pascal'], J * m ** -3, namespace=_ns, prefixes=True, doc="Pascal: pressure") ########################################################################### # POWER def_unit(['W', 'Watt', 'watt'], J / s, namespace=_ns, prefixes=True, doc="Watt: power") ########################################################################### # ELECTRICAL def_unit(['A', 'ampere', 'amp'], namespace=_ns, prefixes=True, doc="ampere: base unit of electric current in SI") def_unit(['C', 'coulomb'], A * s, namespace=_ns, prefixes=True, doc="coulomb: electric charge") def_unit(['V', 'Volt', 'volt'], J * C ** -1, namespace=_ns, prefixes=True, doc="Volt: electric potential or electromotive force") def_unit((['Ohm', 'ohm'], ['Ohm']), V * A ** -1, namespace=_ns, prefixes=True, doc="Ohm: electrical resistance", format={'latex': r'\Omega', 'unicode': 'Ω'}) def_unit(['S', 'Siemens', 'siemens'], A * V ** -1, namespace=_ns, prefixes=True, doc="Siemens: electrical conductance") def_unit(['F', 'Farad', 'farad'], C * V ** -1, namespace=_ns, prefixes=True, doc="Farad: electrical capacitance") ########################################################################### # MAGNETIC def_unit(['Wb', 'Weber', 'weber'], V * s, namespace=_ns, prefixes=True, doc="Weber: magnetic flux") def_unit(['T', 'Tesla', 'tesla'], Wb * m ** -2, namespace=_ns, prefixes=True, doc="Tesla: magnetic flux density") def_unit(['H', 'Henry', 'henry'], Wb * A ** -1, namespace=_ns, prefixes=True, doc="Henry: inductance") ########################################################################### # ILLUMINATION def_unit(['cd', 'candela'], namespace=_ns, prefixes=True, doc="candela: base unit of luminous intensity in SI") def_unit(['lm', 'lumen'], cd * sr, namespace=_ns, prefixes=True, doc="lumen: luminous flux") def_unit(['lx', 'lux'], lm * m ** -2, namespace=_ns, 
prefixes=True, doc="lux: luminous emittance") ########################################################################### # RADIOACTIVITY def_unit(['Bq', 'becquerel'], 1 / s, namespace=_ns, prefixes=False, doc="becquerel: unit of radioactivity") def_unit(['Ci', 'curie'], Bq * 3.7e10, namespace=_ns, prefixes=False, doc="curie: unit of radioactivity") ########################################################################### # BASES bases = set([m, s, kg, A, cd, rad, K, mol]) ########################################################################### # CLEANUP del UnitBase del Unit del def_unit ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals())
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines miscellaneous units. They are also
available in the `astropy.units` namespace.
"""

from . import si
from astropy.constants import si as _si
from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes,
                   set_enabled_units)

# To ensure si units of the constants can be interpreted.
set_enabled_units([si])

import numpy as _numpy

_ns = globals()

###########################################################################
# AREAS

def_unit(['barn'], 10 ** -28 * si.m ** 2, namespace=_ns, prefixes=True,
         doc="barn: unit of area used in HEP")

###########################################################################
# ANGULAR MEASUREMENTS

def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad,
         namespace=_ns, prefixes=False,
         doc="cycle: angular measurement, a full turn or rotation")
def_unit(['spat', 'sp'], 4.0 * _numpy.pi * si.sr,
         namespace=_ns, prefixes=False,
         doc="spat: the solid angle of the sphere, 4pi sr")

##########################################################################
# PRESSURE

def_unit(['bar'], 1e5 * si.Pa, namespace=_ns,
         prefixes=[(['m'], ['milli'], 1.e-3)],
         doc="bar: pressure")
# The torr is almost the same as mmHg but not quite.
# See https://en.wikipedia.org/wiki/Torr
# Define the unit here despite it not being an astrophysical unit.
# It may be moved if more similar units are created later.
def_unit(['Torr', 'torr'], _si.atm.value/760. * si.Pa, namespace=_ns,
         prefixes=[(['m'], ['milli'], 1.e-3)],
         doc="Unit of pressure based on an absolute scale, now defined as "
             "exactly 1/760 of a standard atmosphere")

###########################################################################
# MASS

def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass",
         format={'latex': r'M_{p}', 'unicode': 'Mₚ'})
def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass",
         format={'latex': r'M_{e}', 'unicode': 'Mₑ'})
# Unified atomic mass unit
def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns,
         prefixes=True, exclude_prefixes=['a', 'da'],
         doc="Unified atomic mass unit")

###########################################################################
# COMPUTER

def_unit((['bit', 'b'], ['bit']), namespace=_ns,
         prefixes=si_prefixes + binary_prefixes)
def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns,
         format={'vounit': 'byte'},
         prefixes=si_prefixes + binary_prefixes,
         exclude_prefixes=['d'])
def_unit((['pix', 'pixel'], ['pixel']),
         format={'ogip': 'pixel', 'vounit': 'pixel'},
         namespace=_ns, prefixes=True)
def_unit((['vox', 'voxel'], ['voxel']),
         format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'},
         namespace=_ns, prefixes=True)

###########################################################################
# CLEANUP

del UnitBase
del def_unit
del si

###########################################################################
# DOCSTRING

# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
    __doc__ += _generate_unit_summary(globals())
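# ---------------------------------------------------------------------------
# Editor's sketch (not part of the module): the miscellaneous units above,
# exercised through the public `astropy.units` namespace.  `Mibyte` exists
# because `byte` is registered with both SI and binary prefixes.
import astropy.units as u

print((1 * u.Mibyte).to(u.byte))   # 1048576.0 byte
print((1 * u.bar).to(u.Pa))        # 100000.0 Pa
print((1 * u.u).to(u.kg))          # ~1.6605e-27 kg (unified atomic mass unit)
print((1 * u.cycle).to(u.rad))     # ~6.2832 rad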
# Licensed under a 3-clause BSD style license - see LICENSE.rst """A set of standard astronomical equivalencies.""" from collections import UserList # THIRD-PARTY import numpy as np import warnings # LOCAL from astropy.constants import si as _si from astropy.utils.exceptions import AstropyDeprecationWarning from astropy.utils.misc import isiterable from . import si from . import cgs from . import astrophys from . import misc from .function import units as function_units from . import dimensionless_unscaled from .core import UnitsError, Unit __all__ = ['parallax', 'spectral', 'spectral_density', 'doppler_radio', 'doppler_optical', 'doppler_relativistic', 'doppler_redshift', 'mass_energy', 'brightness_temperature', 'thermodynamic_temperature', 'beam_angular_area', 'dimensionless_angles', 'logarithmic', 'temperature', 'temperature_energy', 'molar_mass_amu', 'pixel_scale', 'plate_scale', "Equivalency"] class Equivalency(UserList): """ A container for a units equivalency. Attributes ---------- name: `str` The name of the equivalency. kwargs: `dict` Any positional or keyword arguments used to make the equivalency. """ def __init__(self, equiv_list, name='', kwargs=None): self.data = equiv_list self.name = [name] self.kwargs = [kwargs] if kwargs is not None else [dict()] def __add__(self, other): if isinstance(other, Equivalency): new = super().__add__(other) new.name = self.name[:] + other.name new.kwargs = self.kwargs[:] + other.kwargs return new else: return self.data.__add__(other) def __eq__(self, other): return (isinstance(other, self.__class__) and self.name == other.name and self.kwargs == other.kwargs) def dimensionless_angles(): """Allow angles to be equivalent to dimensionless (with 1 rad = 1 m/m = 1). It is special compared to other equivalency pairs in that it allows this independent of the power to which the angle is raised, and independent of whether it is part of a more complicated unit. """ return Equivalency([(si.radian, None)], "dimensionless_angles") def logarithmic(): """Allow logarithmic units to be converted to dimensionless fractions""" return Equivalency([ (dimensionless_unscaled, function_units.dex, np.log10, lambda x: 10.**x) ], "logarithmic") def parallax(): """ Returns a list of equivalence pairs that handle the conversion between parallax angle and distance. """ def parallax_converter(x): x = np.asanyarray(x) d = 1 / x if isiterable(d): d[d < 0] = np.nan return d else: if d < 0: return np.array(np.nan) else: return d return Equivalency([ (si.arcsecond, astrophys.parsec, parallax_converter) ], "parallax") def spectral(): """ Returns a list of equivalence pairs that handle spectral wavelength, wave number, frequency, and energy equivalencies. Allows conversions between wavelength units, wave number units, frequency units, and energy units as they relate to light. 
There are two types of wave number: * spectroscopic - :math:`1 / \\lambda` (per meter) * angular - :math:`2 \\pi / \\lambda` (radian per meter) """ hc = _si.h.value * _si.c.value two_pi = 2.0 * np.pi inv_m_spec = si.m ** -1 inv_m_ang = si.radian / si.m return Equivalency([ (si.m, si.Hz, lambda x: _si.c.value / x), (si.m, si.J, lambda x: hc / x), (si.Hz, si.J, lambda x: _si.h.value * x, lambda x: x / _si.h.value), (si.m, inv_m_spec, lambda x: 1.0 / x), (si.Hz, inv_m_spec, lambda x: x / _si.c.value, lambda x: _si.c.value * x), (si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x), (inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi), (si.m, inv_m_ang, lambda x: two_pi / x), (si.Hz, inv_m_ang, lambda x: two_pi * x / _si.c.value, lambda x: _si.c.value * x / two_pi), (si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi) ], "spectral") def spectral_density(wav, factor=None): """ Returns a list of equivalence pairs that handle spectral density with regard to wavelength and frequency. Parameters ---------- wav : `~astropy.units.Quantity` `~astropy.units.Quantity` associated with values being converted (e.g., wavelength or frequency). Notes ----- The ``factor`` argument is left for backward-compatibility with the syntax ``spectral_density(unit, factor)`` but users are encouraged to use ``spectral_density(factor * unit)`` instead. """ from .core import UnitBase if isinstance(wav, UnitBase): if factor is None: raise ValueError( 'If `wav` is specified as a unit, `factor` should be set') wav = factor * wav # Convert to Quantity c_Aps = _si.c.to_value(si.AA / si.s) # Angstrom/s h_cgs = _si.h.cgs.value # erg * s hc = c_Aps * h_cgs # flux density f_la = cgs.erg / si.angstrom / si.cm ** 2 / si.s f_nu = cgs.erg / si.Hz / si.cm ** 2 / si.s nu_f_nu = cgs.erg / si.cm ** 2 / si.s la_f_la = nu_f_nu phot_f_la = astrophys.photon / (si.cm ** 2 * si.s * si.AA) phot_f_nu = astrophys.photon / (si.cm ** 2 * si.s * si.Hz) la_phot_f_la = astrophys.photon / (si.cm ** 2 * si.s) # luminosity density L_nu = cgs.erg / si.s / si.Hz L_la = cgs.erg / si.s / si.angstrom nu_L_nu = cgs.erg / si.s la_L_la = nu_L_nu phot_L_la = astrophys.photon / (si.s * si.AA) phot_L_nu = astrophys.photon / (si.s * si.Hz) # surface brightness (flux equiv) S_la = cgs.erg / si.angstrom / si.cm ** 2 / si.s / si.sr S_nu = cgs.erg / si.Hz / si.cm ** 2 / si.s / si.sr nu_S_nu = cgs.erg / si.cm ** 2 / si.s / si.sr la_S_la = nu_S_nu phot_S_la = astrophys.photon / (si.cm ** 2 * si.s * si.AA * si.sr) phot_S_nu = astrophys.photon / (si.cm ** 2 * si.s * si.Hz * si.sr) # surface brightness (luminosity equiv) SL_nu = cgs.erg / si.s / si.Hz / si.sr SL_la = cgs.erg / si.s / si.angstrom / si.sr nu_SL_nu = cgs.erg / si.s / si.sr la_SL_la = nu_SL_nu phot_SL_la = astrophys.photon / (si.s * si.AA * si.sr) phot_SL_nu = astrophys.photon / (si.s * si.Hz * si.sr) def converter(x): return x * (wav.to_value(si.AA, spectral()) ** 2 / c_Aps) def iconverter(x): return x / (wav.to_value(si.AA, spectral()) ** 2 / c_Aps) def converter_f_nu_to_nu_f_nu(x): return x * wav.to_value(si.Hz, spectral()) def iconverter_f_nu_to_nu_f_nu(x): return x / wav.to_value(si.Hz, spectral()) def converter_f_la_to_la_f_la(x): return x * wav.to_value(si.AA, spectral()) def iconverter_f_la_to_la_f_la(x): return x / wav.to_value(si.AA, spectral()) def converter_phot_f_la_to_f_la(x): return hc * x / wav.to_value(si.AA, spectral()) def iconverter_phot_f_la_to_f_la(x): return x * wav.to_value(si.AA, spectral()) / hc def converter_phot_f_la_to_f_nu(x): return h_cgs * x * 
wav.to_value(si.AA, spectral()) def iconverter_phot_f_la_to_f_nu(x): return x / (wav.to_value(si.AA, spectral()) * h_cgs) def converter_phot_f_la_phot_f_nu(x): return x * wav.to_value(si.AA, spectral()) ** 2 / c_Aps def iconverter_phot_f_la_phot_f_nu(x): return c_Aps * x / wav.to_value(si.AA, spectral()) ** 2 converter_phot_f_nu_to_f_nu = converter_phot_f_la_to_f_la iconverter_phot_f_nu_to_f_nu = iconverter_phot_f_la_to_f_la def converter_phot_f_nu_to_f_la(x): return x * hc * c_Aps / wav.to_value(si.AA, spectral()) ** 3 def iconverter_phot_f_nu_to_f_la(x): return x * wav.to_value(si.AA, spectral()) ** 3 / (hc * c_Aps) # for luminosity density converter_L_nu_to_nu_L_nu = converter_f_nu_to_nu_f_nu iconverter_L_nu_to_nu_L_nu = iconverter_f_nu_to_nu_f_nu converter_L_la_to_la_L_la = converter_f_la_to_la_f_la iconverter_L_la_to_la_L_la = iconverter_f_la_to_la_f_la converter_phot_L_la_to_L_la = converter_phot_f_la_to_f_la iconverter_phot_L_la_to_L_la = iconverter_phot_f_la_to_f_la converter_phot_L_la_to_L_nu = converter_phot_f_la_to_f_nu iconverter_phot_L_la_to_L_nu = iconverter_phot_f_la_to_f_nu converter_phot_L_la_phot_L_nu = converter_phot_f_la_phot_f_nu iconverter_phot_L_la_phot_L_nu = iconverter_phot_f_la_phot_f_nu converter_phot_L_nu_to_L_nu = converter_phot_f_nu_to_f_nu iconverter_phot_L_nu_to_L_nu = iconverter_phot_f_nu_to_f_nu converter_phot_L_nu_to_L_la = converter_phot_f_nu_to_f_la iconverter_phot_L_nu_to_L_la = iconverter_phot_f_nu_to_f_la return Equivalency([ # flux (f_la, f_nu, converter, iconverter), (f_nu, nu_f_nu, converter_f_nu_to_nu_f_nu, iconverter_f_nu_to_nu_f_nu), (f_la, la_f_la, converter_f_la_to_la_f_la, iconverter_f_la_to_la_f_la), (phot_f_la, f_la, converter_phot_f_la_to_f_la, iconverter_phot_f_la_to_f_la), (phot_f_la, f_nu, converter_phot_f_la_to_f_nu, iconverter_phot_f_la_to_f_nu), (phot_f_la, phot_f_nu, converter_phot_f_la_phot_f_nu, iconverter_phot_f_la_phot_f_nu), (phot_f_nu, f_nu, converter_phot_f_nu_to_f_nu, iconverter_phot_f_nu_to_f_nu), (phot_f_nu, f_la, converter_phot_f_nu_to_f_la, iconverter_phot_f_nu_to_f_la), # integrated flux (la_phot_f_la, la_f_la, converter_phot_f_la_to_f_la, iconverter_phot_f_la_to_f_la), # luminosity (L_la, L_nu, converter, iconverter), (L_nu, nu_L_nu, converter_L_nu_to_nu_L_nu, iconverter_L_nu_to_nu_L_nu), (L_la, la_L_la, converter_L_la_to_la_L_la, iconverter_L_la_to_la_L_la), (phot_L_la, L_la, converter_phot_L_la_to_L_la, iconverter_phot_L_la_to_L_la), (phot_L_la, L_nu, converter_phot_L_la_to_L_nu, iconverter_phot_L_la_to_L_nu), (phot_L_la, phot_L_nu, converter_phot_L_la_phot_L_nu, iconverter_phot_L_la_phot_L_nu), (phot_L_nu, L_nu, converter_phot_L_nu_to_L_nu, iconverter_phot_L_nu_to_L_nu), (phot_L_nu, L_la, converter_phot_L_nu_to_L_la, iconverter_phot_L_nu_to_L_la), # surface brightness (flux equiv) (S_la, S_nu, converter, iconverter), (S_nu, nu_S_nu, converter_f_nu_to_nu_f_nu, iconverter_f_nu_to_nu_f_nu), (S_la, la_S_la, converter_f_la_to_la_f_la, iconverter_f_la_to_la_f_la), (phot_S_la, S_la, converter_phot_f_la_to_f_la, iconverter_phot_f_la_to_f_la), (phot_S_la, S_nu, converter_phot_f_la_to_f_nu, iconverter_phot_f_la_to_f_nu), (phot_S_la, phot_S_nu, converter_phot_f_la_phot_f_nu, iconverter_phot_f_la_phot_f_nu), (phot_S_nu, S_nu, converter_phot_f_nu_to_f_nu, iconverter_phot_f_nu_to_f_nu), (phot_S_nu, S_la, converter_phot_f_nu_to_f_la, iconverter_phot_f_nu_to_f_la), # surface brightness (luminosity equiv) (SL_la, SL_nu, converter, iconverter), (SL_nu, nu_SL_nu, converter_L_nu_to_nu_L_nu, iconverter_L_nu_to_nu_L_nu), (SL_la, 
la_SL_la, converter_L_la_to_la_L_la, iconverter_L_la_to_la_L_la), (phot_SL_la, SL_la, converter_phot_L_la_to_L_la, iconverter_phot_L_la_to_L_la), (phot_SL_la, SL_nu, converter_phot_L_la_to_L_nu, iconverter_phot_L_la_to_L_nu), (phot_SL_la, phot_SL_nu, converter_phot_L_la_phot_L_nu, iconverter_phot_L_la_phot_L_nu), (phot_SL_nu, SL_nu, converter_phot_L_nu_to_L_nu, iconverter_phot_L_nu_to_L_nu), (phot_SL_nu, SL_la, converter_phot_L_nu_to_L_la, iconverter_phot_L_nu_to_L_la), ], "spectral_density", {'wav': wav, 'factor': factor}) def doppler_radio(rest): r""" Return the equivalency pairs for the radio convention for velocity. The radio convention for the relation between velocity and frequency is: :math:`V = c \frac{f_0 - f}{f_0} ; f(V) = f_0 ( 1 - V/c )` Parameters ---------- rest : `~astropy.units.Quantity` Any quantity supported by the standard spectral equivalencies (wavelength, energy, frequency, wave number). References ---------- `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_ Examples -------- >>> import astropy.units as u >>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz >>> radio_CO_equiv = u.doppler_radio(CO_restfreq) >>> measured_freq = 115.2832*u.GHz >>> radio_velocity = measured_freq.to(u.km/u.s, equivalencies=radio_CO_equiv) >>> radio_velocity # doctest: +FLOAT_CMP <Quantity -31.209092088877583 km / s> """ assert_is_spectral_unit(rest) ckms = _si.c.to_value('km/s') def to_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) return (restfreq-x) / (restfreq) * ckms def from_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) voverc = x/ckms return restfreq * (1-voverc) def to_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) return (x-restwav) / (x) * ckms def from_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) return restwav * ckms / (ckms-x) def to_vel_en(x): resten = rest.to_value(si.eV, equivalencies=spectral()) return (resten-x) / (resten) * ckms def from_vel_en(x): resten = rest.to_value(si.eV, equivalencies=spectral()) voverc = x/ckms return resten * (1-voverc) return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq), (si.AA, si.km/si.s, to_vel_wav, from_vel_wav), (si.eV, si.km/si.s, to_vel_en, from_vel_en), ], "doppler_radio", {'rest': rest}) def doppler_optical(rest): r""" Return the equivalency pairs for the optical convention for velocity. The optical convention for the relation between velocity and frequency is: :math:`V = c \frac{f_0 - f}{f } ; f(V) = f_0 ( 1 + V/c )^{-1}` Parameters ---------- rest : `~astropy.units.Quantity` Any quantity supported by the standard spectral equivalencies (wavelength, energy, frequency, wave number). 
References ---------- `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_ Examples -------- >>> import astropy.units as u >>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz >>> optical_CO_equiv = u.doppler_optical(CO_restfreq) >>> measured_freq = 115.2832*u.GHz >>> optical_velocity = measured_freq.to(u.km/u.s, equivalencies=optical_CO_equiv) >>> optical_velocity # doctest: +FLOAT_CMP <Quantity -31.20584348799674 km / s> """ assert_is_spectral_unit(rest) ckms = _si.c.to_value('km/s') def to_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) return ckms * (restfreq-x) / x def from_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) voverc = x/ckms return restfreq / (1+voverc) def to_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) return ckms * (x/restwav-1) def from_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) voverc = x/ckms return restwav * (1+voverc) def to_vel_en(x): resten = rest.to_value(si.eV, equivalencies=spectral()) return ckms * (resten-x) / x def from_vel_en(x): resten = rest.to_value(si.eV, equivalencies=spectral()) voverc = x/ckms return resten / (1+voverc) return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq), (si.AA, si.km/si.s, to_vel_wav, from_vel_wav), (si.eV, si.km/si.s, to_vel_en, from_vel_en), ], "doppler_optical", {'rest': rest}) def doppler_relativistic(rest): r""" Return the equivalency pairs for the relativistic convention for velocity. The full relativistic convention for the relation between velocity and frequency is: :math:`V = c \frac{f_0^2 - f^2}{f_0^2 + f^2} ; f(V) = f_0 \frac{\left(1 - (V/c)^2\right)^{1/2}}{(1+V/c)}` Parameters ---------- rest : `~astropy.units.Quantity` Any quantity supported by the standard spectral equivalencies (wavelength, energy, frequency, wave number). 
References ---------- `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_ Examples -------- >>> import astropy.units as u >>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz >>> relativistic_CO_equiv = u.doppler_relativistic(CO_restfreq) >>> measured_freq = 115.2832*u.GHz >>> relativistic_velocity = measured_freq.to(u.km/u.s, equivalencies=relativistic_CO_equiv) >>> relativistic_velocity # doctest: +FLOAT_CMP <Quantity -31.207467619351537 km / s> >>> measured_velocity = 1250 * u.km/u.s >>> relativistic_frequency = measured_velocity.to(u.GHz, equivalencies=relativistic_CO_equiv) >>> relativistic_frequency # doctest: +FLOAT_CMP <Quantity 114.79156866993588 GHz> >>> relativistic_wavelength = measured_velocity.to(u.mm, equivalencies=relativistic_CO_equiv) >>> relativistic_wavelength # doctest: +FLOAT_CMP <Quantity 2.6116243681798923 mm> """ # noqa: E501 assert_is_spectral_unit(rest) ckms = _si.c.to_value('km/s') def to_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) return (restfreq**2-x**2) / (restfreq**2+x**2) * ckms def from_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) voverc = x/ckms return restfreq * ((1-voverc) / (1+(voverc)))**0.5 def to_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) return (x**2-restwav**2) / (restwav**2+x**2) * ckms def from_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) voverc = x/ckms return restwav * ((1+voverc) / (1-voverc))**0.5 def to_vel_en(x): resten = rest.to_value(si.eV, spectral()) return (resten**2-x**2) / (resten**2+x**2) * ckms def from_vel_en(x): resten = rest.to_value(si.eV, spectral()) voverc = x/ckms return resten * ((1-voverc) / (1+(voverc)))**0.5 return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq), (si.AA, si.km/si.s, to_vel_wav, from_vel_wav), (si.eV, si.km/si.s, to_vel_en, from_vel_en), ], "doppler_relativistic", {'rest': rest}) def doppler_redshift(): """ Returns the equivalence between Doppler redshift (unitless) and radial velocity. .. note:: This equivalency is not compatible with cosmological redshift in `astropy.cosmology.units`. """ rv_unit = si.km / si.s C_KMS = _si.c.to_value(rv_unit) def convert_z_to_rv(z): zponesq = (1 + z) ** 2 return C_KMS * (zponesq - 1) / (zponesq + 1) def convert_rv_to_z(rv): beta = rv / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 return Equivalency([(dimensionless_unscaled, rv_unit, convert_z_to_rv, convert_rv_to_z)], "doppler_redshift") def molar_mass_amu(): """ Returns the equivalence between amu and molar mass. """ return Equivalency([ (si.g/si.mol, misc.u) ], "molar_mass_amu") def mass_energy(): """ Returns a list of equivalence pairs that handle the conversion between mass and energy. """ return Equivalency([(si.kg, si.J, lambda x: x * _si.c.value ** 2, lambda x: x / _si.c.value ** 2), (si.kg / si.m ** 2, si.J / si.m ** 2, lambda x: x * _si.c.value ** 2, lambda x: x / _si.c.value ** 2), (si.kg / si.m ** 3, si.J / si.m ** 3, lambda x: x * _si.c.value ** 2, lambda x: x / _si.c.value ** 2), (si.kg / si.s, si.J / si.s, lambda x: x * _si.c.value ** 2, lambda x: x / _si.c.value ** 2), ], "mass_energy") def brightness_temperature(frequency, beam_area=None): r""" Defines the conversion between Jy/sr and "brightness temperature", :math:`T_B`, in Kelvins. The brightness temperature is a unit very commonly used in radio astronomy. 
See, e.g., "Tools of Radio Astronomy" (Wilson 2009) eqn 8.16 and eqn 8.19 (these pages are available on `google books <https://books.google.com/books?id=9KHw6R8rQEMC&pg=PA179&source=gbs_toc_r&cad=4#v=onepage&q&f=false>`__). :math:`T_B \equiv S_\nu / \left(2 k \nu^2 / c^2 \right)` If the input is in Jy/beam or Jy (assuming it came from a single beam), the beam area is essential for this computation: the brightness temperature is inversely proportional to the beam area. Parameters ---------- frequency : `~astropy.units.Quantity` The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g., frequency or wavelength). The variable is named 'frequency' because it is more commonly used in radio astronomy. BACKWARD COMPATIBILITY NOTE: previous versions of the brightness temperature equivalency used the keyword ``disp``, which is no longer supported. beam_area : `~astropy.units.Quantity` ['solid angle'] Beam area in angular units, i.e. steradian equivalent Examples -------- Arecibo C-band beam:: >>> import numpy as np >>> from astropy import units as u >>> beam_sigma = 50*u.arcsec >>> beam_area = 2*np.pi*(beam_sigma)**2 >>> freq = 5*u.GHz >>> equiv = u.brightness_temperature(freq) >>> (1*u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP <Quantity 3.526295144567176 K> VLA synthetic beam:: >>> bmaj = 15*u.arcsec >>> bmin = 15*u.arcsec >>> fwhm_to_sigma = 1./(8*np.log(2))**0.5 >>> beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma**2) >>> freq = 5*u.GHz >>> equiv = u.brightness_temperature(freq) >>> (u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP <Quantity 217.2658703625732 K> Any generic surface brightness: >>> surf_brightness = 1e6*u.MJy/u.sr >>> surf_brightness.to(u.K, equivalencies=u.brightness_temperature(500*u.GHz)) # doctest: +FLOAT_CMP <Quantity 130.1931904778803 K> """ # noqa: E501 if frequency.unit.is_equivalent(si.sr): if not beam_area.unit.is_equivalent(si.Hz): raise ValueError("The inputs to `brightness_temperature` are " "frequency and angular area.") warnings.warn("The inputs to `brightness_temperature` have changed. " "Frequency is now the first input, and angular area " "is the second, optional input.", AstropyDeprecationWarning) frequency, beam_area = beam_area, frequency nu = frequency.to(si.GHz, spectral()) if beam_area is not None: beam = beam_area.to_value(si.sr) def convert_Jy_to_K(x_jybm): factor = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value return (x_jybm / beam / factor) def convert_K_to_Jy(x_K): factor = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value return (x_K * beam / factor) return Equivalency([(astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy), (astrophys.Jy/astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy)], "brightness_temperature", {'frequency': frequency, 'beam_area': beam_area}) # noqa: E501 else: def convert_JySr_to_K(x_jysr): factor = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value return (x_jysr / factor) def convert_K_to_JySr(x_K): factor = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value return (x_K / factor) # multiplied by 1x for 1 steradian return Equivalency([(astrophys.Jy/si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)], "brightness_temperature", {'frequency': frequency, 'beam_area': beam_area}) # noqa: E501 def beam_angular_area(beam_area): """ Convert between the ``beam`` unit, which is commonly used to express the area of a radio telescope resolution element, and an area on the sky. 
    This equivalency also supports direct conversion between ``Jy/beam`` and
    ``Jy/steradian`` units, since that is a common operation.

    Parameters
    ----------
    beam_area : unit-like
        The area of the beam in angular area units (e.g., steradians).
        Must have angular area equivalent units.
    """
    return Equivalency([(astrophys.beam, Unit(beam_area)),
                        (astrophys.beam**-1, Unit(beam_area)**-1),
                        (astrophys.Jy/astrophys.beam,
                         astrophys.Jy/Unit(beam_area))],
                       "beam_angular_area", {'beam_area': beam_area})


def thermodynamic_temperature(frequency, T_cmb=None):
    r"""Defines the conversion between Jy/sr and "thermodynamic temperature",
    :math:`T_{CMB}`, in Kelvins.  The thermodynamic temperature is a unit
    very commonly used in cosmology.  See eqn 8 in [1]_

    :math:`K_{CMB} \equiv I_\nu / \left(2 k \nu^2 / c^2  f(\nu) \right)`

    with :math:`f(\nu) = \frac{ x^2 e^x}{(e^x - 1 )^2}`

    where :math:`x = h \nu / k T`

    Parameters
    ----------
    frequency : `~astropy.units.Quantity`
        The observed `spectral` equivalent `~astropy.units.Unit` (e.g.,
        frequency or wavelength). Must have spectral units.
    T_cmb : `~astropy.units.Quantity` ['temperature'] or None
        The CMB temperature at z=0.  If `None`, the default cosmology will
        be used to get this temperature. Must have units of temperature.

    Notes
    -----
    For broad-band receivers this conversion does not hold, as it depends
    strongly on the frequency.

    References
    ----------
    .. [1] Planck 2013 results. IX. HFI spectral response
       https://arxiv.org/abs/1303.5070

    Examples
    --------
    Planck HFI 143 GHz::

        >>> from astropy import units as u
        >>> from astropy.cosmology import Planck15
        >>> freq = 143 * u.GHz
        >>> equiv = u.thermodynamic_temperature(freq, Planck15.Tcmb0)
        >>> (1. * u.mK).to(u.MJy / u.sr, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 0.37993172 MJy / sr>

    """
    nu = frequency.to(si.GHz, spectral())

    if T_cmb is None:
        from astropy.cosmology import default_cosmology
        T_cmb = default_cosmology.get().Tcmb0

    def f(nu, T_cmb=T_cmb):
        x = _si.h * nu / _si.k_B / T_cmb
        return x**2 * np.exp(x) / np.expm1(x)**2

    def convert_Jy_to_K(x_jybm):
        factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(astrophys.Jy)
        return x_jybm / factor

    def convert_K_to_Jy(x_K):
        factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(si.K)
        return x_K / factor

    return Equivalency([(astrophys.Jy/si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],
                       "thermodynamic_temperature",
                       {'frequency': frequency, "T_cmb": T_cmb})


def temperature():
    """Convert between Kelvin, Celsius, Rankine and Fahrenheit here because
    Unit and CompositeUnit cannot do addition or subtraction properly.
""" from .imperial import deg_F, deg_R return Equivalency([ (si.K, si.deg_C, lambda x: x - 273.15, lambda x: x + 273.15), (si.deg_C, deg_F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8), (si.K, deg_F, lambda x: (x - 273.15) * 1.8 + 32.0, lambda x: ((x - 32.0) / 1.8) + 273.15), (deg_R, deg_F, lambda x: x - 459.67, lambda x: x + 459.67), (deg_R, si.deg_C, lambda x: (x - 491.67) * (5/9), lambda x: x * 1.8 + 491.67), (deg_R, si.K, lambda x: x * (5/9), lambda x: x * 1.8)], "temperature") def temperature_energy(): """Convert between Kelvin and keV(eV) to an equivalent amount.""" return Equivalency([ (si.K, si.eV, lambda x: x / (_si.e.value / _si.k_B.value), lambda x: x * (_si.e.value / _si.k_B.value))], "temperature_energy") def assert_is_spectral_unit(value): try: value.to(si.Hz, spectral()) except (AttributeError, UnitsError) as ex: raise UnitsError("The 'rest' value must be a spectral equivalent " "(frequency, wavelength, or energy).") def pixel_scale(pixscale): """ Convert between pixel distances (in units of ``pix``) and other units, given a particular ``pixscale``. Parameters ---------- pixscale : `~astropy.units.Quantity` The pixel scale either in units of <unit>/pixel or pixel/<unit>. """ decomposed = pixscale.unit.decompose() dimensions = dict(zip(decomposed.bases, decomposed.powers)) pix_power = dimensions.get(misc.pix, 0) if pix_power == -1: physical_unit = Unit(pixscale * misc.pix) elif pix_power == 1: physical_unit = Unit(misc.pix / pixscale) else: raise UnitsError( "The pixel scale unit must have" " pixel dimensionality of 1 or -1.") return Equivalency([(misc.pix, physical_unit)], "pixel_scale", {'pixscale': pixscale}) def plate_scale(platescale): """ Convert between lengths (to be interpreted as lengths in the focal plane) and angular units with a specified ``platescale``. Parameters ---------- platescale : `~astropy.units.Quantity` The pixel scale either in units of distance/pixel or distance/angle. """ if platescale.unit.is_equivalent(si.arcsec/si.m): platescale_val = platescale.to_value(si.radian/si.m) elif platescale.unit.is_equivalent(si.m/si.arcsec): platescale_val = (1/platescale).to_value(si.radian/si.m) else: raise UnitsError("The pixel scale must be in angle/distance or " "distance/angle") return Equivalency([(si.m, si.radian, lambda d: d*platescale_val, lambda rad: rad/platescale_val)], "plate_scale", {'platescale': platescale}) # ------------------------------------------------------------------------- def __getattr__(attr): if attr == "with_H0": import warnings from astropy.cosmology.units import with_H0 from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( ("`with_H0` is deprecated from `astropy.units.equivalencies` " "since astropy 5.0 and may be removed in a future version. " "Use `astropy.cosmology.units.with_H0` instead."), AstropyDeprecationWarning) return with_H0 raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains convenience functions for retrieving solar system ephemerides from jplephem. """ from urllib.parse import urlparse import os.path import numpy as np import erfa from .sky_coordinate import SkyCoord from astropy.utils.data import download_file from astropy.utils.decorators import classproperty, deprecated from astropy.utils.state import ScienceState from astropy.utils import indent from astropy import units as u from astropy.constants import c as speed_of_light from .representation import CartesianRepresentation, CartesianDifferential from .builtin_frames import GCRS, ICRS, ITRS, TETE from .builtin_frames.utils import get_jd12 __all__ = ["get_body", "get_moon", "get_body_barycentric", "get_body_barycentric_posvel", "solar_system_ephemeris"] DEFAULT_JPL_EPHEMERIS = 'de430' """List of kernel pairs needed to calculate positions of a given object.""" BODY_NAME_TO_KERNEL_SPEC = { 'sun': [(0, 10)], 'mercury': [(0, 1), (1, 199)], 'venus': [(0, 2), (2, 299)], 'earth-moon-barycenter': [(0, 3)], 'earth': [(0, 3), (3, 399)], 'moon': [(0, 3), (3, 301)], 'mars': [(0, 4)], 'jupiter': [(0, 5)], 'saturn': [(0, 6)], 'uranus': [(0, 7)], 'neptune': [(0, 8)], 'pluto': [(0, 9)], } """Indices to the plan94 routine for the given object.""" PLAN94_BODY_NAME_TO_PLANET_INDEX = { 'mercury': 1, 'venus': 2, 'earth-moon-barycenter': 3, 'mars': 4, 'jupiter': 5, 'saturn': 6, 'uranus': 7, 'neptune': 8, } _EPHEMERIS_NOTE = """ You can either give an explicit ephemeris or use a default, which is normally a built-in ephemeris that does not require ephemeris files. To change the default to be the JPL ephemeris:: >>> from astropy.coordinates import solar_system_ephemeris >>> solar_system_ephemeris.set('jpl') # doctest: +SKIP Use of any JPL ephemeris requires the jplephem package (https://pypi.org/project/jplephem/). If needed, the ephemeris file will be downloaded (and cached). One can check which bodies are covered by a given ephemeris using:: >>> solar_system_ephemeris.bodies ('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune') """[1:-1] class solar_system_ephemeris(ScienceState): """Default ephemerides for calculating positions of Solar-System bodies. This can be one of the following:: - 'builtin': polynomial approximations to the orbital elements. - 'de430', 'de432s', 'de440', 'de440s': short-cuts for recent JPL dynamical models. - 'jpl': Alias for the default JPL ephemeris (currently, 'de430'). - URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format. - PATH: (str) File path to a SPK ephemeris in SPICE binary (.bsp) format. - `None`: Ensure an Exception is raised without an explicit ephemeris. The default is 'builtin', which uses the ``epv00`` and ``plan94`` routines from the ``erfa`` implementation of the Standards Of Fundamental Astronomy library. Notes ----- Any file required will be downloaded (and cached) when the state is set. The default Satellite Planet Kernel (SPK) file from NASA JPL (de430) is ~120MB, and covers years ~1550-2650 CE [1]_. The smaller de432s file is ~10MB, and covers years 1950-2050 [2]_ (and similarly for the newer de440 and de440s). Older versions of the JPL ephemerides (such as the widely used de200) can be used via their URL [3]_. .. [1] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt .. [2] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt .. 
[3] https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/ """ _value = 'builtin' _kernel = None @classmethod def validate(cls, value): # make no changes if value is None if value is None: return cls._value # Set up Kernel; if the file is not in cache, this will download it. cls.get_kernel(value) return value @classmethod def get_kernel(cls, value): # ScienceState only ensures the `_value` attribute is up to date, # so we need to be sure any kernel returned is consistent. if cls._kernel is None or cls._kernel.origin != value: if cls._kernel is not None: cls._kernel.daf.file.close() cls._kernel = None kernel = _get_kernel(value) if kernel is not None: kernel.origin = value cls._kernel = kernel return cls._kernel @classproperty def kernel(cls): return cls.get_kernel(cls._value) @classproperty def bodies(cls): if cls._value is None: return None if cls._value.lower() == 'builtin': return (('earth', 'sun', 'moon') + tuple(PLAN94_BODY_NAME_TO_PLANET_INDEX.keys())) else: return tuple(BODY_NAME_TO_KERNEL_SPEC.keys()) def _get_kernel(value): """ Try importing jplephem, download/retrieve from cache the Satellite Planet Kernel corresponding to the given ephemeris. """ if value is None or value.lower() == 'builtin': return None try: from jplephem.spk import SPK except ImportError: raise ImportError("Solar system JPL ephemeris calculations require " "the jplephem package " "(https://pypi.org/project/jplephem/)") if value.lower() == 'jpl': value = DEFAULT_JPL_EPHEMERIS if value.lower() in ('de430', 'de432s', 'de440', 'de440s'): value = ('https://naif.jpl.nasa.gov/pub/naif/generic_kernels' '/spk/planets/{:s}.bsp'.format(value.lower())) elif os.path.isfile(value): return SPK.open(value) else: try: urlparse(value) except Exception: raise ValueError('{} was not one of the standard strings and ' 'could not be parsed as a file path or URL'.format(value)) return SPK.open(download_file(value, cache=True)) def _get_body_barycentric_posvel(body, time, ephemeris=None, get_velocity=True): """Calculate the barycentric position (and velocity) of a solar system body. Parameters ---------- body : str or other The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` get_velocity : bool, optional Whether or not to calculate the velocity as well as the position. Returns ------- position : `~astropy.coordinates.CartesianRepresentation` or tuple Barycentric (ICRS) position or tuple of position and velocity. Notes ----- Whether or not velocities are calculated makes little difference for the built-in ephemerides, but for most JPL ephemeris files, the execution time roughly doubles. """ # If the ephemeris is to be taken from solar_system_ephemeris, or the one # it already contains, use the kernel there. Otherwise, open the ephemeris, # possibly downloading it, but make sure the file is closed at the end. 
    default_kernel = ephemeris is None or ephemeris is solar_system_ephemeris._value
    kernel = None
    try:
        if default_kernel:
            if solar_system_ephemeris.get() is None:
                raise ValueError(_EPHEMERIS_NOTE)
            kernel = solar_system_ephemeris.kernel
        else:
            kernel = _get_kernel(ephemeris)

        jd1, jd2 = get_jd12(time, 'tdb')
        if kernel is None:
            body = body.lower()
            earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2)
            if body == 'earth':
                body_pv_bary = earth_pv_bary

            elif body == 'moon':
                # The moon98 documentation notes that it takes TT, but that
                # TDB leads to errors smaller than the uncertainties in the
                # algorithm.  moon98 returns the astrometric position
                # relative to the Earth.
                moon_pv_geo = erfa.moon98(jd1, jd2)
                body_pv_bary = erfa.pvppv(moon_pv_geo, earth_pv_bary)
            else:
                sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio)
                if body == 'sun':
                    body_pv_bary = sun_pv_bary
                else:
                    try:
                        body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body]
                    except KeyError:
                        raise KeyError(
                            "{}'s position and velocity cannot be "
                            "calculated with the '{}' ephemeris."
                            .format(body, ephemeris))
                    body_pv_helio = erfa.plan94(jd1, jd2, body_index)
                    body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary)

            body_pos_bary = CartesianRepresentation(
                body_pv_bary['p'], unit=u.au, xyz_axis=-1, copy=False)
            if get_velocity:
                body_vel_bary = CartesianRepresentation(
                    body_pv_bary['v'], unit=u.au/u.day, xyz_axis=-1,
                    copy=False)

        else:
            if isinstance(body, str):
                # Look up kernel chain for JPL ephemeris, based on name
                try:
                    kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()]
                except KeyError:
                    raise KeyError("{}'s position cannot be calculated with "
                                   "the {} ephemeris.".format(body, ephemeris))
            else:
                # otherwise, assume the user knows what they're doing and
                # intentionally passed in a kernel chain
                kernel_spec = body

            # jplephem cannot handle multi-D arrays, so convert to 1D here.
            jd1_shape = getattr(jd1, 'shape', ())
            if len(jd1_shape) > 1:
                jd1, jd2 = jd1.ravel(), jd2.ravel()
            # Note that we use the new jd1.shape here to create a 1D result
            # array.  It is reshaped below.
            body_posvel_bary = np.zeros((2 if get_velocity else 1, 3) +
                                        getattr(jd1, 'shape', ()))
            for pair in kernel_spec:
                spk = kernel[pair]
                if spk.data_type == 3:
                    # Type 3 kernels contain both position and velocity.
                    posvel = spk.compute(jd1, jd2)
                    if get_velocity:
                        body_posvel_bary += posvel.reshape(body_posvel_bary.shape)
                    else:
                        # Keep only the three position components.
                        body_posvel_bary[0] += posvel[:3]
                else:
                    # spk.generate first yields the position and then the
                    # derivative.  If no velocities are desired,
                    # body_posvel_bary has only one element and thus the loop
                    # ends after a single iteration, avoiding the velocity
                    # calculation.
                    for body_p_or_v, p_or_v in zip(body_posvel_bary,
                                                   spk.generate(jd1, jd2)):
                        body_p_or_v += p_or_v

            body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape
            body_pos_bary = CartesianRepresentation(body_posvel_bary[0],
                                                    unit=u.km, copy=False)
            if get_velocity:
                body_vel_bary = CartesianRepresentation(body_posvel_bary[1],
                                                        unit=u.km/u.day,
                                                        copy=False)

        return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary

    finally:
        if not default_kernel and kernel is not None:
            kernel.daf.file.close()


def get_body_barycentric_posvel(body, time, ephemeris=None):
    """Calculate the barycentric position and velocity of a solar system body.

    Parameters
    ----------
    body : str or list of tuple
        The solar system body for which to calculate positions.  Can
        also be a kernel specifier (list of 2-tuples) if the ``ephemeris``
        is a JPL kernel.
    time : `~astropy.time.Time`
        Time of observation.
    ephemeris : str, optional
        Ephemeris to use.
By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation` Tuple of barycentric (ICRS) position and velocity. See also -------- get_body_barycentric : to calculate position only. This is faster by about a factor two for JPL kernels, but has no speed advantage for the built-in ephemeris. Notes ----- {_EPHEMERIS_NOTE} """ return _get_body_barycentric_posvel(body, time, ephemeris) def get_body_barycentric(body, time, ephemeris=None): """Calculate the barycentric position of a solar system body. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` Returns ------- position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) position of the body in cartesian coordinates See also -------- get_body_barycentric_posvel : to calculate both position and velocity. Notes ----- {_EPHEMERIS_NOTE} """ return _get_body_barycentric_posvel(body, time, ephemeris, get_velocity=False) def _get_apparent_body_position(body, time, ephemeris, obsgeoloc=None): """Calculate the apparent position of body ``body`` relative to Earth. This corrects for the light-travel time to the object. Parameters ---------- body : str or other The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. ephemeris : str, optional Ephemeris to use. By default, use the one set with ``~astropy.coordinates.solar_system_ephemeris.set`` obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, optional The GCRS position of the observer Returns ------- cartesian_position : `~astropy.coordinates.CartesianRepresentation` Barycentric (ICRS) apparent position of the body in cartesian coordinates Notes ----- {_EPHEMERIS_NOTE} """ if ephemeris is None: ephemeris = solar_system_ephemeris.get() # Calculate position given approximate light travel time. delta_light_travel_time = 20. * u.s emitted_time = time light_travel_time = 0. * u.s earth_loc = get_body_barycentric('earth', time, ephemeris) if obsgeoloc is not None: earth_loc += obsgeoloc while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s): body_loc = get_body_barycentric(body, emitted_time, ephemeris) earth_distance = (body_loc - earth_loc).norm() delta_light_travel_time = (light_travel_time - earth_distance/speed_of_light) light_travel_time = earth_distance/speed_of_light emitted_time = time - light_travel_time return get_body_barycentric(body, emitted_time, ephemeris) def get_body(body, time, location=None, ephemeris=None): """ Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed from a location on Earth in the `~astropy.coordinates.GCRS` reference system. Parameters ---------- body : str or list of tuple The solar system body for which to calculate positions. Can also be a kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL kernel. time : `~astropy.time.Time` Time of observation. location : `~astropy.coordinates.EarthLocation`, optional Location of observer on the Earth. If not given, will be taken from ``time`` (if not present, a geocentric observer will be assumed). 
    ephemeris : str, optional
        Ephemeris to use.  If not given, use the one set with
        ``astropy.coordinates.solar_system_ephemeris.set`` (which is
        set to 'builtin' by default).

    Returns
    -------
    skycoord : `~astropy.coordinates.SkyCoord`
        GCRS Coordinate for the body

    Notes
    -----
    The coordinate returned is the apparent position, which is the position
    of the body at time *t* minus the light travel time from the *body* to
    the observing *location*.

    {_EPHEMERIS_NOTE}
    """
    if location is None:
        location = time.location

    if location is not None:
        obsgeoloc, obsgeovel = location.get_gcrs_posvel(time)
    else:
        obsgeoloc, obsgeovel = None, None

    cartrep = _get_apparent_body_position(body, time, ephemeris, obsgeoloc)
    icrs = ICRS(cartrep)
    gcrs = icrs.transform_to(GCRS(obstime=time,
                                  obsgeoloc=obsgeoloc,
                                  obsgeovel=obsgeovel))
    return SkyCoord(gcrs)


def get_moon(time, location=None, ephemeris=None):
    """
    Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed
    from a location on Earth in the `~astropy.coordinates.GCRS` reference
    system.

    Parameters
    ----------
    time : `~astropy.time.Time`
        Time of observation
    location : `~astropy.coordinates.EarthLocation`
        Location of observer on the Earth.  If none is supplied, it is taken
        from ``time`` (if not present, a geocentric observer will be
        assumed).
    ephemeris : str, optional
        Ephemeris to use.  If not given, use the one set with
        ``astropy.coordinates.solar_system_ephemeris.set`` (which is
        set to 'builtin' by default).

    Returns
    -------
    skycoord : `~astropy.coordinates.SkyCoord`
        GCRS Coordinate for the Moon

    Notes
    -----
    The coordinate returned is the apparent position, which is the position
    of the moon at time *t* minus the light travel time from the moon to the
    observing *location*.

    {_EPHEMERIS_NOTE}
    """
    return get_body('moon', time, location=location, ephemeris=ephemeris)


# Add note about the ephemeris choices to the docstrings of relevant functions.
# Note: sadly, one cannot use f-strings for docstrings, so we format explicitly.
for f in [f for f in locals().values()
          if callable(f) and f.__doc__ is not None
          and '{_EPHEMERIS_NOTE}' in f.__doc__]:
    f.__doc__ = f.__doc__.format(_EPHEMERIS_NOTE=indent(_EPHEMERIS_NOTE)[4:])


deprecation_msg = """
The use of _apparent_position_in_true_coordinates is deprecated because
astropy now implements a True Equator True Equinox Frame (TETE), which
should be used instead.
"""


@deprecated('4.2', deprecation_msg)
def _apparent_position_in_true_coordinates(skycoord):
    """
    Convert a SkyCoord in the GCRS frame into one in which RA and Dec are
    defined w.r.t. the true equinox and poles of the Earth.
    """
    location = getattr(skycoord, 'location', None)
    if location is None:
        gcrs_rep = skycoord.obsgeoloc.with_differentials(
            {'s': CartesianDifferential.from_cartesian(skycoord.obsgeovel)})
        location = (GCRS(gcrs_rep, obstime=skycoord.obstime)
                    .transform_to(ITRS(obstime=skycoord.obstime))
                    .earth_location)
    tete_frame = TETE(obstime=skycoord.obstime, location=location)
    return skycoord.transform_to(tete_frame)
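# ---------------------------------------------------------------------------
# Editor's sketch (not part of the module): querying body positions with the
# default built-in ephemeris.  'builtin' needs no download; JPL kernels
# additionally require the jplephem package.
from astropy.time import Time
from astropy.coordinates import (get_body, get_body_barycentric,
                                 solar_system_ephemeris)

t = Time('2021-06-21 12:00')
with solar_system_ephemeris.set('builtin'):
    mars = get_body('mars', t)               # apparent GCRS SkyCoord
    pos = get_body_barycentric('mars', t)    # barycentric (ICRS) position
print(mars.ra, mars.dec)
print(pos.norm())                            # distance from the barycenter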
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst # This module includes files automatically generated from ply (these end in # _lextab.py and _parsetab.py). To generate these files, remove them from this # folder, then build astropy and run the tests in-place: # # python setup.py build_ext --inplace # pytest astropy/coordinates # # You can then commit the changes to the re-generated _lextab.py and # _parsetab.py files. """ This module contains formatting functions that are for internal use in astropy.coordinates.angles. Mainly they are conversions from one format of data to another. """ import os import threading from warnings import warn import numpy as np from .errors import (IllegalHourWarning, IllegalHourError, IllegalMinuteWarning, IllegalMinuteError, IllegalSecondWarning, IllegalSecondError) from astropy.utils import format_exception, parsing from astropy import units as u class _AngleParser: """ Parses the various angle formats including: * 01:02:30.43 degrees * 1 2 0 hours * 1°2′3″ * 1d2m3s * -1h2m3s * 1°2′3″N This class should not be used directly. Use `parse_angle` instead. """ # For safe multi-threaded operation all class (but not instance) # members that carry state should be thread-local. They are stored # in the following class member _thread_local = threading.local() def __init__(self): # TODO: in principle, the parser should be invalidated if we change unit # system (from CDS to FITS, say). Might want to keep a link to the # unit_registry used, and regenerate the parser/lexer if it changes. # Alternatively, perhaps one should not worry at all and just pre- # generate the parser for each release (as done for unit formats). # For some discussion of this problem, see # https://github.com/astropy/astropy/issues/5350#issuecomment-248770151 if '_parser' not in _AngleParser._thread_local.__dict__: (_AngleParser._thread_local._parser, _AngleParser._thread_local._lexer) = self._make_parser() @classmethod def _get_simple_unit_names(cls): simple_units = set( u.radian.find_equivalent_units(include_prefix_units=True)) simple_unit_names = set() # We filter out degree and hourangle, since those are treated # separately. for unit in simple_units: if unit != u.deg and unit != u.hourangle: simple_unit_names.update(unit.names) return sorted(simple_unit_names) @classmethod def _make_parser(cls): from astropy.extern.ply import lex, yacc # List of token names. tokens = ( 'SIGN', 'UINT', 'UFLOAT', 'COLON', 'DEGREE', 'HOUR', 'MINUTE', 'SECOND', 'SIMPLE_UNIT', 'EASTWEST', 'NORTHSOUTH' ) # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!! # Regular expression rules for simple tokens def t_UFLOAT(t): r'((\d+\.\d*)|(\.\d+))([eE][+-−]?\d+)?' # The above includes Unicode "MINUS SIGN" \u2212. It is # important to include the hyphen last, or the regex will # treat this as a range. t.value = float(t.value.replace('−', '-')) return t def t_UINT(t): r'\d+' t.value = int(t.value) return t def t_SIGN(t): r'[+−-]' # The above include Unicode "MINUS SIGN" \u2212. It is # important to include the hyphen last, or the regex will # treat this as a range. 
if t.value == '+': t.value = 1.0 else: t.value = -1.0 return t def t_EASTWEST(t): r'[EW]$' t.value = -1.0 if t.value == 'W' else 1.0 return t def t_NORTHSOUTH(t): r'[NS]$' # We cannot use lower-case letters otherwise we'll confuse # s[outh] with s[econd] t.value = -1.0 if t.value == 'S' else 1.0 return t def t_SIMPLE_UNIT(t): t.value = u.Unit(t.value) return t t_SIMPLE_UNIT.__doc__ = '|'.join( f'(?:{x})' for x in cls._get_simple_unit_names()) t_COLON = ':' t_DEGREE = r'd(eg(ree(s)?)?)?|°' t_HOUR = r'hour(s)?|h(r)?|ʰ' t_MINUTE = r'm(in(ute(s)?)?)?|′|\'|ᵐ' t_SECOND = r's(ec(ond(s)?)?)?|″|\"|ˢ' # A string containing ignored characters (spaces) t_ignore = ' ' # Error handling rule def t_error(t): raise ValueError( f"Invalid character at col {t.lexpos}") lexer = parsing.lex(lextab='angle_lextab', package='astropy/coordinates') def p_angle(p): ''' angle : sign hms eastwest | sign dms dir | sign arcsecond dir | sign arcminute dir | sign simple dir ''' sign = p[1] * p[3] value, unit = p[2] if isinstance(value, tuple): p[0] = ((sign * value[0],) + value[1:], unit) else: p[0] = (sign * value, unit) def p_sign(p): ''' sign : SIGN | ''' if len(p) == 2: p[0] = p[1] else: p[0] = 1.0 def p_eastwest(p): ''' eastwest : EASTWEST | ''' if len(p) == 2: p[0] = p[1] else: p[0] = 1.0 def p_dir(p): ''' dir : EASTWEST | NORTHSOUTH | ''' if len(p) == 2: p[0] = p[1] else: p[0] = 1.0 def p_ufloat(p): ''' ufloat : UFLOAT | UINT ''' p[0] = p[1] def p_colon(p): ''' colon : UINT COLON ufloat | UINT COLON UINT COLON ufloat ''' if len(p) == 4: p[0] = (p[1], p[3]) elif len(p) == 6: p[0] = (p[1], p[3], p[5]) def p_spaced(p): ''' spaced : UINT ufloat | UINT UINT ufloat ''' if len(p) == 3: p[0] = (p[1], p[2]) elif len(p) == 4: p[0] = (p[1], p[2], p[3]) def p_generic(p): ''' generic : colon | spaced | ufloat ''' p[0] = p[1] def p_hms(p): ''' hms : UINT HOUR | UINT HOUR ufloat | UINT HOUR UINT MINUTE | UINT HOUR UFLOAT MINUTE | UINT HOUR UINT MINUTE ufloat | UINT HOUR UINT MINUTE ufloat SECOND | generic HOUR ''' if len(p) == 3: p[0] = (p[1], u.hourangle) elif len(p) in (4, 5): p[0] = ((p[1], p[3]), u.hourangle) elif len(p) in (6, 7): p[0] = ((p[1], p[3], p[5]), u.hourangle) def p_dms(p): ''' dms : UINT DEGREE | UINT DEGREE ufloat | UINT DEGREE UINT MINUTE | UINT DEGREE UFLOAT MINUTE | UINT DEGREE UINT MINUTE ufloat | UINT DEGREE UINT MINUTE ufloat SECOND | generic DEGREE ''' if len(p) == 3: p[0] = (p[1], u.degree) elif len(p) in (4, 5): p[0] = ((p[1], p[3]), u.degree) elif len(p) in (6, 7): p[0] = ((p[1], p[3], p[5]), u.degree) def p_simple(p): ''' simple : generic | generic SIMPLE_UNIT ''' if len(p) == 2: p[0] = (p[1], None) else: p[0] = (p[1], p[2]) def p_arcsecond(p): ''' arcsecond : generic SECOND ''' p[0] = (p[1], u.arcsecond) def p_arcminute(p): ''' arcminute : generic MINUTE ''' p[0] = (p[1], u.arcminute) def p_error(p): raise ValueError parser = parsing.yacc(tabmodule='angle_parsetab', package='astropy/coordinates') return parser, lexer def parse(self, angle, unit, debug=False): try: found_angle, found_unit = self._thread_local._parser.parse( angle, lexer=self._thread_local._lexer, debug=debug) except ValueError as e: if str(e): raise ValueError(f"{str(e)} in angle {angle!r}") from e else: raise ValueError( f"Syntax error parsing angle {angle!r}") from e if unit is None and found_unit is None: raise u.UnitsError("No unit specified") return found_angle, found_unit def _check_hour_range(hrs): """ Checks that the given value is in the range (-24, 24). 
""" if np.any(np.abs(hrs) == 24.): warn(IllegalHourWarning(hrs, 'Treating as 24 hr')) elif np.any(hrs < -24.) or np.any(hrs > 24.): raise IllegalHourError(hrs) def _check_minute_range(m): """ Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised. """ if np.any(m == 60.): warn(IllegalMinuteWarning(m, 'Treating as 0 min, +1 hr/deg')) elif np.any(m < -60.) or np.any(m > 60.): # "Error: minutes not in range [-60,60) ({0}).".format(min)) raise IllegalMinuteError(m) def _check_second_range(sec): """ Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised. """ if np.any(sec == 60.): warn(IllegalSecondWarning(sec, 'Treating as 0 sec, +1 min')) elif sec is None: pass elif np.any(sec < -60.) or np.any(sec > 60.): # "Error: seconds not in range [-60,60) ({0}).".format(sec)) raise IllegalSecondError(sec) def check_hms_ranges(h, m, s): """ Checks that the given hour, minute and second are all within reasonable range. """ _check_hour_range(h) _check_minute_range(m) _check_second_range(s) return None def parse_angle(angle, unit=None, debug=False): """ Parses an input string value into an angle value. Parameters ---------- angle : str A string representing the angle. May be in one of the following forms: * 01:02:30.43 degrees * 1 2 0 hours * 1°2′3″ * 1d2m3s * -1h2m3s unit : `~astropy.units.UnitBase` instance, optional The unit used to interpret the string. If ``unit`` is not provided, the unit must be explicitly represented in the string, either at the end or as number separators. debug : bool, optional If `True`, print debugging information from the parser. Returns ------- value, unit : tuple ``value`` is the value as a floating point number or three-part tuple, and ``unit`` is a `Unit` instance which is either the unit passed in or the one explicitly mentioned in the input string. """ return _AngleParser().parse(angle, unit, debug=debug) def degrees_to_dms(d): """ Convert a floating-point degree value into a ``(degree, arcminute, arcsecond)`` tuple. """ sign = np.copysign(1.0, d) (df, d) = np.modf(np.abs(d)) # (degree fraction, degree) (mf, m) = np.modf(df * 60.) # (minute fraction, minute) s = mf * 60. return np.floor(sign * d), sign * np.floor(m), sign * s def dms_to_degrees(d, m, s=None): """ Convert degrees, arcminute, arcsecond to a float degrees value. """ _check_minute_range(m) _check_second_range(s) # determine sign sign = np.copysign(1.0, d) try: d = np.floor(np.abs(d)) if s is None: m = np.abs(m) s = 0 else: m = np.floor(np.abs(m)) s = np.abs(s) except ValueError as err: raise ValueError(format_exception( "{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be " "converted to numbers.", d, m, s)) from err return sign * (d + m / 60. + s / 3600.) def hms_to_hours(h, m, s=None): """ Convert hour, minute, second to a float hour value. """ check_hms_ranges(h, m, s) # determine sign sign = np.copysign(1.0, h) try: h = np.floor(np.abs(h)) if s is None: m = np.abs(m) s = 0 else: m = np.floor(np.abs(m)) s = np.abs(s) except ValueError as err: raise ValueError(format_exception( "{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be " "converted to numbers.", h, m, s)) from err return sign * (h + m / 60. + s / 3600.) def hms_to_degrees(h, m, s): """ Convert hour, minute, second to a float degrees value. """ return hms_to_hours(h, m, s) * 15. def hms_to_radians(h, m, s): """ Convert hour, minute, second to a float radians value. 
""" return u.degree.to(u.radian, hms_to_degrees(h, m, s)) def hms_to_dms(h, m, s): """ Convert degrees, arcminutes, arcseconds to an ``(hour, minute, second)`` tuple. """ return degrees_to_dms(hms_to_degrees(h, m, s)) def hours_to_decimal(h): """ Convert any parseable hour value into a float value. """ from . import angles return angles.Angle(h, unit=u.hourangle).hour def hours_to_radians(h): """ Convert an angle in Hours to Radians. """ return u.hourangle.to(u.radian, h) def hours_to_hms(h): """ Convert an floating-point hour value into an ``(hour, minute, second)`` tuple. """ sign = np.copysign(1.0, h) (hf, h) = np.modf(np.abs(h)) # (degree fraction, degree) (mf, m) = np.modf(hf * 60.0) # (minute fraction, minute) s = mf * 60.0 return (np.floor(sign * h), sign * np.floor(m), sign * s) def radians_to_degrees(r): """ Convert an angle in Radians to Degrees. """ return u.radian.to(u.degree, r) def radians_to_hours(r): """ Convert an angle in Radians to Hours. """ return u.radian.to(u.hourangle, r) def radians_to_hms(r): """ Convert an angle in Radians to an ``(hour, minute, second)`` tuple. """ hours = radians_to_hours(r) return hours_to_hms(hours) def radians_to_dms(r): """ Convert an angle in Radians to an ``(degree, arcminute, arcsecond)`` tuple. """ degrees = u.radian.to(u.degree, r) return degrees_to_dms(degrees) def sexagesimal_to_string(values, precision=None, pad=False, sep=(':',), fields=3): """ Given an already separated tuple of sexagesimal values, returns a string. See `hours_to_string` and `degrees_to_string` for a higher-level interface to this functionality. """ # Check to see if values[0] is negative, using np.copysign to handle -0 sign = np.copysign(1.0, values[0]) # If the coordinates are negative, we need to take the absolute values. # We use np.abs because abs(-0) is -0 # TODO: Is this true? (MHvK, 2018-02-01: not on my system) values = [np.abs(value) for value in values] if pad: if sign == -1: pad = 3 else: pad = 2 else: pad = 0 if not isinstance(sep, tuple): sep = tuple(sep) if fields < 1 or fields > 3: raise ValueError( "fields must be 1, 2, or 3") if not sep: # empty string, False, or None, etc. sep = ('', '', '') elif len(sep) == 1: if fields == 3: sep = sep + (sep[0], '') elif fields == 2: sep = sep + ('', '') else: sep = ('', '', '') elif len(sep) == 2: sep = sep + ('',) elif len(sep) != 3: raise ValueError( "Invalid separator specification for converting angle to string.") # Simplify the expression based on the requested precision. For # example, if the seconds will round up to 60, we should convert # it to 0 and carry upwards. If the field is hidden (by the # fields kwarg) we round up around the middle, 30.0. 
if precision is None: rounding_thresh = 60.0 - (10.0 ** -8) else: rounding_thresh = 60.0 - (10.0 ** -precision) if fields == 3 and values[2] >= rounding_thresh: values[2] = 0.0 values[1] += 1.0 elif fields < 3 and values[2] >= 30.0: values[1] += 1.0 if fields >= 2 and values[1] >= 60.0: values[1] = 0.0 values[0] += 1.0 elif fields < 2 and values[1] >= 30.0: values[0] += 1.0 literal = [] last_value = '' literal.append('{0:0{pad}.0f}{sep[0]}') if fields >= 2: literal.append('{1:02d}{sep[1]}') if fields == 3: if precision is None: last_value = f'{abs(values[2]):.8f}' last_value = last_value.rstrip('0').rstrip('.') else: last_value = '{0:.{precision}f}'.format( abs(values[2]), precision=precision) if len(last_value) == 1 or last_value[1] == '.': last_value = '0' + last_value literal.append('{last_value}{sep[2]}') literal = ''.join(literal) return literal.format(np.copysign(values[0], sign), int(values[1]), values[2], sep=sep, pad=pad, last_value=last_value) def hours_to_string(h, precision=5, pad=False, sep=('h', 'm', 's'), fields=3): """ Takes a decimal hour value and returns a string formatted as hms with separator specified by the 'sep' parameter. ``h`` must be a scalar. """ h, m, s = hours_to_hms(h) return sexagesimal_to_string((h, m, s), precision=precision, pad=pad, sep=sep, fields=fields) def degrees_to_string(d, precision=5, pad=False, sep=':', fields=3): """ Takes a decimal hour value and returns a string formatted as dms with separator specified by the 'sep' parameter. ``d`` must be a scalar. """ d, m, s = degrees_to_dms(d) return sexagesimal_to_string((d, m, s), precision=precision, pad=pad, sep=sep, fields=fields)
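# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): how the
# parsing and formatting helpers above compose.  This assumes the module is
# importable as ``astropy.coordinates.angle_utilities`` (the path used by the
# subpackage ``__init__`` further below); the exact parser output shown is
# illustrative.
#
#     >>> from astropy.coordinates.angle_utilities import (
#     ...     parse_angle, degrees_to_dms, degrees_to_string)
#     >>> parse_angle('-1h2m3s', unit=None)  # doctest: +SKIP
#     ((-1.0, 2.0, 3.0), Unit("hourangle"))
#     >>> degrees_to_dms(10.2625)  # doctest: +SKIP
#     (10.0, 15.0, 45.0)
#     >>> degrees_to_string(10.2625, precision=1, sep='dms')  # doctest: +SKIP
#     '10d15m45.0s'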
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : unit-like Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity` ['speed'], optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = redshift.to(u.km / u.s, u.doppler_redshift()) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. 
""" if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"arbitrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( f"No velocity defined on frame, assuming {ZERO_VELOCITIES}.", NoVelocityWarning) coord = attach_zero_velocities(coord) return coord def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : unit-like Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity` ['speed'], optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. 
Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` ['speed'] Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- `astropy.units.Quantity` Redshift of target. 
""" return self.radial_velocity.to(u.dimensionless_unscaled, u.doppler_redshift()) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` ['speed'] The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. """ d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. 
""" if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` ['speed'] Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` ['speed'] Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. 
""" if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = target_shift.to(u.km / u.s, u.doppler_redshift()) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = observer_shift.to(u.km / u.s, u.doppler_redshift()) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
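# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): a
# minimal SpectralCoord round trip using only the public API defined above.
# The wavelength and velocity values are illustrative.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SpectralCoord
#     >>> sc = SpectralCoord([6564.6] * u.AA,
#     ...                    radial_velocity=250 * u.km / u.s)
#     >>> sc.to_rest()  # undo the 250 km/s redshift  # doctest: +SKIP
#     <SpectralCoord ... [6559.1...] Angstrom>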
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst ''' This module defines custom errors and exceptions used in astropy.coordinates. ''' from astropy.utils.exceptions import AstropyWarning __all__ = ['RangeError', 'BoundsError', 'IllegalHourError', 'IllegalMinuteError', 'IllegalSecondError', 'ConvertError', 'IllegalHourWarning', 'IllegalMinuteWarning', 'IllegalSecondWarning', 'UnknownSiteException'] class RangeError(ValueError): """ Raised when some part of an angle is out of its valid range. """ class BoundsError(RangeError): """ Raised when an angle is outside of its user-specified bounds. """ class IllegalHourError(RangeError): """ Raised when an hour value is not in the range [0,24). Parameters ---------- hour : int, float Examples -------- .. code-block:: python if not 0 <= hr < 24: raise IllegalHourError(hour) """ def __init__(self, hour): self.hour = hour def __str__(self): return f"An invalid value for 'hours' was found ('{self.hour}'); must be in the range [0,24)." class IllegalHourWarning(AstropyWarning): """ Raised when an hour value is 24. Parameters ---------- hour : int, float """ def __init__(self, hour, alternativeactionstr=None): self.hour = hour self.alternativeactionstr = alternativeactionstr def __str__(self): message = f"'hour' was found to be '{self.hour}', which is not in range (-24, 24)." if self.alternativeactionstr is not None: message += ' ' + self.alternativeactionstr return message class IllegalMinuteError(RangeError): """ Raised when an minute value is not in the range [0,60]. Parameters ---------- minute : int, float Examples -------- .. code-block:: python if not 0 <= min < 60: raise IllegalMinuteError(minute) """ def __init__(self, minute): self.minute = minute def __str__(self): return f"An invalid value for 'minute' was found ('{self.minute}'); should be in the range [0,60)." class IllegalMinuteWarning(AstropyWarning): """ Raised when a minute value is 60. Parameters ---------- minute : int, float """ def __init__(self, minute, alternativeactionstr=None): self.minute = minute self.alternativeactionstr = alternativeactionstr def __str__(self): message = f"'minute' was found to be '{self.minute}', which is not in range [0,60)." if self.alternativeactionstr is not None: message += ' ' + self.alternativeactionstr return message class IllegalSecondError(RangeError): """ Raised when an second value (time) is not in the range [0,60]. Parameters ---------- second : int, float Examples -------- .. code-block:: python if not 0 <= sec < 60: raise IllegalSecondError(second) """ def __init__(self, second): self.second = second def __str__(self): return f"An invalid value for 'second' was found ('{self.second}'); should be in the range [0,60)." class IllegalSecondWarning(AstropyWarning): """ Raised when a second value is 60. Parameters ---------- second : int, float """ def __init__(self, second, alternativeactionstr=None): self.second = second self.alternativeactionstr = alternativeactionstr def __str__(self): message = f"'second' was found to be '{self.second}', which is not in range [0,60)." if self.alternativeactionstr is not None: message += ' ' + self.alternativeactionstr return message # TODO: consider if this should be used to `units`? class UnitsError(ValueError): """ Raised if units are missing or invalid. 
""" class ConvertError(Exception): """ Raised if a coordinate system cannot be converted to another """ class UnknownSiteException(KeyError): def __init__(self, site, attribute, close_names=None): message = f"Site '{site}' not in database. Use {attribute} to see available sites." if close_names: message += " Did you mean one of: '{}'?'".format("', '".join(close_names)) self.site = site self.attribute = attribute self.close_names = close_names return super().__init__(message)
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Utililies used for constructing and inspecting rotation matrices. """ from functools import reduce import numpy as np from astropy import units as u from .angles import Angle def matrix_product(*matrices): """Matrix multiply all arguments together. Arguments should have dimension 2 or larger. Larger dimensional objects are interpreted as stacks of matrices residing in the last two dimensions. This function mostly exists for readability: using `~numpy.matmul` directly, one would have ``matmul(matmul(m1, m2), m3)``, etc. For even better readability, one might consider using `~numpy.matrix` for the arguments (so that one could write ``m1 * m2 * m3``), but then it is not possible to handle stacks of matrices. Once only python >=3.5 is supported, this function can be replaced by ``m1 @ m2 @ m3``. """ return reduce(np.matmul, matrices) def matrix_transpose(matrix): """Transpose a matrix or stack of matrices by swapping the last two axes. This function mostly exists for readability; seeing ``.swapaxes(-2, -1)`` it is not that obvious that one does a transpose. Note that one cannot use `~numpy.ndarray.T`, as this transposes all axes and thus does not work for stacks of matrices. """ return matrix.swapaxes(-2, -1) def rotation_matrix(angle, axis='z', unit=None): """ Generate matrices for rotation by some angle around some axis. Parameters ---------- angle : angle-like The amount of rotation the matrices should represent. Can be an array. axis : str or array-like Either ``'x'``, ``'y'``, ``'z'``, or a (x,y,z) specifying the axis to rotate about. If ``'x'``, ``'y'``, or ``'z'``, the rotation sense is counterclockwise looking down the + axis (e.g. positive rotations obey left-hand-rule). If given as an array, the last dimension should be 3; it will be broadcast against ``angle``. unit : unit-like, optional If ``angle`` does not have associated units, they are in this unit. If neither are provided, it is assumed to be degrees. Returns ------- rmat : `numpy.matrix` A unitary rotation matrix. """ if isinstance(angle, u.Quantity): angle = angle.to_value(u.radian) else: if unit is None: angle = np.deg2rad(angle) else: angle = u.Unit(unit).to(u.rad, angle) s = np.sin(angle) c = np.cos(angle) # use optimized implementations for x/y/z try: i = 'xyz'.index(axis) except TypeError: axis = np.asarray(axis) axis = axis / np.sqrt((axis * axis).sum(axis=-1, keepdims=True)) R = (axis[..., np.newaxis] * axis[..., np.newaxis, :] * (1. - c)[..., np.newaxis, np.newaxis]) for i in range(0, 3): R[..., i, i] += c a1 = (i + 1) % 3 a2 = (i + 2) % 3 R[..., a1, a2] += axis[..., i] * s R[..., a2, a1] -= axis[..., i] * s else: a1 = (i + 1) % 3 a2 = (i + 2) % 3 R = np.zeros(getattr(angle, 'shape', ()) + (3, 3)) R[..., i, i] = 1. R[..., a1, a1] = c R[..., a1, a2] = s R[..., a2, a1] = -s R[..., a2, a2] = c return R def angle_axis(matrix): """ Angle of rotation and rotation axis for a given rotation matrix. Parameters ---------- matrix : array-like A 3 x 3 unitary rotation matrix (or stack of matrices). Returns ------- angle : `~astropy.coordinates.Angle` The angle of rotation. axis : array The (normalized) axis of rotation (with last dimension 3). 
""" m = np.asanyarray(matrix) if m.shape[-2:] != (3, 3): raise ValueError('matrix is not 3x3') axis = np.zeros(m.shape[:-1]) axis[..., 0] = m[..., 2, 1] - m[..., 1, 2] axis[..., 1] = m[..., 0, 2] - m[..., 2, 0] axis[..., 2] = m[..., 1, 0] - m[..., 0, 1] r = np.sqrt((axis * axis).sum(-1, keepdims=True)) angle = np.arctan2(r[..., 0], m[..., 0, 0] + m[..., 1, 1] + m[..., 2, 2] - 1.) return Angle(angle, u.radian), -axis / r def is_O3(matrix): """Check whether a matrix is in the length-preserving group O(3). Parameters ---------- matrix : (..., N, N) array-like Must have attribute ``.shape`` and method ``.swapaxes()`` and not error when using `~numpy.isclose`. Returns ------- is_o3 : bool or array of bool If the matrix has more than two axes, the O(3) check is performed on slices along the last two axes -- (M, N, N) => (M, ) bool array. Notes ----- The orthogonal group O(3) preserves lengths, but is not guaranteed to keep orientations. Rotations and reflections are in this group. For more information, see https://en.wikipedia.org/wiki/Orthogonal_group """ # matrix is in O(3) (rotations, proper and improper). I = np.identity(matrix.shape[-1]) is_o3 = np.all(np.isclose(matrix @ matrix.swapaxes(-2, -1), I, atol=1e-15), axis=(-2, -1)) return is_o3 def is_rotation(matrix, allow_improper=False): """Check whether a matrix is a rotation, proper or improper. Parameters ---------- matrix : (..., N, N) array-like Must have attribute ``.shape`` and method ``.swapaxes()`` and not error when using `~numpy.isclose` and `~numpy.linalg.det`. allow_improper : bool, optional Whether to restrict check to the SO(3), the group of proper rotations, or also allow improper rotations (with determinant -1). The default (False) is only SO(3). Returns ------- isrot : bool or array of bool If the matrix has more than two axes, the checks are performed on slices along the last two axes -- (M, N, N) => (M, ) bool array. See Also -------- `~astopy.coordinates.matrix_utilities.is_O3` For the less restrictive check that a matrix is in the group O(3). Notes ----- The group SO(3) is the rotation group. It is O(3), with determinant 1. Rotations with determinant -1 are improper rotations, combining both a rotation and a reflection. For more information, see https://en.wikipedia.org/wiki/Orthogonal_group """ # matrix is in O(3). is_o3 = is_O3(matrix) # determinant checks for rotation (proper and improper) if allow_improper: # determinant can be +/- 1 is_det1 = np.isclose(np.abs(np.linalg.det(matrix)), 1.0) else: # restrict to SO(3) is_det1 = np.isclose(np.linalg.det(matrix), 1.0) return is_o3 & is_det1
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains classes and functions for celestial coordinates of astronomical objects. It also contains a framework for conversions between coordinate systems. """ from .errors import * from .angles import * from .baseframe import * from .attributes import * from .distances import * from .earth import * from .transformations import * from .builtin_frames import * from .name_resolve import * from .matching import * from .representation import * from .sky_coordinate import * from .funcs import * from .calculation import * from .solar_system import * from .spectral_quantity import * from .spectral_coordinate import * from .angle_utilities import *
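# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): the star imports above re-export the
# public API at the subpackage level, so typical user code only needs:
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> c = SkyCoord(ra=10.625 * u.deg, dec=41.2 * u.deg, frame='icrs')
#     >>> round(c.ra.hour, 6)
#     0.708333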
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains functions for matching coordinate catalogs. """ import numpy as np from .representation import UnitSphericalRepresentation from astropy import units as u from . import Angle from .sky_coordinate import SkyCoord __all__ = ['match_coordinates_3d', 'match_coordinates_sky', 'search_around_3d', 'search_around_sky'] def match_coordinates_3d(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_3d'): """ Finds the nearest 3-dimensional matches of a coordinate or coordinates in a set of catalog coordinates. This finds the 3-dimensional closest neighbor, which is only different from the on-sky distance if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord``, as in ``catalogcoord.cache`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. 
""" if catalogcoord.isscalar or len(catalogcoord) < 1: raise ValueError('The catalog for coordinate matching cannot be a ' 'scalar or length-0.') kdt = _get_cartesian_kdtree(catalogcoord, storekdtree) # make sure coordinate systems match if isinstance(matchcoord, SkyCoord): matchcoord = matchcoord.transform_to(catalogcoord, merge_attributes=False) else: matchcoord = matchcoord.transform_to(catalogcoord) # make sure units match catunit = catalogcoord.cartesian.x.unit matchxyz = matchcoord.cartesian.xyz.to(catunit) matchflatxyz = matchxyz.reshape((3, np.prod(matchxyz.shape) // 3)) # Querying NaN returns garbage if np.isnan(matchflatxyz.value).any(): raise ValueError("Matching coordinates cannot contain NaN entries.") dist, idx = kdt.query(matchflatxyz.T, nthneighbor) if nthneighbor > 1: # query gives 1D arrays if k=1, 2D arrays otherwise dist = dist[:, -1] idx = idx[:, -1] sep2d = catalogcoord[idx].separation(matchcoord) return idx.reshape(matchxyz.shape[1:]), sep2d, dist.reshape(matchxyz.shape[1:]) * catunit def match_coordinates_sky(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_sky'): """ Finds the nearest on-sky matches of a coordinate or coordinates in a set of catalog coordinates. This finds the on-sky closest neighbor, which is only different from the 3-dimensional match if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord`` in ``catalogcoord.cache`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. If either ``matchcoord`` or ``catalogcoord`` don't have a distance, this is the 3D distance on the unit sphere, rather than a true distance. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. 
""" if catalogcoord.isscalar or len(catalogcoord) < 1: raise ValueError('The catalog for coordinate matching cannot be a ' 'scalar or length-0.') # send to catalog frame if isinstance(matchcoord, SkyCoord): newmatch = matchcoord.transform_to(catalogcoord, merge_attributes=False) else: newmatch = matchcoord.transform_to(catalogcoord) # strip out distance info match_urepr = newmatch.data.represent_as(UnitSphericalRepresentation) newmatch_u = newmatch.realize_frame(match_urepr) cat_urepr = catalogcoord.data.represent_as(UnitSphericalRepresentation) newcat_u = catalogcoord.realize_frame(cat_urepr) # Check for a stored KD-tree on the passed-in coordinate. Normally it will # have a distinct name from the "3D" one, so it's safe to use even though # it's based on UnitSphericalRepresentation. storekdtree = catalogcoord.cache.get(storekdtree, storekdtree) idx, sep2d, sep3d = match_coordinates_3d(newmatch_u, newcat_u, nthneighbor, storekdtree) # sep3d is *wrong* above, because the distance information was removed, # unless one of the catalogs doesn't have a real distance if not (isinstance(catalogcoord.data, UnitSphericalRepresentation) or isinstance(newmatch.data, UnitSphericalRepresentation)): sep3d = catalogcoord[idx].separation_3d(newmatch) # update the kdtree on the actual passed-in coordinate if isinstance(storekdtree, str): catalogcoord.cache[storekdtree] = newcat_u.cache[storekdtree] elif storekdtree is True: # the old backwards-compatible name catalogcoord.cache['kdtree'] = newcat_u.cache['kdtree'] return idx, sep2d, sep3d def search_around_3d(coords1, coords2, distlimit, storekdtree='kdtree_3d'): """ Searches for pairs of points that are at least as close as a specified distance in 3D space. This is intended for use on coordinate objects with arrays of coordinates, not scalars. For scalar coordinates, it is better to use the ``separation_3d`` methods. Parameters ---------- coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The first set of coordinates, which will be searched for matches from ``coords2`` within ``seplimit``. Cannot be a scalar coordinate. coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The second set of coordinates, which will be searched for matches from ``coords1`` within ``seplimit``. Cannot be a scalar coordinate. distlimit : `~astropy.units.Quantity` ['length'] The physical radius to search within. storekdtree : bool or str, optional If a string, will store the KD-Tree used in the search with the name ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls to this function. If False, the KD-Trees are not saved. Returns ------- idx1 : int array Indices into ``coords1`` that matches to the corresponding element of ``idx2``. Shape matches ``idx2``. idx2 : int array Indices into ``coords2`` that matches to the corresponding element of ``idx1``. Shape matches ``idx1``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idx1`` and ``idx2``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idx1`` and ``idx2``. The unit is that of ``coords1``. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. If you are using this function to search in a catalog for matches around specific points, the convention is for ``coords2`` to be the catalog, and ``coords1`` are the points to search around. 
While these operations are mathematically the same if ``coords1`` and ``coords2`` are flipped, some of the optimizations may work better if this convention is obeyed. In the current implementation, the return values are always sorted in the same order as the ``coords1`` (so ``idx1`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. """ if not distlimit.isscalar: raise ValueError('distlimit must be a scalar in search_around_3d') if coords1.isscalar or coords2.isscalar: raise ValueError('One of the inputs to search_around_3d is a scalar. ' 'search_around_3d is intended for use with array ' 'coordinates, not scalars. Instead, use ' '``coord1.separation_3d(coord2) < distlimit`` to find ' 'the coordinates near a scalar coordinate.') if len(coords1) == 0 or len(coords2) == 0: # Empty array input: return empty match return (np.array([], dtype=int), np.array([], dtype=int), Angle([], u.deg), u.Quantity([], coords1.distance.unit)) kdt2 = _get_cartesian_kdtree(coords2, storekdtree) cunit = coords2.cartesian.x.unit # we convert coord1 to match coord2's frame. We do it this way # so that if the conversion does happen, the KD tree of coord2 at least gets # saved. (by convention, coord2 is the "catalog" if that makes sense) coords1 = coords1.transform_to(coords2) kdt1 = _get_cartesian_kdtree(coords1, storekdtree, forceunit=cunit) # this is the *cartesian* 3D distance that corresponds to the given angle d = distlimit.to_value(cunit) idxs1 = [] idxs2 = [] for i, matches in enumerate(kdt1.query_ball_tree(kdt2, d)): for match in matches: idxs1.append(i) idxs2.append(match) idxs1 = np.array(idxs1, dtype=int) idxs2 = np.array(idxs2, dtype=int) if idxs1.size == 0: d2ds = Angle([], u.deg) d3ds = u.Quantity([], coords1.distance.unit) else: d2ds = coords1[idxs1].separation(coords2[idxs2]) d3ds = coords1[idxs1].separation_3d(coords2[idxs2]) return idxs1, idxs2, d2ds, d3ds def search_around_sky(coords1, coords2, seplimit, storekdtree='kdtree_sky'): """ Searches for pairs of points that have an angular separation at least as close as a specified angle. This is intended for use on coordinate objects with arrays of coordinates, not scalars. For scalar coordinates, it is better to use the ``separation`` methods. Parameters ---------- coords1 : coordinate-like The first set of coordinates, which will be searched for matches from ``coords2`` within ``seplimit``. Cannot be a scalar coordinate. coords2 : coordinate-like The second set of coordinates, which will be searched for matches from ``coords1`` within ``seplimit``. Cannot be a scalar coordinate. seplimit : `~astropy.units.Quantity` ['angle'] The on-sky separation to search within. storekdtree : bool or str, optional If a string, will store the KD-Tree used in the search with the name ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls to this function. If False, the KD-Trees are not saved. Returns ------- idx1 : int array Indices into ``coords1`` that matches to the corresponding element of ``idx2``. Shape matches ``idx2``. idx2 : int array Indices into ``coords2`` that matches to the corresponding element of ``idx1``. Shape matches ``idx1``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idx1`` and ``idx2``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idx1`` and ``idx2``; the unit is that of ``coords1``. 
If either ``coords1`` or ``coords2`` don't have a distance, this is the 3D distance on the unit sphere, rather than a physical distance. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. In the current implementation, the return values are always sorted in the same order as the ``coords1`` (so ``idx1`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. """ if not seplimit.isscalar: raise ValueError('seplimit must be a scalar in search_around_sky') if coords1.isscalar or coords2.isscalar: raise ValueError('One of the inputs to search_around_sky is a scalar. ' 'search_around_sky is intended for use with array ' 'coordinates, not scalars. Instead, use ' '``coord1.separation(coord2) < seplimit`` to find the ' 'coordinates near a scalar coordinate.') if len(coords1) == 0 or len(coords2) == 0: # Empty array input: return empty match if coords2.distance.unit == u.dimensionless_unscaled: distunit = u.dimensionless_unscaled else: distunit = coords1.distance.unit return (np.array([], dtype=int), np.array([], dtype=int), Angle([], u.deg), u.Quantity([], distunit)) # we convert coord1 to match coord2's frame. We do it this way # so that if the conversion does happen, the KD tree of coord2 at least gets # saved. (by convention, coord2 is the "catalog" if that makes sense) coords1 = coords1.transform_to(coords2) # strip out distance info urepr1 = coords1.data.represent_as(UnitSphericalRepresentation) ucoords1 = coords1.realize_frame(urepr1) kdt1 = _get_cartesian_kdtree(ucoords1, storekdtree) if storekdtree and coords2.cache.get(storekdtree): # just use the stored KD-Tree kdt2 = coords2.cache[storekdtree] else: # strip out distance info urepr2 = coords2.data.represent_as(UnitSphericalRepresentation) ucoords2 = coords2.realize_frame(urepr2) kdt2 = _get_cartesian_kdtree(ucoords2, storekdtree) if storekdtree: # save the KD-Tree in coords2, *not* ucoords2 coords2.cache['kdtree' if storekdtree is True else storekdtree] = kdt2 # this is the *cartesian* 3D distance that corresponds to the given angle r = (2 * np.sin(Angle(seplimit) / 2.0)).value idxs1 = [] idxs2 = [] for i, matches in enumerate(kdt1.query_ball_tree(kdt2, r)): for match in matches: idxs1.append(i) idxs2.append(match) idxs1 = np.array(idxs1, dtype=int) idxs2 = np.array(idxs2, dtype=int) if idxs1.size == 0: if coords2.distance.unit == u.dimensionless_unscaled: distunit = u.dimensionless_unscaled else: distunit = coords1.distance.unit d2ds = Angle([], u.deg) d3ds = u.Quantity([], distunit) else: d2ds = coords1[idxs1].separation(coords2[idxs2]) try: d3ds = coords1[idxs1].separation_3d(coords2[idxs2]) except ValueError: # they don't have distances, so we just fall back on the cartesian # distance, computed from d2ds d3ds = 2 * np.sin(d2ds / 2.0) return idxs1, idxs2, d2ds, d3ds def _get_cartesian_kdtree(coord, attrname_or_kdt='kdtree', forceunit=None): """ This is a utility function to retrieve (and build/cache, if necessary) a 3D cartesian KD-Tree from various sorts of astropy coordinate objects. Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinates to build the KD-Tree for. attrname_or_kdt : bool or str or KDTree If a string, will store the KD-Tree used for the computation in the ``coord``, in ``coord.cache`` with the provided name. If given as a KD-Tree, it will just be used directly. 
forceunit : unit or None If a unit, the cartesian coordinates will convert to that unit before being put in the KD-Tree. If None, whatever unit it's already in will be used Returns ------- kdt : `~scipy.spatial.cKDTree` or `~scipy.spatial.KDTree` The KD-Tree representing the 3D cartesian representation of the input coordinates. """ from warnings import warn # without scipy this will immediately fail from scipy import spatial try: KDTree = spatial.cKDTree except Exception: warn('C-based KD tree not found, falling back on (much slower) ' 'python implementation') KDTree = spatial.KDTree if attrname_or_kdt is True: # backwards compatibility for pre v0.4 attrname_or_kdt = 'kdtree' # figure out where any cached KDTree might be if isinstance(attrname_or_kdt, str): kdt = coord.cache.get(attrname_or_kdt, None) if kdt is not None and not isinstance(kdt, KDTree): raise TypeError(f'The `attrname_or_kdt` "{attrname_or_kdt}" is not a scipy KD tree!') elif isinstance(attrname_or_kdt, KDTree): kdt = attrname_or_kdt attrname_or_kdt = None elif not attrname_or_kdt: kdt = None else: raise TypeError('Invalid `attrname_or_kdt` argument for KD-Tree:' + str(attrname_or_kdt)) if kdt is None: # need to build the cartesian KD-tree for the catalog if forceunit is None: cartxyz = coord.cartesian.xyz else: cartxyz = coord.cartesian.xyz.to(forceunit) flatxyz = cartxyz.reshape((3, np.prod(cartxyz.shape) // 3)) # There should be no NaNs in the kdtree data. if np.isnan(flatxyz.value).any(): raise ValueError("Catalog coordinates cannot contain NaN entries.") try: # Set compact_nodes=False, balanced_tree=False to use # "sliding midpoint" rule, which is much faster than standard for # many common use cases kdt = KDTree(flatxyz.value.T, compact_nodes=False, balanced_tree=False) except TypeError: # Python implementation does not take compact_nodes and balanced_tree # as arguments. However, it uses sliding midpoint rule by default kdt = KDTree(flatxyz.value.T) if attrname_or_kdt: # cache the kdtree in `coord` coord.cache[attrname_or_kdt] = kdt return kdt
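# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): nearest-neighbor matching against a small
# catalog.  Requires SciPy; the KD-Tree is cached on the catalog between
# calls (see the ``storekdtree`` parameter above).
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord, match_coordinates_sky
#     >>> catalog = SkyCoord(ra=[10., 11., 12.] * u.deg,
#     ...                    dec=[41., 42., 43.] * u.deg)
#     >>> targets = SkyCoord(ra=[10.01, 11.99] * u.deg,
#     ...                    dec=[41.0, 43.01] * u.deg)
#     >>> idx, sep2d, dist3d = match_coordinates_sky(targets, catalog)
#     >>> idx
#     array([0, 2])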
import re import copy import warnings import operator import numpy as np import erfa from astropy.utils.compat.misc import override__dir__ from astropy import units as u from astropy.constants import c as speed_of_light from astropy.utils.data_info import MixinInfo from astropy.utils import ShapedLikeNDArray from astropy.table import QTable from astropy.time import Time from astropy.utils.exceptions import AstropyUserWarning from .distances import Distance from .angles import Angle from .baseframe import (BaseCoordinateFrame, frame_transform_graph, GenericFrame) from .builtin_frames import ICRS, SkyOffsetFrame from .representation import (RadialDifferential, SphericalDifferential, SphericalRepresentation, UnitSphericalCosLatDifferential, UnitSphericalDifferential, UnitSphericalRepresentation) from .sky_coordinate_parsers import (_get_frame_class, _get_frame_without_data, _parse_coordinate_data) __all__ = ['SkyCoord', 'SkyCoordInfo'] class SkyCoordInfo(MixinInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ attrs_from_parent = set(['unit']) # Unit is read-only _supports_indexing = False @staticmethod def default_format(val): repr_data = val.info._repr_data formats = ['{0.' + compname + '.value:}' for compname in repr_data.components] return ','.join(formats).format(repr_data) @property def unit(self): repr_data = self._repr_data unit = ','.join(str(getattr(repr_data, comp).unit) or 'None' for comp in repr_data.components) return unit @property def _repr_data(self): if self._parent is None: return None sc = self._parent if (issubclass(sc.representation_type, SphericalRepresentation) and isinstance(sc.data, UnitSphericalRepresentation)): repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True) else: repr_data = sc.represent_as(sc.representation_type, in_frame_units=True) return repr_data def _represent_as_dict(self): sc = self._parent attrs = list(sc.representation_component_names) # Don't output distance unless it's actually distance. if isinstance(sc.data, UnitSphericalRepresentation): attrs = attrs[:-1] diff = sc.data.differentials.get('s') if diff is not None: diff_attrs = list(sc.get_representation_component_names('s')) # Don't output proper motions if they haven't been specified. if isinstance(diff, RadialDifferential): diff_attrs = diff_attrs[2:] # Don't output radial velocity unless it's actually velocity. elif isinstance(diff, (UnitSphericalDifferential, UnitSphericalCosLatDifferential)): diff_attrs = diff_attrs[:-1] attrs.extend(diff_attrs) attrs.extend(frame_transform_graph.frame_attributes.keys()) out = super()._represent_as_dict(attrs) out['representation_type'] = sc.representation_type.get_name() out['frame'] = sc.frame.name # Note that sc.info.unit is a fake composite unit (e.g. 'deg,deg,None' # or None,None,m) and is not stored. The individual attributes have # units. return out def new_like(self, skycoords, length, metadata_conflicts='warn', name=None): """ Return a new SkyCoord instance which is consistent with the input SkyCoord objects ``skycoords`` and has ``length`` rows. Being "consistent" is defined as being able to set an item from one to each of the rest without any exception being raised. This is intended for creating a new SkyCoord instance whose elements can be set in-place for table operations like join or vstack. 
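
        For example, stacking two tables that contain ``SkyCoord`` columns
        goes through this machinery (a hedged sketch; statements are
        illustrative and the exact behavior depends on the astropy version)::

            >>> from astropy.table import QTable, vstack  # doctest: +SKIP
            >>> t1 = QTable({'sc': SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)})  # doctest: +SKIP
            >>> t2 = QTable({'sc': SkyCoord([5]*u.deg, [6]*u.deg)})  # doctest: +SKIP
            >>> stacked = vstack([t1, t2])  # doctest: +SKIP
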
        This is used when a SkyCoord object is used as a mixin column in an
        astropy Table. The data values are not predictable and it is expected
        that the consumer of the object will fill in all values.

        Parameters
        ----------
        skycoords : list
            List of input SkyCoord objects
        length : int
            Length of the output skycoord object
        metadata_conflicts : str ('warn'|'error'|'silent')
            How to handle metadata conflicts
        name : str
            Output name (sets output skycoord.info.name)

        Returns
        -------
        skycoord : SkyCoord (or subclass)
            Instance of this class consistent with ``skycoords``

        """
        # Get merged info attributes like shape, dtype, format, description, etc.
        attrs = self.merge_cols_attributes(skycoords, metadata_conflicts, name,
                                           ('meta', 'description'))
        skycoord0 = skycoords[0]

        # Make a new SkyCoord object with the desired length and attributes
        # by using the _apply / __getitem__ machinery to effectively return
        # skycoord0[[0, 0, ..., 0, 0]]. This will have all the right frame
        # attributes with the right shape.
        indexes = np.zeros(length, dtype=np.int64)
        out = skycoord0[indexes]

        # Use __setitem__ machinery to check for consistency of all skycoords
        for skycoord in skycoords[1:]:
            try:
                out[0] = skycoord[0]
            except Exception as err:
                raise ValueError('Input skycoords are inconsistent.') from err

        # Set (merged) info attributes
        for attr in ('name', 'meta', 'description'):
            if attr in attrs:
                setattr(out.info, attr, attrs[attr])

        return out


class SkyCoord(ShapedLikeNDArray):
    """High-level object providing a flexible interface for celestial
    coordinate representation, manipulation, and transformation between
    systems.

    The `SkyCoord` class accepts a wide variety of inputs for initialization.
    At a minimum these must provide one or more celestial coordinate values
    with unambiguous units. Inputs may be scalars or lists/tuples/arrays,
    yielding scalar or array coordinates (can be checked via
    ``SkyCoord.isscalar``). Typically one also specifies the coordinate
    frame, though this is not required. The general pattern for spherical
    representations is::

      SkyCoord(COORD, [FRAME], keyword_args ...)
      SkyCoord(LON, LAT, [FRAME], keyword_args ...)
      SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
      SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)

    It is also possible to input coordinate values in other representations
    such as cartesian or cylindrical. In this case one includes the keyword
    argument ``representation_type='cartesian'`` (for example) along with data
    in ``x``, ``y``, and ``z``.

    See also: https://docs.astropy.org/en/stable/coordinates/

    Examples
    --------
    The examples below illustrate common ways of initializing a `SkyCoord`
    object. For a complete description of the allowed syntax see the
    full coordinates documentation.

    First some imports::

        >>> from astropy.coordinates import SkyCoord  # High-level coordinates
        >>> from astropy.coordinates import ICRS, Galactic, FK4, FK5  # Low-level frames
        >>> from astropy.coordinates import Angle, Latitude, Longitude  # Angles
        >>> import astropy.units as u

    The coordinate values and frame specification can now be provided using
    positional and keyword arguments::

        >>> c = SkyCoord(10, 20, unit="deg")  # defaults to ICRS frame
        >>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg")  # 3 coords

        >>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
        >>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")

        >>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic)  # Units from string
        >>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")

        >>> ra = Longitude([1, 2, 3], unit=u.deg)  # Could also use Angle
        >>> dec = np.array([4.5, 5.2, 6.3]) * u.deg  # Astropy Quantity
        >>> c = SkyCoord(ra, dec, frame='icrs')
        >>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')

        >>> c = FK4(1 * u.deg, 2 * u.deg)  # Uses defaults for obstime, equinox
        >>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965')  # Override defaults

        >>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
        ...              representation_type='cartesian')

        >>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])

    Velocity components (proper motions or radial velocities) can also be
    provided in a similar manner::

        >>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)

        >>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)

    As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
    class or the corresponding string alias. The frame classes that are built
    in to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.
    The string aliases are simply lower-case versions of the class name, and
    allow for creating a `SkyCoord` object and transforming frames without
    explicitly importing the frame classes.

    Parameters
    ----------
    frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
        Type of coordinate frame this `SkyCoord` should represent. Defaults to
        ICRS if not given or given as None.
    unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
        Units for supplied coordinate values.
        If only one unit is supplied then it applies to all values.
        Note that passing only one unit might lead to unit conversion errors
        if the coordinate values are expected to have mixed physical meanings
        (e.g., angles and distances).
    obstime : time-like, optional
        Time(s) of observation.
    equinox : time-like, optional
        Coordinate frame equinox time.
    representation_type : str or Representation class
        Specifies the representation, e.g. 'spherical', 'cartesian', or
        'cylindrical'. This affects the positional args and other keyword args
        which must correspond to the given representation.
    copy : bool, optional
        If `True` (default), a copy of any coordinate data is made. This
        argument can only be passed in as a keyword argument.
    **keyword_args
        Other keyword arguments as applicable for user-defined coordinate
        frames. Common options include:

        ra, dec : angle-like, optional
            RA and Dec for frames where ``ra`` and ``dec`` are keys in the
            frame's ``representation_component_names``, including `ICRS`,
            `FK5`, `FK4`, and `FK4NoETerms`.
        pm_ra_cosdec, pm_dec : `~astropy.units.Quantity` ['angular speed'], optional
            Proper motion components, in angle per time units.
        l, b : angle-like, optional
            Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
            keys in the frame's ``representation_component_names``, including
            the `Galactic` frame.
        pm_l_cosb, pm_b : `~astropy.units.Quantity` ['angular speed'], optional
            Proper motion components in the `Galactic` frame, in angle per time
            units.
        x, y, z : float or `~astropy.units.Quantity` ['length'], optional
            Cartesian coordinate values
        u, v, w : float or `~astropy.units.Quantity` ['length'], optional
            Cartesian coordinate values for the Galactic frame.
        radial_velocity : `~astropy.units.Quantity` ['speed'], optional
            The component of the velocity along the line-of-sight (i.e., the
            radial direction), in velocity units.
    """

    # Declare that SkyCoord can be used as a Table column by defining the
    # info property.
    info = SkyCoordInfo()

    def __init__(self, *args, copy=True, **kwargs):

        # these are frame attributes set on this SkyCoord but *not* a part of
        # the frame object this SkyCoord contains
        self._extra_frameattr_names = set()

        # If all that is passed in is a frame instance that already has data,
        # we should bypass all of the parsing and logic below. This is here
        # to make this the fastest way to create a SkyCoord instance. Many of
        # the classmethods implemented for performance enhancements will use
        # this as the initialization path
        if (len(args) == 1 and len(kwargs) == 0
                and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))):

            coords = args[0]
            if isinstance(coords, SkyCoord):
                self._extra_frameattr_names = coords._extra_frameattr_names
                self.info = coords.info

                # Copy over any extra frame attributes
                for attr_name in self._extra_frameattr_names:
                    # Setting it will also validate it.
                    setattr(self, attr_name, getattr(coords, attr_name))

                coords = coords.frame

            if not coords.has_data:
                raise ValueError('Cannot initialize from a coordinate frame '
                                 'instance without coordinate data')

            if copy:
                self._sky_coord_frame = coords.copy()
            else:
                self._sky_coord_frame = coords

        else:
            # Get the frame instance without coordinate data but with all frame
            # attributes set - these could either have been passed in with the
            # frame as an instance, or passed in as kwargs here
            frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)

            # Parse the args and kwargs to assemble a sanitized and validated
            # kwargs dict for initializing attributes for this object and for
            # creating the internal self._sky_coord_frame object
            args = list(args)  # Make it mutable
            skycoord_kwargs, components, info = _parse_coordinate_data(
                frame_cls(**frame_kwargs), args, kwargs)

            # In the above two parsing functions, these kwargs were identified
            # as valid frame attributes for *some* frame, but not the frame that
            # this SkyCoord will have. We keep these attributes as special
            # skycoord frame attributes:
            for attr in skycoord_kwargs:
                # Setting it will also validate it.
                setattr(self, attr, skycoord_kwargs[attr])

            if info is not None:
                self.info = info

            # Finally make the internal coordinate object.
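            # ``components`` maps representation attribute names (e.g.
            # ``ra``/``dec`` or ``l``/``b``) to the parsed data values;
            # merging them into ``frame_kwargs`` lets the frame class do the
            # final validation in one place.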
frame_kwargs.update(components) self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs) if not self._sky_coord_frame.has_data: raise ValueError('Cannot create a SkyCoord without data') @property def frame(self): return self._sky_coord_frame @property def representation_type(self): return self.frame.representation_type @representation_type.setter def representation_type(self, value): self.frame.representation_type = value # TODO: remove these in future @property def representation(self): return self.frame.representation @representation.setter def representation(self, value): self.frame.representation = value @property def shape(self): return self.frame.shape def __eq__(self, value): """Equality operator for SkyCoord This implements strict equality and requires that the frames are equivalent, extra frame attributes are equivalent, and that the representation data are exactly equal. """ if not isinstance(value, SkyCoord): return NotImplemented # Make sure that any extra frame attribute names are equivalent. for attr in self._extra_frameattr_names | value._extra_frameattr_names: if not self.frame._frameattr_equiv(getattr(self, attr), getattr(value, attr)): raise ValueError(f"cannot compare: extra frame attribute " f"'{attr}' is not equivalent " f"(perhaps compare the frames directly to avoid " f"this exception)") return self._sky_coord_frame == value._sky_coord_frame def __ne__(self, value): return np.logical_not(self == value) def _apply(self, method, *args, **kwargs): """Create a new instance, applying a method to the underlying data. In typical usage, the method is any of the shape-changing methods for `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those picking particular elements (``__getitem__``, ``take``, etc.), which are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be applied to the underlying arrays in the representation (e.g., ``x``, ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`), as well as to any frame attributes that have a shape, with the results used to create a new instance. Internally, it is also used to apply functions to the above parts (in particular, `~numpy.broadcast_to`). Parameters ---------- method : str or callable If str, it is the name of a method that is applied to the internal ``components``. If callable, the function is applied. args : tuple Any positional arguments for ``method``. kwargs : dict Any keyword arguments for ``method``. """ def apply_method(value): if isinstance(value, ShapedLikeNDArray): return value._apply(method, *args, **kwargs) else: if callable(method): return method(value, *args, **kwargs) else: return getattr(value, method)(*args, **kwargs) # create a new but empty instance, and copy over stuff new = super().__new__(self.__class__) new._sky_coord_frame = self._sky_coord_frame._apply(method, *args, **kwargs) new._extra_frameattr_names = self._extra_frameattr_names.copy() for attr in self._extra_frameattr_names: value = getattr(self, attr) if getattr(value, 'shape', ()): value = apply_method(value) elif method == 'copy' or method == 'flatten': # flatten should copy also for a single element array, but # we cannot use it directly for array scalars, since it # always returns a one-dimensional array. So, just copy. value = copy.copy(value) setattr(new, '_' + attr, value) # Copy other 'info' attr only if it has actually been defined. 
        # See PR #3898 for further explanation and justification, along
        # with Quantity.__array_finalize__
        if 'info' in self.__dict__:
            new.info = self.info

        return new

    def __setitem__(self, item, value):
        """Implement self[item] = value for SkyCoord

        The right hand ``value`` must be strictly consistent with self:
        - Identical class
        - Equivalent frames
        - Identical representation_types
        - Identical representation differentials keys
        - Identical frame attributes
        - Identical "extra" frame attributes (e.g. obstime for an ICRS coord)

        With these caveats the setitem ends up as effectively a setitem on
        the representation data.

          self.frame.data[item] = value.frame.data
        """
        if self.__class__ is not value.__class__:
            raise TypeError(f'can only set from object of same class: '
                            f'{self.__class__.__name__} vs. '
                            f'{value.__class__.__name__}')

        # Make sure that any extra frame attribute names are equivalent.
        for attr in self._extra_frameattr_names | value._extra_frameattr_names:
            if not self.frame._frameattr_equiv(getattr(self, attr),
                                               getattr(value, attr)):
                raise ValueError(f'attribute {attr} is not equivalent')

        # Set the frame values. This checks frame equivalence and also clears
        # the cache to ensure that the object is not in an inconsistent state.
        self._sky_coord_frame[item] = value._sky_coord_frame

    def insert(self, obj, values, axis=0):
        """
        Insert coordinate values before the given indices in the object and
        return a new ``SkyCoord`` object.

        The values to be inserted must conform to the rules for in-place
        setting of ``SkyCoord`` objects.

        The API signature matches the ``np.insert`` API, but is more limited.
        The specification of insert index ``obj`` must be a single integer,
        and the ``axis`` must be ``0`` for simple insertion before the index.

        Parameters
        ----------
        obj : int
            Integer index before which ``values`` is inserted.
        values : array-like
            Value(s) to insert. If the type of ``values`` is different from
            that of this object, ``values`` is converted to the matching type.
        axis : int, optional
            Axis along which to insert ``values``. Default is 0, which is the
            only allowed value and will insert a row.

        Returns
        -------
        out : `~astropy.coordinates.SkyCoord` instance
            New coordinate object with inserted value(s)
        """
        # Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
        # input index is in bounds.
        try:
            idx0 = operator.index(obj)
        except TypeError:
            raise TypeError('obj arg must be an integer')

        if axis != 0:
            raise ValueError('axis must be 0')

        if not self.shape:
            raise TypeError('cannot insert into scalar {} object'
                            .format(self.__class__.__name__))

        if abs(idx0) > len(self):
            raise IndexError('index {} is out of bounds for axis 0 with size {}'
                             .format(idx0, len(self)))

        # Turn negative index into positive
        if idx0 < 0:
            idx0 = len(self) + idx0

        n_values = len(values) if values.shape else 1

        # Finally make the new object with the correct length and set values for the
        # three sections, before insert, the insert, and after the insert.
        out = self.__class__.info.new_like([self], len(self) + n_values,
                                           name=self.info.name)

        # Set the output values. This is where validation of `values` takes place to ensure
        # that it can indeed be inserted.
        out[:idx0] = self[:idx0]
        out[idx0:idx0 + n_values] = values
        out[idx0 + n_values:] = self[idx0:]

        return out

    def is_transformable_to(self, new_frame):
        """
        Determines if this coordinate frame can be transformed to another
        given frame.

        Parameters
        ----------
        new_frame : frame class, frame object, or str
            The proposed frame to transform into.
Returns ------- transformable : bool or str `True` if this can be transformed to ``new_frame``, `False` if not, or the string 'same' if ``new_frame`` is the same system as this object but no transformation is defined. Notes ----- A return value of 'same' means the transformation will work, but it will just give back a copy of this object. The intended usage is:: if coord.is_transformable_to(some_unknown_frame): coord2 = coord.transform_to(some_unknown_frame) This will work even if ``some_unknown_frame`` turns out to be the same frame class as ``coord``. This is intended for cases where the frame is the same regardless of the frame attributes (e.g. ICRS), but be aware that it *might* also indicate that someone forgot to define the transformation between two objects of the same frame class but with different attributes. """ # TODO! like matplotlib, do string overrides for modified methods new_frame = (_get_frame_class(new_frame) if isinstance(new_frame, str) else new_frame) return self.frame.is_transformable_to(new_frame) def transform_to(self, frame, merge_attributes=True): """Transform this coordinate to a new frame. The precise frame transformed to depends on ``merge_attributes``. If `False`, the destination frame is used exactly as passed in. But this is often not quite what one wants. E.g., suppose one wants to transform an ICRS coordinate that has an obstime attribute to FK4; in this case, one likely would want to use this information. Thus, the default for ``merge_attributes`` is `True`, in which the precedence is as follows: (1) explicitly set (i.e., non-default) values in the destination frame; (2) explicitly set values in the source; (3) default value in the destination frame. Note that in either case, any explicitly set attributes on the source `SkyCoord` that are not part of the destination frame's definition are kept (stored on the resulting `SkyCoord`), and thus one can round-trip (e.g., from FK4 to ICRS to FK4 without losing obstime). Parameters ---------- frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance The frame to transform this coordinate into. If a `SkyCoord`, the underlying frame is extracted, and all other information ignored. merge_attributes : bool, optional Whether the default attributes in the destination frame are allowed to be overridden by explicitly set attributes in the source (see note above; default: `True`). Returns ------- coord : `SkyCoord` A new object with this coordinate represented in the `frame` frame. Raises ------ ValueError If there is no possible transformation route. """ from astropy.coordinates.errors import ConvertError frame_kwargs = {} # Frame name (string) or frame class? Coerce into an instance. try: frame = _get_frame_class(frame)() except Exception: pass if isinstance(frame, SkyCoord): frame = frame.frame # Change to underlying coord frame instance if isinstance(frame, BaseCoordinateFrame): new_frame_cls = frame.__class__ # Get frame attributes, allowing defaults to be overridden by # explicitly set attributes of the source if ``merge_attributes``. 
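            # Precedence per attribute: (1) a non-default value set on the
            # destination frame, (2) a value explicitly set on this object,
            # (3) the destination frame's default value.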
for attr in frame_transform_graph.frame_attributes: self_val = getattr(self, attr, None) frame_val = getattr(frame, attr, None) if (frame_val is not None and not (merge_attributes and frame.is_frame_attr_default(attr))): frame_kwargs[attr] = frame_val elif (self_val is not None and not self.is_frame_attr_default(attr)): frame_kwargs[attr] = self_val elif frame_val is not None: frame_kwargs[attr] = frame_val else: raise ValueError('Transform `frame` must be a frame name, class, or instance') # Get the composite transform to the new frame trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls) if trans is None: raise ConvertError('Cannot transform from {} to {}' .format(self.frame.__class__, new_frame_cls)) # Make a generic frame which will accept all the frame kwargs that # are provided and allow for transforming through intermediate frames # which may require one or more of those kwargs. generic_frame = GenericFrame(frame_kwargs) # Do the transformation, returning a coordinate frame of the desired # final type (not generic). new_coord = trans(self.frame, generic_frame) # Finally make the new SkyCoord object from the `new_coord` and # remaining frame_kwargs that are not frame_attributes in `new_coord`. for attr in (set(new_coord.get_frame_attr_names()) & set(frame_kwargs.keys())): frame_kwargs.pop(attr) # Always remove the origin frame attribute, as that attribute only makes # sense with a SkyOffsetFrame (in which case it will be stored on the frame). # See gh-11277. # TODO: Should it be a property of the frame attribute that it can # or cannot be stored on a SkyCoord? frame_kwargs.pop('origin', None) return self.__class__(new_coord, **frame_kwargs) def apply_space_motion(self, new_obstime=None, dt=None): """ Compute the position of the source represented by this coordinate object to a new time using the velocities stored in this object and assuming linear space motion (including relativistic corrections). This is sometimes referred to as an "epoch transformation." The initial time before the evolution is taken from the ``obstime`` attribute of this coordinate. Note that this method currently does not support evolving coordinates where the *frame* has an ``obstime`` frame attribute, so the ``obstime`` is only used for storing the before and after times, not actually as an attribute of the frame. Alternatively, if ``dt`` is given, an ``obstime`` need not be provided at all. Parameters ---------- new_obstime : `~astropy.time.Time`, optional The time at which to evolve the position to. Requires that the ``obstime`` attribute be present on this frame. dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional An amount of time to evolve the position of the source. Cannot be given at the same time as ``new_obstime``. Returns ------- new_coord : `SkyCoord` A new coordinate object with the evolved location of this coordinate at the new time. ``obstime`` will be set on this object to the new time only if ``self`` also has ``obstime``. """ if (new_obstime is None and dt is None or new_obstime is not None and dt is not None): raise ValueError("You must specify one of `new_obstime` or `dt`, " "but not both.") # Validate that we have velocity info if 's' not in self.frame.data.differentials: raise ValueError('SkyCoord requires velocity data to evolve the ' 'position.') if 'obstime' in self.frame.frame_attributes: raise NotImplementedError("Updating the coordinates in a frame " "with explicit time dependence is " "currently not supported. 
If you would " "like this functionality, please open an " "issue on github:\n" "https://github.com/astropy/astropy") if new_obstime is not None and self.obstime is None: # If no obstime is already on this object, raise an error if a new # obstime is passed: we need to know the time / epoch at which the # the position / velocity were measured initially raise ValueError('This object has no associated `obstime`. ' 'apply_space_motion() must receive a time ' 'difference, `dt`, and not a new obstime.') # Compute t1 and t2, the times used in the starpm call, which *only* # uses them to compute a delta-time t1 = self.obstime if dt is None: # self.obstime is not None and new_obstime is not None b/c of above # checks t2 = new_obstime else: # new_obstime is definitely None b/c of the above checks if t1 is None: # MAGIC NUMBER: if the current SkyCoord object has no obstime, # assume J2000 to do the dt offset. This is not actually used # for anything except a delta-t in starpm, so it's OK that it's # not necessarily the "real" obstime t1 = Time('J2000') new_obstime = None # we don't actually know the initial obstime t2 = t1 + dt else: t2 = t1 + dt new_obstime = t2 # starpm wants tdb time t1 = t1.tdb t2 = t2.tdb # proper motion in RA should not include the cos(dec) term, see the # erfa function eraStarpv, comment (4). So we convert to the regular # spherical differentials. icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential) icrsvel = icrsrep.differentials['s'] parallax_zero = False try: plx = icrsrep.distance.to_value(u.arcsecond, u.parallax()) except u.UnitConversionError: # No distance: set to 0 by convention plx = 0. parallax_zero = True try: rv = icrsvel.d_distance.to_value(u.km/u.s) except u.UnitConversionError: # No RV rv = 0. starpm = erfa.pmsafe(icrsrep.lon.radian, icrsrep.lat.radian, icrsvel.d_lon.to_value(u.radian/u.yr), icrsvel.d_lat.to_value(u.radian/u.yr), plx, rv, t1.jd1, t1.jd2, t2.jd1, t2.jd2) if parallax_zero: new_distance = None else: new_distance = Distance(parallax=starpm[4] << u.arcsec) icrs2 = ICRS(ra=u.Quantity(starpm[0], u.radian, copy=False), dec=u.Quantity(starpm[1], u.radian, copy=False), pm_ra=u.Quantity(starpm[2], u.radian/u.yr, copy=False), pm_dec=u.Quantity(starpm[3], u.radian/u.yr, copy=False), distance=new_distance, radial_velocity=u.Quantity(starpm[5], u.km/u.s, copy=False), differential_type=SphericalDifferential) # Update the obstime of the returned SkyCoord, and need to carry along # the frame attributes frattrs = {attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names} frattrs['obstime'] = new_obstime result = self.__class__(icrs2, **frattrs).transform_to(self.frame) # Without this the output might not have the right differential type. # Not sure if this fixes the problem or just hides it. See #11932 result.differential_type = self.differential_type return result def _is_name(self, string): """ Returns whether a string is one of the aliases for the frame. """ return (self.frame.name == string or (isinstance(self.frame.name, list) and string in self.frame.name)) def __getattr__(self, attr): """ Overrides getattr to return coordinates that this can be transformed to, based on the alias attr in the primary transform graph. """ if '_sky_coord_frame' in self.__dict__: if self._is_name(attr): return self # Should this be a deepcopy of self? # Anything in the set of all possible frame_attr_names is handled # here. If the attr is relevant for the current frame then delegate # to self.frame otherwise get it from self._<attr>. 
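            # For example, ``sc.equinox`` resolves here: it comes from the
            # frame if the frame defines it, otherwise from a stored extra
            # attribute (set via ``__setattr__`` below), otherwise None.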
            if attr in frame_transform_graph.frame_attributes:
                if attr in self.frame.get_frame_attr_names():
                    return getattr(self.frame, attr)
                else:
                    return getattr(self, '_' + attr, None)

            # Some attributes might not fall in the above category but still
            # are available through self._sky_coord_frame.
            if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
                return getattr(self._sky_coord_frame, attr)

            # Try to interpret as a new frame for transforming.
            frame_cls = frame_transform_graph.lookup_name(attr)
            if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
                return self.transform_to(attr)

        # Fail
        raise AttributeError("'{}' object has no attribute '{}'"
                             .format(self.__class__.__name__, attr))

    def __setattr__(self, attr, val):
        # This is to make anything available through __getattr__ immutable
        if '_sky_coord_frame' in self.__dict__:
            if self._is_name(attr):
                raise AttributeError(f"'{attr}' is immutable")

            if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
                setattr(self._sky_coord_frame, attr, val)
                return

            frame_cls = frame_transform_graph.lookup_name(attr)
            if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
                raise AttributeError(f"'{attr}' is immutable")

        if attr in frame_transform_graph.frame_attributes:
            # All possible frame attributes can be set, but only via a private
            # variable. See __getattr__ above.
            super().__setattr__('_' + attr, val)
            # Validate it
            frame_transform_graph.frame_attributes[attr].__get__(self)
            # And add to set of extra attributes
            self._extra_frameattr_names |= {attr}

        else:
            # Otherwise, do the standard Python attribute setting
            super().__setattr__(attr, val)

    def __delattr__(self, attr):
        # mirror __setattr__ above
        if '_sky_coord_frame' in self.__dict__:
            if self._is_name(attr):
                raise AttributeError(f"'{attr}' is immutable")

            if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
                delattr(self._sky_coord_frame, attr)
                return

            frame_cls = frame_transform_graph.lookup_name(attr)
            if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
                raise AttributeError(f"'{attr}' is immutable")

        if attr in frame_transform_graph.frame_attributes:
            # All possible frame attributes can be deleted, but need to remove
            # the corresponding private variable. See __getattr__ above.
            super().__delattr__('_' + attr)
            # Also remove it from the set of extra attributes
            self._extra_frameattr_names -= {attr}

        else:
            # Otherwise, do the standard Python attribute deletion
            super().__delattr__(attr)

    @override__dir__
    def __dir__(self):
        """
        Override the builtin `dir` behavior to include:
        - Transforms available by aliases
        - Attribute / methods of the underlying self.frame object
        """
        # determine the aliases that this can be transformed to.
        dir_values = set()
        for name in frame_transform_graph.get_names():
            frame_cls = frame_transform_graph.lookup_name(name)
            if self.frame.is_transformable_to(frame_cls):
                dir_values.add(name)

        # Add public attributes of self.frame
        dir_values.update(set(attr for attr in dir(self.frame)
                              if not attr.startswith('_')))

        # Add all possible frame attributes
        dir_values.update(frame_transform_graph.frame_attributes.keys())

        return dir_values

    def __repr__(self):
        clsnm = self.__class__.__name__
        coonm = self.frame.__class__.__name__
        frameattrs = self.frame._frame_attrs_repr()
        if frameattrs:
            frameattrs = ': ' + frameattrs

        data = self.frame._data_repr()
        if data:
            data = ': ' + data

        return '<{clsnm} ({coonm}{frameattrs}){data}>'.format(**locals())

    def to_string(self, style='decimal', **kwargs):
        """
        A string representation of the coordinates.

        The default style definitions are::

          'decimal': 'lat': {'decimal': True, 'unit': "deg"}
                     'lon': {'decimal': True, 'unit': "deg"}
          'dms': 'lat': {'unit': "deg"}
                 'lon': {'unit': "deg"}
          'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
                    'lon': {'pad': True, 'unit': "hour"}

        See :meth:`~astropy.coordinates.Angle.to_string` for details and
        keyword arguments (the two angles forming the coordinates are both
        :class:`~astropy.coordinates.Angle` instances). Keyword arguments have
        precedence over the style defaults and are passed to
        :meth:`~astropy.coordinates.Angle.to_string`.

        Parameters
        ----------
        style : {'hmsdms', 'dms', 'decimal'}
            The formatting specification to use. These encode the three most
            common ways to represent coordinates. The default is ``'decimal'``.
        kwargs
            Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
        """
        sph_coord = self.frame.represent_as(SphericalRepresentation)

        styles = {'hmsdms': {'lonargs': {'unit': u.hour, 'pad': True},
                             'latargs': {'unit': u.degree, 'pad': True,
                                         'alwayssign': True}},
                  'dms': {'lonargs': {'unit': u.degree},
                          'latargs': {'unit': u.degree}},
                  'decimal': {'lonargs': {'unit': u.degree, 'decimal': True},
                              'latargs': {'unit': u.degree, 'decimal': True}}
                  }

        lonargs = {}
        latargs = {}

        if style in styles:
            lonargs.update(styles[style]['lonargs'])
            latargs.update(styles[style]['latargs'])
        else:
            raise ValueError(f"Invalid style. Valid options are: {','.join(styles)}")

        lonargs.update(kwargs)
        latargs.update(kwargs)

        if np.isscalar(sph_coord.lon.value):
            coord_string = (sph_coord.lon.to_string(**lonargs)
                            + " " + sph_coord.lat.to_string(**latargs))
        else:
            coord_string = []
            for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
                coord_string += [(lonangle.to_string(**lonargs)
                                 + " " + latangle.to_string(**latargs))]
            if len(sph_coord.shape) > 1:
                coord_string = np.array(coord_string).reshape(sph_coord.shape)

        return coord_string

    def to_table(self):
        """
        Convert this |SkyCoord| to a |QTable|.

        Any attributes that have the same length as the |SkyCoord| will be
        converted to columns of the |QTable|. All other attributes will be
        recorded as metadata.

        Returns
        -------
        `~astropy.table.QTable`
            A |QTable| containing the data of this |SkyCoord|.

        Examples
        --------
        >>> sc = SkyCoord(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg,
        ...               obstime=Time([2000, 2010], format='jyear'))
        >>> t = sc.to_table()
        >>> t
        <QTable length=2>
           ra     dec   obstime
          deg     deg
        float64 float64   Time
        ------- ------- -------
           40.0     0.0  2000.0
           70.0   -20.0  2010.0
        >>> t.meta
        {'representation_type': 'spherical', 'frame': 'icrs'}
        """
        self_as_dict = self.info._represent_as_dict()
        tabledata = {}
        metadata = {}
        # Record attributes that have the same length as self as columns in the
        # table, and the other attributes as table metadata. This matches
        # table.serialize._represent_mixin_as_column().
        for key, value in self_as_dict.items():
            if getattr(value, 'shape', ())[:1] == (len(self),):
                tabledata[key] = value
            else:
                metadata[key] = value
        return QTable(tabledata, meta=metadata)

    def is_equivalent_frame(self, other):
        """
        Checks if this object's frame is the same as that of the ``other``
        object.

        To be the same frame, two objects must be the same frame class and
        have the same frame attributes. For two `SkyCoord` objects, *all* of
        the frame attributes have to match, not just those relevant for the
        object's frame.

        Parameters
        ----------
        other : SkyCoord or BaseCoordinateFrame
            The other object to check.

        Returns
        -------
        isequiv : bool
            True if the frames are the same, False if not.

        Raises
        ------
        TypeError
            If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass.
        """
        if isinstance(other, BaseCoordinateFrame):
            return self.frame.is_equivalent_frame(other)
        elif isinstance(other, SkyCoord):
            if other.frame.name != self.frame.name:
                return False

            for fattrnm in frame_transform_graph.frame_attributes:
                if not BaseCoordinateFrame._frameattr_equiv(getattr(self, fattrnm),
                                                            getattr(other, fattrnm)):
                    return False
            return True
        else:
            # not a BaseCoordinateFrame nor a SkyCoord object
            raise TypeError("Tried to do is_equivalent_frame on something that "
                            "isn't frame-like")

    # High-level convenience methods
    def separation(self, other):
        """
        Computes on-sky separation between this coordinate and another.

        .. note::

            If the ``other`` coordinate object is in a different frame, it is
            first transformed to the frame of this object. This can lead to
            unintuitive behavior if not accounted for. Particularly of note is
            that ``self.separation(other)`` and ``other.separation(self)`` may
            not give the same answer in this case.

        For more on how to use this (and related) functionality, see the
        examples in :doc:`astropy:/coordinates/matchsep`.

        Parameters
        ----------
        other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
            The coordinate to get the separation to.

        Returns
        -------
        sep : `~astropy.coordinates.Angle`
            The on-sky separation between this and the ``other`` coordinate.

        Notes
        -----
        The separation is calculated using the Vincenty formula, which
        is stable at all locations, including poles and antipodes [1]_.

        .. [1] https://en.wikipedia.org/wiki/Great-circle_distance

        """
        from . import Angle
        from .angle_utilities import angular_separation

        if not self.is_equivalent_frame(other):
            try:
                kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {}
                other = other.transform_to(self, **kwargs)
            except TypeError:
                raise TypeError('Can only get separation to another SkyCoord '
                                'or a coordinate frame with data')

        lon1 = self.spherical.lon
        lat1 = self.spherical.lat
        lon2 = other.spherical.lon
        lat2 = other.spherical.lat

        # Get the separation as a Quantity, convert to Angle in degrees
        sep = angular_separation(lon1, lat1, lon2, lat2)
        return Angle(sep, unit=u.degree)

    def separation_3d(self, other):
        """
        Computes three dimensional separation between this coordinate
        and another.

        For more on how to use this (and related) functionality, see the
        examples in :doc:`astropy:/coordinates/matchsep`.

        Parameters
        ----------
        other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
            The coordinate to get the separation to.

        Returns
        -------
        sep : `~astropy.coordinates.Distance`
            The real-space distance between these two coordinates.

        Raises
        ------
        ValueError
            If this or the other coordinate does not have a distance.
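
        Examples
        --------
        A minimal, hedged sketch (both coordinates must carry distances;
        the statements are illustrative only)::

            >>> from astropy.coordinates import SkyCoord
            >>> import astropy.units as u
            >>> c1 = SkyCoord(ra=10*u.deg, dec=9*u.deg, distance=10*u.pc)
            >>> c2 = SkyCoord(ra=11*u.deg, dec=10*u.deg, distance=11.5*u.pc)
            >>> sep = c1.separation_3d(c2)  # a Distance in pc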
""" if not self.is_equivalent_frame(other): try: kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {} other = other.transform_to(self, **kwargs) except TypeError: raise TypeError('Can only get separation to another SkyCoord ' 'or a coordinate frame with data') if issubclass(self.data.__class__, UnitSphericalRepresentation): raise ValueError('This object does not have a distance; cannot ' 'compute 3d separation.') if issubclass(other.data.__class__, UnitSphericalRepresentation): raise ValueError('The other object does not have a distance; ' 'cannot compute 3d separation.') c1 = self.cartesian.without_differentials() c2 = other.cartesian.without_differentials() return Distance((c1 - c2).norm()) def spherical_offsets_to(self, tocoord): r""" Computes angular offsets to go *from* this coordinate *to* another. Parameters ---------- tocoord : `~astropy.coordinates.BaseCoordinateFrame` The coordinate to find the offset to. Returns ------- lon_offset : `~astropy.coordinates.Angle` The angular offset in the longitude direction. The definition of "longitude" depends on this coordinate's frame (e.g., RA for equatorial coordinates). lat_offset : `~astropy.coordinates.Angle` The angular offset in the latitude direction. The definition of "latitude" depends on this coordinate's frame (e.g., Dec for equatorial coordinates). Raises ------ ValueError If the ``tocoord`` is not in the same frame as this one. This is different from the behavior of the `separation`/`separation_3d` methods because the offset components depend critically on the specific choice of frame. Notes ----- This uses the sky offset frame machinery, and hence will produce a new sky offset frame if one does not already exist for this object's frame class. See Also -------- separation : for the *total* angular offset (not broken out into components). position_angle : for the direction of the offset. """ if not self.is_equivalent_frame(tocoord): raise ValueError('Tried to use spherical_offsets_to with two non-matching frames!') aframe = self.skyoffset_frame() acoord = tocoord.transform_to(aframe) dlon = acoord.spherical.lon.view(Angle) dlat = acoord.spherical.lat.view(Angle) return dlon, dlat def spherical_offsets_by(self, d_lon, d_lat): """ Computes the coordinate that is a specified pair of angular offsets away from this coordinate. Parameters ---------- d_lon : angle-like The angular offset in the longitude direction. The definition of "longitude" depends on this coordinate's frame (e.g., RA for equatorial coordinates). d_lat : angle-like The angular offset in the latitude direction. The definition of "latitude" depends on this coordinate's frame (e.g., Dec for equatorial coordinates). Returns ------- newcoord : `~astropy.coordinates.SkyCoord` The coordinates for the location that corresponds to offsetting by ``d_lat`` in the latitude direction and ``d_lon`` in the longitude direction. Notes ----- This internally uses `~astropy.coordinates.SkyOffsetFrame` to do the transformation. For a more complete set of transform offsets, use `~astropy.coordinates.SkyOffsetFrame` or `~astropy.wcs.WCS` manually. This specific method can be reproduced by doing ``SkyCoord(SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))``. 
See Also -------- spherical_offsets_to : compute the angular offsets to another coordinate directional_offset_by : offset a coordinate by an angle in a direction """ return self.__class__( SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self)) def directional_offset_by(self, position_angle, separation): """ Computes coordinates at the given offset from this coordinate. Parameters ---------- position_angle : `~astropy.coordinates.Angle` position_angle of offset separation : `~astropy.coordinates.Angle` offset angular separation Returns ------- newpoints : `~astropy.coordinates.SkyCoord` The coordinates for the location that corresponds to offsetting by the given `position_angle` and `separation`. Notes ----- Returned SkyCoord frame retains only the frame attributes that are for the resulting frame type. (e.g. if the input frame is `~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but an ``obstime`` will not.) For a more complete set of transform offsets, use `~astropy.wcs.WCS`. `~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to create a spherical frame with (lat=0, lon=0) at a reference point, approximating an xy cartesian system for small offsets. This method is distinct in that it is accurate on the sphere. See Also -------- position_angle : inverse operation for the ``position_angle`` component separation : inverse operation for the ``separation`` component """ from . import angle_utilities slat = self.represent_as(UnitSphericalRepresentation).lat slon = self.represent_as(UnitSphericalRepresentation).lon newlon, newlat = angle_utilities.offset_by( lon=slon, lat=slat, posang=position_angle, distance=separation) return SkyCoord(newlon, newlat, frame=self.frame) def match_to_catalog_sky(self, catalogcoord, nthneighbor=1): """ Finds the nearest on-sky matches of this coordinate in a set of catalog coordinates. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each of this object's coordinates. Shape matches this object. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each element in this object in ``catalogcoord``. Shape matches this object. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each element in this object in ``catalogcoord``. Shape matches this object. Unless both this and ``catalogcoord`` have associated distances, this quantity assumes that all sources are at a distance of 1 (dimensionless). Notes ----- This method requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. 
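
        For example, to match a set of coordinates to the closest entries of
        a catalog (a hedged sketch; ``coords`` and ``catalog`` are
        illustrative array ``SkyCoord`` objects)::

            >>> idx, sep2d, dist3d = coords.match_to_catalog_sky(catalog)  # doctest: +SKIP
            >>> matched = catalog[idx]  # doctest: +SKIP
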
See Also -------- astropy.coordinates.match_coordinates_sky SkyCoord.match_to_catalog_3d """ from .matching import match_coordinates_sky if not (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame)) and catalogcoord.has_data): raise TypeError('Can only get separation to another SkyCoord or a ' 'coordinate frame with data') res = match_coordinates_sky(self, catalogcoord, nthneighbor=nthneighbor, storekdtree='_kdtree_sky') return res def match_to_catalog_3d(self, catalogcoord, nthneighbor=1): """ Finds the nearest 3-dimensional matches of this coordinate to a set of catalog coordinates. This finds the 3-dimensional closest neighbor, which is only different from the on-sky distance if ``distance`` is set in this object or the ``catalogcoord`` object. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). Returns ------- idx : int array Indices into ``catalogcoord`` to get the matched points for each of this object's coordinates. Shape matches this object. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each element in this object in ``catalogcoord``. Shape matches this object. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the closest match for each element in this object in ``catalogcoord``. Shape matches this object. Notes ----- This method requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. See Also -------- astropy.coordinates.match_coordinates_3d SkyCoord.match_to_catalog_sky """ from .matching import match_coordinates_3d if not (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame)) and catalogcoord.has_data): raise TypeError('Can only get separation to another SkyCoord or a ' 'coordinate frame with data') res = match_coordinates_3d(self, catalogcoord, nthneighbor=nthneighbor, storekdtree='_kdtree_3d') return res def search_around_sky(self, searcharoundcoords, seplimit): """ Searches for all coordinates in this object around a supplied set of points within a given on-sky separation. This is intended for use on `~astropy.coordinates.SkyCoord` objects with coordinate arrays, rather than a scalar coordinate. For a scalar coordinate, it is better to use `~astropy.coordinates.SkyCoord.separation`. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- searcharoundcoords : coordinate-like The coordinates to search around to try to find matching points in this `SkyCoord`. This should be an object with array coordinates, not a scalar coordinate object. seplimit : `~astropy.units.Quantity` ['angle'] The on-sky separation to search within. Returns ------- idxsearcharound : int array Indices into ``searcharoundcoords`` that match the corresponding elements of ``idxself``. Shape matches ``idxself``. 
idxself : int array Indices into ``self`` that match the corresponding elements of ``idxsearcharound``. Shape matches ``idxsearcharound``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idxsearcharound`` and ``idxself``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idxsearcharound`` and ``idxself``. Notes ----- This method requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. In the current implementation, the return values are always sorted in the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. See Also -------- astropy.coordinates.search_around_sky SkyCoord.search_around_3d """ from .matching import search_around_sky return search_around_sky(searcharoundcoords, self, seplimit, storekdtree='_kdtree_sky') def search_around_3d(self, searcharoundcoords, distlimit): """ Searches for all coordinates in this object around a supplied set of points within a given 3D radius. This is intended for use on `~astropy.coordinates.SkyCoord` objects with coordinate arrays, rather than a scalar coordinate. For a scalar coordinate, it is better to use `~astropy.coordinates.SkyCoord.separation_3d`. For more on how to use this (and related) functionality, see the examples in :doc:`astropy:/coordinates/matchsep`. Parameters ---------- searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The coordinates to search around to try to find matching points in this `SkyCoord`. This should be an object with array coordinates, not a scalar coordinate object. distlimit : `~astropy.units.Quantity` ['length'] The physical radius to search within. Returns ------- idxsearcharound : int array Indices into ``searcharoundcoords`` that match the corresponding elements of ``idxself``. Shape matches ``idxself``. idxself : int array Indices into ``self`` that match the corresponding elements of ``idxsearcharound``. Shape matches ``idxsearcharound``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idxsearcharound`` and ``idxself``. dist3d : `~astropy.units.Quantity` ['length'] The 3D distance between the coordinates. Shape matches ``idxsearcharound`` and ``idxself``. Notes ----- This method requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. In the current implementation, the return values are always sorted in the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. See Also -------- astropy.coordinates.search_around_3d SkyCoord.search_around_sky """ from .matching import search_around_3d return search_around_3d(searcharoundcoords, self, distlimit, storekdtree='_kdtree_3d') def position_angle(self, other): """ Computes the on-sky position angle (East of North) between this `SkyCoord` and another. Parameters ---------- other : `SkyCoord` The other coordinate to compute the position angle to. It is treated as the "head" of the vector of the position angle. Returns ------- pa : `~astropy.coordinates.Angle` The (positive) position angle of the vector pointing from ``self`` to ``other``. If either ``self`` or ``other`` contain arrays, this will be an array following the appropriate `numpy` broadcasting rules. 

        Examples
        --------
        >>> c1 = SkyCoord(0*u.deg, 0*u.deg)
        >>> c2 = SkyCoord(1*u.deg, 0*u.deg)
        >>> c1.position_angle(c2).degree
        90.0
        >>> c3 = SkyCoord(1*u.deg, 1*u.deg)
        >>> c1.position_angle(c3).degree  # doctest: +FLOAT_CMP
        44.995636455344844
        """
        from . import angle_utilities

        if not self.is_equivalent_frame(other):
            try:
                other = other.transform_to(self, merge_attributes=False)
            except TypeError:
                raise TypeError('Can only get position_angle to another '
                                'SkyCoord or a coordinate frame with data')

        slat = self.represent_as(UnitSphericalRepresentation).lat
        slon = self.represent_as(UnitSphericalRepresentation).lon
        olat = other.represent_as(UnitSphericalRepresentation).lat
        olon = other.represent_as(UnitSphericalRepresentation).lon

        return angle_utilities.position_angle(slon, slat, olon, olat)

    def skyoffset_frame(self, rotation=None):
        """
        Returns the sky offset frame with this `SkyCoord` at the origin.

        Parameters
        ----------
        rotation : angle-like
            The final rotation of the frame about the ``origin``. The sign of
            the rotation is the left-hand rule. That is, an object at a
            particular position angle in the un-rotated system will be sent to
            the positive latitude (z) direction in the final frame.

        Returns
        -------
        astrframe : `~astropy.coordinates.SkyOffsetFrame`
            A sky offset frame of the same type as this `SkyCoord` (e.g., if
            this object has an ICRS coordinate, the resulting frame is
            SkyOffsetICRS, with the origin set to this object)
        """
        return SkyOffsetFrame(origin=self, rotation=rotation)

    def get_constellation(self, short_name=False, constellation_list='iau'):
        """
        Determines the constellation(s) of the coordinates this `SkyCoord`
        contains.

        Parameters
        ----------
        short_name : bool
            If True, the returned names are the IAU-sanctioned abbreviated
            names. Otherwise, full names for the constellations are used.
        constellation_list : str
            The set of constellations to use. Currently only ``'iau'`` is
            supported, meaning the 88 "modern" constellations endorsed by the
            IAU.

        Returns
        -------
        constellation : str or string array
            If this is a scalar coordinate, returns the name of the
            constellation. If it is an array `SkyCoord`, it returns an array
            of names.

        Notes
        -----
        To determine which constellation a point on the sky is in, this first
        precesses to B1875, and then uses the Delporte boundaries of the 88
        modern constellations, as tabulated by
        `Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.

        See Also
        --------
        astropy.coordinates.get_constellation
        """
        from .funcs import get_constellation

        # because of issue #7028, the conversion to a PrecessedGeocentric
        # system fails in some cases. The work-around is to drop the
        # velocities; they are not needed here since only position
        # information is used
        extra_frameattrs = {nm: getattr(self, nm)
                            for nm in self._extra_frameattr_names}
        novel = SkyCoord(self.realize_frame(self.data.without_differentials()),
                         **extra_frameattrs)
        return get_constellation(novel, short_name, constellation_list)

        # the simpler version below can be used when gh-issue #7028 is resolved
        # return get_constellation(self, short_name, constellation_list)

    # WCS pixel to/from sky conversions
    def to_pixel(self, wcs, origin=0, mode='all'):
        """
        Convert this coordinate to pixel coordinates using a
        `~astropy.wcs.WCS` object.

        Parameters
        ----------
        wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion
        origin : int
            Whether to return 0 or 1-based pixel coordinates.
        mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or including only the core WCS transformation (``'wcs'``).

        Returns
        -------
        xp, yp : `numpy.ndarray`
            The pixel coordinates

        See Also
        --------
        astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
        """
        from astropy.wcs.utils import skycoord_to_pixel
        return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)

    @classmethod
    def from_pixel(cls, xp, yp, wcs, origin=0, mode='all'):
        """
        Create a new `SkyCoord` from pixel coordinates using an
        `~astropy.wcs.WCS` object.

        Parameters
        ----------
        xp, yp : float or ndarray
            The coordinates to convert.
        wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion
        origin : int
            Whether to return 0 or 1-based pixel coordinates.
        mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or including only the core WCS transformation (``'wcs'``).

        Returns
        -------
        coord : `~astropy.coordinates.SkyCoord`
            A new object with sky coordinates corresponding to the input ``xp``
            and ``yp``.

        See Also
        --------
        to_pixel : to do the inverse operation
        astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
        """
        from astropy.wcs.utils import pixel_to_skycoord
        return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)

    def contained_by(self, wcs, image=None, **kwargs):
        """
        Determines if the SkyCoord is contained in the given wcs footprint.

        Parameters
        ----------
        wcs : `~astropy.wcs.WCS`
            The WCS footprint against which the coordinate is checked.
        image : array
            Optional. The image associated with the wcs object that the
            coordinate is being checked against. If not given the naxis
            keywords will be used to determine if the coordinate falls within
            the wcs footprint.
        **kwargs
            Additional arguments to pass to
            `~astropy.coordinates.SkyCoord.to_pixel`

        Returns
        -------
        response : bool
            True means the WCS footprint contains the coordinate, False means
            it does not.
        """

        if image is not None:
            ymax, xmax = image.shape
        else:
            xmax, ymax = wcs._naxis

        import warnings
        with warnings.catch_warnings():
            # Suppress warnings since they just mean we didn't find the
            # coordinate
            warnings.simplefilter("ignore")
            try:
                x, y = self.to_pixel(wcs, **kwargs)
            except Exception:
                return False

        return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)

    def radial_velocity_correction(self, kind='barycentric', obstime=None,
                                   location=None):
        """
        Compute the correction required to convert a radial velocity at a
        given time and place on the Earth's surface to a barycentric or
        heliocentric velocity.

        Parameters
        ----------
        kind : str
            The kind of velocity correction. Must be 'barycentric' or
            'heliocentric'.
        obstime : `~astropy.time.Time` or None, optional
            The time at which to compute the correction. If `None`, the
            ``obstime`` frame attribute on the `SkyCoord` will be used.
        location : `~astropy.coordinates.EarthLocation` or None, optional
            The observer location at which to compute the correction. If
            `None`, the ``location`` frame attribute on the passed-in
            ``obstime`` will be used, and if that is None, the ``location``
            frame attribute on the `SkyCoord` will be used.

        Raises
        ------
        ValueError
            If either ``obstime`` or ``location`` are passed in (not ``None``)
            when the frame attribute is already set on this `SkyCoord`.
        TypeError
            If ``obstime`` or ``location`` aren't provided, either as
            arguments or as frame attributes.

        Returns
        -------
        vcorr : `~astropy.units.Quantity` ['speed']
            The correction with a positive sign. I.e., *add* this to an
            observed radial velocity to get the barycentric (or heliocentric)
            velocity. If m/s precision or better is needed, see the notes
            below.
        Notes
        -----
        The barycentric correction is calculated to higher precision than the
        heliocentric correction and includes additional physics (e.g., time
        dilation). Use barycentric corrections if m/s precision is required.

        The algorithm here is sufficient to perform corrections at the mm/s
        level, but care is needed in application. The barycentric correction
        returned uses the optical approximation v = z * c. Strictly speaking,
        the barycentric correction is multiplicative and should be applied
        as::

            >>> import astropy.units as u
            >>> from astropy.time import Time
            >>> from astropy.coordinates import SkyCoord, EarthLocation
            >>> from astropy.constants import c
            >>> t = Time(56370.5, format='mjd', scale='utc')
            >>> loc = EarthLocation('149d33m00.5s', '-30d18m46.385s', 236.87*u.m)
            >>> sc = SkyCoord(1*u.deg, 2*u.deg)
            >>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc)  # doctest: +REMOTE_DATA
            >>> rv = rv + vcorr + rv * vcorr / c  # doctest: +SKIP

        Also note that this method returns the correction velocity in the
        so-called *optical convention*::

            >>> vcorr = zb * c  # doctest: +SKIP

        where ``zb`` is the barycentric correction redshift as defined in
        section 3 of Wright & Eastman (2014). The application formula given
        above follows from their equation (11) under the assumption that the
        radial velocity ``rv`` has also been defined using the same optical
        convention. Note that this can be regarded as a matter of velocity
        definition and does not by itself imply any loss of accuracy, provided
        sufficient care has been taken during interpretation of the results.
        If you need the barycentric correction expressed as the full
        relativistic velocity (e.g., to provide it as input to other software
        that performs the application), the following recipe can be used::

            >>> zb = vcorr / c  # doctest: +REMOTE_DATA
            >>> zb_plus_one_squared = (zb + 1) ** 2  # doctest: +REMOTE_DATA
            >>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1)  # doctest: +REMOTE_DATA

        or alternatively using just equivalencies::

            >>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz))  # doctest: +REMOTE_DATA

        See also `~astropy.units.equivalencies.doppler_optical`,
        `~astropy.units.equivalencies.doppler_radio`, and
        `~astropy.units.equivalencies.doppler_relativistic` for more
        information on the velocity conventions.

        The default is for this method to use the built-in ephemeris for
        computing the sun and earth location. Other ephemerides can be chosen
        by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
        either directly or via a ``with`` statement. For example, to use the
        JPL ephemeris, do::

            >>> from astropy.coordinates import solar_system_ephemeris
            >>> sc = SkyCoord(1*u.deg, 2*u.deg)
            >>> with solar_system_ephemeris.set('jpl'):  # doctest: +REMOTE_DATA
            ...
            ...     rv += sc.radial_velocity_correction(obstime=t, location=loc)  # doctest: +SKIP
        """
        # has to be here to prevent circular imports
        from .solar_system import get_body_barycentric_posvel

        # location validation
        timeloc = getattr(obstime, 'location', None)
        if location is None:
            if self.location is not None:
                location = self.location
                if timeloc is not None:
                    raise ValueError('`location` cannot be in both the '
                                     'passed-in `obstime` and this `SkyCoord` '
                                     'because it is ambiguous which is meant '
                                     'for the radial_velocity_correction.')
            elif timeloc is not None:
                location = timeloc
            else:
                raise TypeError('Must provide a `location` to '
                                'radial_velocity_correction, either as a '
                                'SkyCoord frame attribute, as an attribute on '
                                'the passed in `obstime`, or in the method '
                                'call.')
        elif self.location is not None or timeloc is not None:
            raise ValueError('Cannot compute radial velocity correction if '
                             '`location` argument is passed in and there is '
                             'also a `location` attribute on this SkyCoord or '
                             'the passed-in `obstime`.')

        # obstime validation
        coo_at_rv_obstime = self  # assume we need no space motion for now
        if obstime is None:
            obstime = self.obstime
            if obstime is None:
                raise TypeError('Must provide an `obstime` to '
                                'radial_velocity_correction, either as a '
                                'SkyCoord frame attribute or in the method '
                                'call.')
        elif self.obstime is not None and self.frame.data.differentials:
            # we do need space motion after all
            coo_at_rv_obstime = self.apply_space_motion(obstime)
        elif self.obstime is None:
            # warn the user if the object has differentials set
            if 's' in self.data.differentials:
                warnings.warn(
                    "SkyCoord has space motion, and therefore the specified "
                    "position of the SkyCoord may not be the same as "
                    "the `obstime` for the radial velocity measurement. "
                    "This may affect the rv correction at the order of km/s "
                    "for very high proper motion sources. If you wish to "
                    "apply space motion of the SkyCoord to correct for this, "
                    "the `obstime` attribute of the SkyCoord must be set.",
                    AstropyUserWarning
                )

        pos_earth, v_earth = get_body_barycentric_posvel('earth', obstime)
        if kind == 'barycentric':
            v_origin_to_earth = v_earth
        elif kind == 'heliocentric':
            v_sun = get_body_barycentric_posvel('sun', obstime)[1]
            v_origin_to_earth = v_earth - v_sun
        else:
            raise ValueError("`kind` argument to radial_velocity_correction must "
                             "be 'barycentric' or 'heliocentric', but got "
                             "'{}'".format(kind))

        gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
        # Transforming to GCRS is not the correct thing to do here, since we
        # don't want to include aberration (or light deflection); instead,
        # only apply parallax if necessary.
        icrs_cart = coo_at_rv_obstime.icrs.cartesian
        icrs_cart_novel = icrs_cart.without_differentials()
        if self.data.__class__ is UnitSphericalRepresentation:
            targcart = icrs_cart_novel
        else:
            # skycoord has distances so apply parallax
            obs_icrs_cart = pos_earth + gcrs_p
            targcart = icrs_cart_novel - obs_icrs_cart
            targcart /= targcart.norm()

        if kind == 'barycentric':
            beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
            gamma_obs = 1 / np.sqrt(1 - beta_obs.norm()**2)
            gr = location.gravitational_redshift(obstime)
            # barycentric redshift according to eq 28 in Wright & Eastman (2014),
            # neglecting Shapiro delay and effects of the star's own motion
            zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr/speed_of_light)

            # try to get terms corresponding to stellar motion.
            if icrs_cart.differentials:
                try:
                    ro = self.icrs.cartesian
                    beta_star = ro.differentials['s'].to_cartesian() / speed_of_light
                    # ICRS unit vector at coordinate epoch
                    ro = ro.without_differentials()
                    ro /= ro.norm()
                    zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
                except u.UnitConversionError:
                    warnings.warn("SkyCoord contains some velocity information, but not enough to "
                                  "calculate the full space motion of the source, and so this has "
                                  "been ignored for the purposes of calculating the radial velocity "
                                  "correction. This can lead to errors on the order of metres/second.",
                                  AstropyUserWarning)

            zb = zb - 1
            return zb * speed_of_light
        else:
            # do a simpler correction ignoring time dilation and gravitational
            # redshift; this is adequate since heliocentric corrections
            # shouldn't be used if cm/s precision is required.
            return targcart.dot(v_origin_to_earth + gcrs_v)

    # Table interactions
    @classmethod
    def guess_from_table(cls, table, **coord_kwargs):
        r"""
        A convenience method to create and return a new `SkyCoord` from the
        data in an astropy Table.

        This method matches table columns that start with the case-insensitive
        names of the components of the requested frames (including
        differentials), if they are also followed by a non-alphanumeric
        character. It will also match columns that *end* with the component
        name if a non-alphanumeric character is *before* it.

        For example, the first rule means columns with names like
        ``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes
        for `~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or
        ``'radius'`` are *not*. Similarly, the second rule applied to the
        `~astropy.coordinates.Galactic` frame means that a column named
        ``'gal_l'`` will be used as the ``l`` component, but ``'gall'`` or
        ``'fill'`` will not.

        The definition of alphanumeric here is based on Unicode's definition
        of alphanumeric, except without ``_`` (which is normally considered
        alphanumeric). So for ASCII, this means the non-alphanumeric
        characters are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.

        Parameters
        ----------
        table : `~astropy.table.Table` or subclass
            The table to load data from.
        **coord_kwargs
            Any additional keyword arguments are passed directly to this
            class's constructor.

        Returns
        -------
        newsc : `~astropy.coordinates.SkyCoord` or subclass
            The new `SkyCoord` (or subclass) object.

        Raises
        ------
        ValueError
            If more than one match is found in the table for a component,
            unless the additional matches are also valid frame component
            names. Also raised if a ``coord_kwargs`` value is provided for a
            component that is also found in the table.
        """
        _frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
        frame = _frame_cls(**_frame_kwargs)
        coord_kwargs['frame'] = coord_kwargs.get('frame', frame)

        representation_component_names = (
            set(frame.get_representation_component_names())
            .union(set(frame.get_representation_component_names("s")))
        )

        comp_kwargs = {}
        for comp_name in representation_component_names:
            # this matches things like 'ra[...]' but *not* 'rad'.
            # note that the "_" must be in there explicitly, because
            # "alphanumeric" usually includes underscores.
            starts_with_comp = comp_name + r'(\W|\b|_)'
            # this part matches stuff like 'center_ra', but *not* 'aura'
            ends_with_comp = r'.*(\W|\b|_)' + comp_name + r'\b'
            # the final regex ORs together the two patterns
            rex = re.compile(rf"({starts_with_comp})|({ends_with_comp})",
                             re.IGNORECASE | re.UNICODE)

            # find all matches
            matches = {col_name for col_name in table.colnames
                       if rex.match(col_name)}

            # now need to select among matches, also making sure we don't
            # have an exact match with another component
            if len(matches) == 0:  # no matches
                continue
            elif len(matches) == 1:  # only one match
                col_name = matches.pop()
            else:  # more than 1 match
                # try to sieve out other components
                matches -= representation_component_names - {comp_name}
                # if there's only one remaining match, it worked.
                if len(matches) == 1:
                    col_name = matches.pop()
                else:
                    raise ValueError(
                        'Found at least two matches for component '
                        f'"{comp_name}": "{matches}". Cannot guess coordinates '
                        'from a table with this ambiguity.')

            comp_kwargs[comp_name] = table[col_name]

        for k, v in comp_kwargs.items():
            if k in coord_kwargs:
                raise ValueError('Found column "{}" in table, but it was '
                                 'already provided as "{}" keyword to '
                                 'guess_from_table function.'.format(v.name, k))
            else:
                coord_kwargs[k] = v

        return cls(**coord_kwargs)

    # Name resolve
    @classmethod
    def from_name(cls, name, frame='icrs', parse=False, cache=True):
        """
        Given a name, query the CDS name resolver to attempt to retrieve
        coordinate information for that object. The search database, sesame
        url, and query timeout can be set through configuration items in
        ``astropy.coordinates.name_resolve`` -- see docstring for
        `~astropy.coordinates.get_icrs_coordinates` for more information.

        Parameters
        ----------
        name : str
            The name of the object to get coordinates for, e.g. ``'M42'``.
        frame : str or `BaseCoordinateFrame` class or instance
            The frame to transform the object to.
        parse : bool
            Whether to attempt extracting the coordinates from the name by
            parsing with a regex. For object catalog names that have
            J-coordinates embedded in their names, e.g.,
            'CRTS SSS100805 J194428-420209', this may be much faster than a
            Sesame query for the same object name. The coordinates extracted
            in this way may differ from the database coordinates by a few
            deci-arcseconds, so only use this option if you do not need
            sub-arcsecond accuracy for coordinates.
        cache : bool, optional
            Determines whether to cache the results or not. To update or
            overwrite an existing value, pass ``cache='update'``.

        Returns
        -------
        coord : SkyCoord
            Instance of the SkyCoord class.
        """
        from .name_resolve import get_icrs_coordinates

        icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
        icrs_sky_coord = cls(icrs_coord)
        if frame in ('icrs', icrs_coord.__class__):
            return icrs_sky_coord
        else:
            return icrs_sky_coord.transform_to(frame)
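# ---------------------------------------------------------------------------
# A minimal usage sketch of the pixel conversion methods above. The bare WCS
# here is a made-up example header (default crpix/cdelt); any real FITS WCS
# works the same way, so treat the numbers as placeholders.
#
#     >>> import astropy.units as u
#     >>> from astropy.wcs import WCS
#     >>> from astropy.coordinates import SkyCoord
#     >>> w = WCS(naxis=2)
#     >>> w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
#     >>> w.wcs.crval = [10.0, 20.0]
#     >>> sc = SkyCoord(10.01*u.deg, 20.01*u.deg)
#     >>> xp, yp = sc.to_pixel(w)               # sky -> pixel
#     >>> sc2 = SkyCoord.from_pixel(xp, yp, w)  # pixel -> sky round trip
# ---------------------------------------------------------------------------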
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst # This file was automatically generated from ply. To re-generate this file, # remove it from this folder, then build astropy and run the tests in-place: # # python setup.py build_ext --inplace # pytest astropy/coordinates # # You can then commit the changes to this file. # angle_lextab.py. This file automatically created by PLY (version 3.11). Don't edit! _tabversion = '3.10' _lextokens = set(('COLON', 'DEGREE', 'EASTWEST', 'HOUR', 'MINUTE', 'NORTHSOUTH', 'SECOND', 'SIGN', 'SIMPLE_UNIT', 'UFLOAT', 'UINT')) _lexreflags = 64 _lexliterals = '' _lexstateinfo = {'INITIAL': 'inclusive'} _lexstatere = {'INITIAL': [('(?P<t_UFLOAT>((\\d+\\.\\d*)|(\\.\\d+))([eE][+-−]?\\d+)?)|(?P<t_UINT>\\d+)|(?P<t_SIGN>[+−-])|(?P<t_EASTWEST>[EW]$)|(?P<t_NORTHSOUTH>[NS]$)|(?P<t_SIMPLE_UNIT>(?:Earcmin)|(?:Earcsec)|(?:Edeg)|(?:Erad)|(?:Garcmin)|(?:Garcsec)|(?:Gdeg)|(?:Grad)|(?:Marcmin)|(?:Marcsec)|(?:Mdeg)|(?:Mrad)|(?:Parcmin)|(?:Parcsec)|(?:Pdeg)|(?:Prad)|(?:Tarcmin)|(?:Tarcsec)|(?:Tdeg)|(?:Trad)|(?:Yarcmin)|(?:Yarcsec)|(?:Ydeg)|(?:Yrad)|(?:Zarcmin)|(?:Zarcsec)|(?:Zdeg)|(?:Zrad)|(?:aarcmin)|(?:aarcsec)|(?:adeg)|(?:arad)|(?:arcmin)|(?:arcminute)|(?:arcsec)|(?:arcsecond)|(?:attoarcminute)|(?:attoarcsecond)|(?:attodegree)|(?:attoradian)|(?:carcmin)|(?:carcsec)|(?:cdeg)|(?:centiarcminute)|(?:centiarcsecond)|(?:centidegree)|(?:centiradian)|(?:crad)|(?:cy)|(?:cycle)|(?:daarcmin)|(?:daarcsec)|(?:dadeg)|(?:darad)|(?:darcmin)|(?:darcsec)|(?:ddeg)|(?:decaarcminute)|(?:decaarcsecond)|(?:decadegree)|(?:decaradian)|(?:deciarcminute)|(?:deciarcsecond)|(?:decidegree)|(?:deciradian)|(?:dekaarcminute)|(?:dekaarcsecond)|(?:dekadegree)|(?:dekaradian)|(?:drad)|(?:exaarcminute)|(?:exaarcsecond)|(?:exadegree)|(?:exaradian)|(?:farcmin)|(?:farcsec)|(?:fdeg)|(?:femtoarcminute)|(?:femtoarcsecond)|(?:femtodegree)|(?:femtoradian)|(?:frad)|(?:gigaarcminute)|(?:gigaarcsecond)|(?:gigadegree)|(?:gigaradian)|(?:harcmin)|(?:harcsec)|(?:hdeg)|(?:hectoarcminute)|(?:hectoarcsecond)|(?:hectodegree)|(?:hectoradian)|(?:hrad)|(?:karcmin)|(?:karcsec)|(?:kdeg)|(?:kiloarcminute)|(?:kiloarcsecond)|(?:kilodegree)|(?:kiloradian)|(?:krad)|(?:marcmin)|(?:marcsec)|(?:mas)|(?:mdeg)|(?:megaarcminute)|(?:megaarcsecond)|(?:megadegree)|(?:megaradian)|(?:microarcminute)|(?:microarcsecond)|(?:microdegree)|(?:microradian)|(?:milliarcminute)|(?:milliarcsecond)|(?:millidegree)|(?:milliradian)|(?:mrad)|(?:nanoarcminute)|(?:nanoarcsecond)|(?:nanodegree)|(?:nanoradian)|(?:narcmin)|(?:narcsec)|(?:ndeg)|(?:nrad)|(?:parcmin)|(?:parcsec)|(?:pdeg)|(?:petaarcminute)|(?:petaarcsecond)|(?:petadegree)|(?:petaradian)|(?:picoarcminute)|(?:picoarcsecond)|(?:picodegree)|(?:picoradian)|(?:prad)|(?:rad)|(?:radian)|(?:teraarcminute)|(?:teraarcsecond)|(?:teradegree)|(?:teraradian)|(?:uarcmin)|(?:uarcsec)|(?:uas)|(?:udeg)|(?:urad)|(?:yarcmin)|(?:yarcsec)|(?:ydeg)|(?:yoctoarcminute)|(?:yoctoarcsecond)|(?:yoctodegree)|(?:yoctoradian)|(?:yottaarcminute)|(?:yottaarcsecond)|(?:yottadegree)|(?:yottaradian)|(?:yrad)|(?:zarcmin)|(?:zarcsec)|(?:zdeg)|(?:zeptoarcminute)|(?:zeptoarcsecond)|(?:zeptodegree)|(?:zeptoradian)|(?:zettaarcminute)|(?:zettaarcsecond)|(?:zettadegree)|(?:zettaradian)|(?:zrad))|(?P<t_MINUTE>m(in(ute(s)?)?)?|′|\\\'|ᵐ)|(?P<t_SECOND>s(ec(ond(s)?)?)?|″|\\"|ˢ)|(?P<t_DEGREE>d(eg(ree(s)?)?)?|°)|(?P<t_HOUR>hour(s)?|h(r)?|ʰ)|(?P<t_COLON>:)', [None, ('t_UFLOAT', 'UFLOAT'), None, None, None, None, ('t_UINT', 'UINT'), ('t_SIGN', 'SIGN'), ('t_EASTWEST', 'EASTWEST'), ('t_NORTHSOUTH', 'NORTHSOUTH'), 
('t_SIMPLE_UNIT', 'SIMPLE_UNIT'), (None, 'MINUTE'), None, None, None, (None, 'SECOND'), None, None, None, (None, 'DEGREE'), None, None, None, (None, 'HOUR'), None, None, (None, 'COLON')])]} _lexstateignore = {'INITIAL': ' '} _lexstateerrorf = {'INITIAL': 't_error'} _lexstateeoff = {}
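# ---------------------------------------------------------------------------
# A quick illustration of what the generated token table above supports:
# strings like these lex into the UINT/UFLOAT/SIGN/DEGREE/MINUTE/SECOND
# tokens and parse to equivalent angles (values chosen arbitrarily).
#
#     >>> from astropy.coordinates import Angle
#     >>> Angle('10d30m00s')
#     <Angle 10.5 deg>
#     >>> Angle('10:30:00', unit='deg')
#     <Angle 10.5 deg>
#     >>> Angle('-1h15m30.5s')  # doctest: +FLOAT_CMP
#     <Angle -1.25847222 hourangle>
# ---------------------------------------------------------------------------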
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Currently the only site accessible without internet access is the Royal
Greenwich Observatory, as an example (and for testing purposes).  In future
releases, a canonical set of sites may be bundled into astropy for when the
online registry is unavailable.

Additions or corrections to the observatory list can be submitted via Pull
Request to the [astropy-data GitHub repository](https://github.com/astropy/astropy-data),
updating the ``location.json`` file.
"""

import json
from difflib import get_close_matches
from collections.abc import Mapping

from astropy.utils.data import get_pkg_data_contents, get_file_contents
from .earth import EarthLocation
from .errors import UnknownSiteException
from astropy import units as u


class SiteRegistry(Mapping):
    """
    A bare-bones registry of EarthLocation objects.

    This acts as a mapping (dict-like object) but with the important caveat
    that it always transforms its inputs to lower-case.  So keys are always
    all lower-case, and even if you ask for something that's got mixed case,
    it will be interpreted as the all lower-case version.
    """

    def __init__(self):
        # the keys to this are always lower-case
        self._lowercase_names_to_locations = {}
        # these can be whatever case is appropriate
        self._names = []

    def __getitem__(self, site_name):
        """
        Returns an EarthLocation for a known site in this registry.

        Parameters
        ----------
        site_name : str
            Name of the observatory (case-insensitive).

        Returns
        -------
        site : `~astropy.coordinates.EarthLocation`
            The location of the observatory.
        """
        if site_name.lower() not in self._lowercase_names_to_locations:
            # If site name not found, find close matches and suggest them in error
            close_names = get_close_matches(site_name,
                                            self._lowercase_names_to_locations)
            close_names = sorted(close_names, key=len)
            raise UnknownSiteException(site_name, "the 'names' attribute",
                                       close_names=close_names)

        return self._lowercase_names_to_locations[site_name.lower()]

    def __len__(self):
        return len(self._lowercase_names_to_locations)

    def __iter__(self):
        return iter(self._lowercase_names_to_locations)

    def __contains__(self, site_name):
        return site_name.lower() in self._lowercase_names_to_locations

    @property
    def names(self):
        """
        The names in this registry.  Note that these are *not* exactly the
        same as the keys: keys are always lower-case, while ``names`` is what
        you should use for the actual readable names (which may be
        case-sensitive).

        Returns
        -------
        site : list of str
            The names of the sites in this registry
        """
        return sorted(self._names)

    def add_site(self, names, locationobj):
        """
        Adds a location to the registry.
        Parameters
        ----------
        names : list of str
            All the names this site should go under
        locationobj : `~astropy.coordinates.EarthLocation`
            The actual site object
        """
        for name in names:
            self._lowercase_names_to_locations[name.lower()] = locationobj
            self._names.append(name)

    @classmethod
    def from_json(cls, jsondb):
        reg = cls()
        for site in jsondb:
            site_info = jsondb[site].copy()
            location = EarthLocation.from_geodetic(
                site_info.pop('longitude') * u.Unit(site_info.pop('longitude_unit')),
                site_info.pop('latitude') * u.Unit(site_info.pop('latitude_unit')),
                site_info.pop('elevation') * u.Unit(site_info.pop('elevation_unit')))
            location.info.name = site_info.pop('name')
            aliases = site_info.pop('aliases')
            location.info.meta = site_info  # whatever is left

            reg.add_site([site] + aliases, location)

        reg._loaded_jsondb = jsondb
        return reg


def get_builtin_sites():
    """
    Load the observatory database from data/sites.json and parse it into a
    SiteRegistry.
    """
    jsondb = json.loads(get_pkg_data_contents('data/sites.json'))
    return SiteRegistry.from_json(jsondb)


def get_downloaded_sites(jsonurl=None):
    """
    Load observatory database from data.astropy.org and parse into a
    SiteRegistry.
    """
    # we explicitly set the encoding because the default is to leave it set by
    # the users' locale, which may fail if it's not matched to the sites.json
    if jsonurl is None:
        content = get_pkg_data_contents('coordinates/sites.json',
                                        encoding='UTF-8')
    else:
        content = get_file_contents(jsonurl, encoding='UTF-8')

    jsondb = json.loads(content)
    return SiteRegistry.from_json(jsondb)
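# ---------------------------------------------------------------------------
# A minimal sketch of the registry API above; the site name and coordinates
# are invented for the example.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import EarthLocation
#     >>> reg = SiteRegistry()
#     >>> loc = EarthLocation.from_geodetic(0.0*u.deg, 51.48*u.deg, 46*u.m)
#     >>> reg.add_site(['Example Observatory', 'example'], loc)
#     >>> 'EXAMPLE' in reg        # lookups are case-insensitive
#     True
#     >>> reg['Example Observatory'] is loc
#     True
#     >>> reg.names
#     ['Example Observatory', 'example']
# ---------------------------------------------------------------------------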
import numpy as np from astropy.units import si from astropy.units import equivalencies as eq from astropy.units import Unit from astropy.units.quantity import SpecificTypeQuantity, Quantity from astropy.units.decorators import quantity_input __all__ = ['SpectralQuantity'] # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralQuantity.*'] KMS = si.km / si.s SPECTRAL_UNITS = (si.Hz, si.m, si.J, si.m ** -1, KMS) DOPPLER_CONVENTIONS = { 'radio': eq.doppler_radio, 'optical': eq.doppler_optical, 'relativistic': eq.doppler_relativistic } class SpectralQuantity(SpecificTypeQuantity): """ One or more value(s) with spectral units. The spectral units should be those for frequencies, wavelengths, energies, wavenumbers, or velocities (interpreted as Doppler velocities relative to a rest spectral value). The advantage of using this class over the regular `~astropy.units.Quantity` class is that in `SpectralQuantity`, the ``u.spectral`` equivalency is enabled by default (allowing automatic conversion between spectral units), and a preferred Doppler rest value and convention can be stored for easy conversion to/from velocities. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralQuantity` Spectral axis data values. unit : unit-like Unit for the given data. doppler_rest : `~astropy.units.Quantity` ['speed'], optional The rest value to use for conversions from/to velocities doppler_convention : str, optional The convention to use when converting the spectral data to/from velocities. """ _equivalent_unit = SPECTRAL_UNITS _include_easy_conversion_members = True def __new__(cls, value, unit=None, doppler_rest=None, doppler_convention=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # If we're initializing from an existing SpectralQuantity, keep any # parameters that aren't being overridden if doppler_rest is None: doppler_rest = getattr(value, 'doppler_rest', None) if doppler_convention is None: doppler_convention = getattr(value, 'doppler_convention', None) obj._doppler_rest = doppler_rest obj._doppler_convention = doppler_convention return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._doppler_rest = getattr(obj, '_doppler_rest', None) self._doppler_convention = getattr(obj, '_doppler_convention', None) def __quantity_subclass__(self, unit): # Always default to just returning a Quantity, unless we explicitly # choose to return a SpectralQuantity - even if the units match, we # want to avoid doing things like adding two SpectralQuantity instances # together and getting a SpectralQuantity back if unit is self.unit: return SpectralQuantity, True else: return Quantity, False def __array_ufunc__(self, function, method, *inputs, **kwargs): # We always return Quantity except in a few specific cases result = super().__array_ufunc__(function, method, *inputs, **kwargs) if ((function is np.multiply or function is np.true_divide and inputs[0] is self) and result.unit == self.unit or (function in (np.minimum, np.maximum, np.fmax, np.fmin) and method in ('reduce', 'reduceat'))): result = result.view(self.__class__) result.__array_finalize__(self) else: if result is self: raise TypeError(f"Cannot store the result of this operation in {self.__class__.__name__}") if result.dtype.kind == 'b': result = result.view(np.ndarray) else: result = result.view(Quantity) return result @property def doppler_rest(self): """ The rest value of the spectrum used for transformations to/from velocity space. 
        Returns
        -------
        `~astropy.units.Quantity`
            Rest value as an astropy `~astropy.units.Quantity` object.
        """
        return self._doppler_rest

    @doppler_rest.setter
    @quantity_input(value=SPECTRAL_UNITS)
    def doppler_rest(self, value):
        """
        New rest value needed for velocity-space conversions.

        Parameters
        ----------
        value : `~astropy.units.Quantity`
            Rest value in any spectral unit (frequency, wavelength, energy,
            wavenumber, or Doppler velocity).
        """
        if self._doppler_rest is not None:
            raise AttributeError("doppler_rest has already been set, and cannot "
                                 "be changed. Use the ``to`` method to convert "
                                 "the spectral value(s) to use a different "
                                 "rest value")
        self._doppler_rest = value

    @property
    def doppler_convention(self):
        """
        The defined convention for conversions to/from velocity space.

        Returns
        -------
        str
            One of 'optical', 'radio', or 'relativistic' representing the
            equivalency used in the unit conversions.
        """
        return self._doppler_convention

    @doppler_convention.setter
    def doppler_convention(self, value):
        """
        New velocity convention used for velocity-space conversions.

        Parameters
        ----------
        value : str
            One of 'optical', 'radio', or 'relativistic'.

        Notes
        -----
        More information on the equations dictating the transformations can be
        found in the astropy documentation [1]_.

        References
        ----------
        .. [1] Astropy documentation: https://docs.astropy.org/en/stable/units/equivalencies.html#spectral-doppler-equivalencies
        """
        if self._doppler_convention is not None:
            raise AttributeError("doppler_convention has already been set, and cannot "
                                 "be changed. Use the ``to`` method to convert "
                                 "the spectral value(s) to use a different "
                                 "convention")

        if value is not None and value not in DOPPLER_CONVENTIONS:
            raise ValueError(f"doppler_convention should be one of {'/'.join(sorted(DOPPLER_CONVENTIONS))}")

        self._doppler_convention = value

    @quantity_input(doppler_rest=SPECTRAL_UNITS)
    def to(self, unit,
           equivalencies=[],
           doppler_rest=None,
           doppler_convention=None):
        """
        Return a new `~astropy.coordinates.SpectralQuantity` object with the
        specified unit.

        By default, the ``spectral`` equivalency will be enabled, as well as
        one of the Doppler equivalencies if converting to/from velocities.

        Parameters
        ----------
        unit : unit-like
            An object that represents the unit to convert to. Must be an
            `~astropy.units.UnitBase` object or a string parseable by the
            `~astropy.units` package, and should be a spectral unit.
        equivalencies : list of `~astropy.units.equivalencies.Equivalency`, optional
            A list of equivalence pairs to try if the units are not directly
            convertible (along with spectral). See
            :ref:`astropy:unit_equivalencies`. If not provided or ``[]``,
            spectral equivalencies will be used. If `None`, no equivalencies
            will be applied at all, not even any set globally or within a
            context.
        doppler_rest : `~astropy.units.Quantity`, optional
            The rest value used when converting to/from velocities. This will
            also be set as an attribute on the output
            `~astropy.coordinates.SpectralQuantity`.
        doppler_convention : {'relativistic', 'optical', 'radio'}, optional
            The Doppler convention used when converting to/from velocities.
            This will also be set as an attribute on the output
            `~astropy.coordinates.SpectralQuantity`.

        Returns
        -------
        `SpectralQuantity`
            New spectral coordinate object with data converted to the new
            unit.
""" # Make sure units can be passed as strings unit = Unit(unit) # If equivalencies is explicitly set to None, we should just use the # default Quantity.to with equivalencies also set to None if equivalencies is None: result = super().to(unit, equivalencies=None) result = result.view(self.__class__) result.__array_finalize__(self) return result # FIXME: need to consider case where doppler equivalency is passed in # equivalencies list, or is u.spectral equivalency is already passed if doppler_rest is None: doppler_rest = self._doppler_rest if doppler_convention is None: doppler_convention = self._doppler_convention elif doppler_convention not in DOPPLER_CONVENTIONS: raise ValueError(f"doppler_convention should be one of {'/'.join(sorted(DOPPLER_CONVENTIONS))}") if self.unit.is_equivalent(KMS) and unit.is_equivalent(KMS): # Special case: if the current and final units are both velocity, # and either the rest value or the convention are different, we # need to convert back to frequency temporarily. if doppler_convention is not None and self._doppler_convention is None: raise ValueError("Original doppler_convention not set") if doppler_rest is not None and self._doppler_rest is None: raise ValueError("Original doppler_rest not set") if doppler_rest is None and doppler_convention is None: result = super().to(unit, equivalencies=equivalencies) result = result.view(self.__class__) result.__array_finalize__(self) return result elif (doppler_rest is None) is not (doppler_convention is None): raise ValueError("Either both or neither doppler_rest and " "doppler_convention should be defined for " "velocity conversions") vel_equiv1 = DOPPLER_CONVENTIONS[self._doppler_convention](self._doppler_rest) freq = super().to(si.Hz, equivalencies=equivalencies + vel_equiv1) vel_equiv2 = DOPPLER_CONVENTIONS[doppler_convention](doppler_rest) result = freq.to(unit, equivalencies=equivalencies + vel_equiv2) else: additional_equivalencies = eq.spectral() if self.unit.is_equivalent(KMS) or unit.is_equivalent(KMS): if doppler_convention is None: raise ValueError("doppler_convention not set, cannot convert to/from velocities") if doppler_rest is None: raise ValueError("doppler_rest not set, cannot convert to/from velocities") additional_equivalencies = additional_equivalencies + DOPPLER_CONVENTIONS[doppler_convention](doppler_rest) result = super().to(unit, equivalencies=equivalencies + additional_equivalencies) # Since we have to explicitly specify when we want to keep this as a # SpectralQuantity, we need to convert it back from a Quantity to # a SpectralQuantity here. Note that we don't use __array_finalize__ # here since we might need to set the output doppler convention and # rest based on the parameters passed to 'to' result = result.view(self.__class__) result.__array_finalize__(self) result._doppler_convention = doppler_convention result._doppler_rest = doppler_rest return result def to_value(self, unit=None, *args, **kwargs): if unit is None: return self.view(np.ndarray) return self.to(unit, *args, **kwargs).value
""" In this module, we define the coordinate representation classes, which are used to represent low-level cartesian, spherical, cylindrical, and other coordinates. """ import abc import functools import operator import inspect import warnings import numpy as np import astropy.units as u from erfa import ufunc as erfa_ufunc from .angles import Angle, Longitude, Latitude from .distances import Distance from .matrix_utilities import is_O3 from astropy.utils import ShapedLikeNDArray, classproperty from astropy.utils.data_info import MixinInfo from astropy.utils.exceptions import DuplicateRepresentationWarning __all__ = ["BaseRepresentationOrDifferential", "BaseRepresentation", "CartesianRepresentation", "SphericalRepresentation", "UnitSphericalRepresentation", "RadialRepresentation", "PhysicsSphericalRepresentation", "CylindricalRepresentation", "BaseDifferential", "CartesianDifferential", "BaseSphericalDifferential", "BaseSphericalCosLatDifferential", "SphericalDifferential", "SphericalCosLatDifferential", "UnitSphericalDifferential", "UnitSphericalCosLatDifferential", "RadialDifferential", "CylindricalDifferential", "PhysicsSphericalDifferential"] # Module-level dict mapping representation string alias names to classes. # This is populated by __init_subclass__ when called by Representation or # Differential classes so that they are all registered automatically. REPRESENTATION_CLASSES = {} DIFFERENTIAL_CLASSES = {} # set for tracking duplicates DUPLICATE_REPRESENTATIONS = set() # a hash for the content of the above two dicts, cached for speed. _REPRDIFF_HASH = None def _fqn_class(cls): ''' Get the fully qualified name of a class ''' return cls.__module__ + '.' + cls.__qualname__ def get_reprdiff_cls_hash(): """ Returns a hash value that should be invariable if the `REPRESENTATION_CLASSES` and `DIFFERENTIAL_CLASSES` dictionaries have not changed. """ global _REPRDIFF_HASH if _REPRDIFF_HASH is None: _REPRDIFF_HASH = (hash(tuple(REPRESENTATION_CLASSES.items())) + hash(tuple(DIFFERENTIAL_CLASSES.items()))) return _REPRDIFF_HASH def _invalidate_reprdiff_cls_hash(): global _REPRDIFF_HASH _REPRDIFF_HASH = None def _array2string(values, prefix=''): # Work around version differences for array2string. kwargs = {'separator': ', ', 'prefix': prefix} kwargs['formatter'] = {} return np.array2string(values, **kwargs) class BaseRepresentationOrDifferentialInfo(MixinInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ attrs_from_parent = {'unit'} # Indicates unit is read-only _supports_indexing = False @staticmethod def default_format(val): # Create numpy dtype so that numpy formatting will work. components = val.components values = tuple(getattr(val, component).value for component in components) a = np.empty(getattr(val, 'shape', ()), [(component, value.dtype) for component, value in zip(components, values)]) for component, value in zip(components, values): a[component] = value return str(a) @property def _represent_as_dict_attrs(self): return self._parent.components @property def unit(self): if self._parent is None: return None unit = self._parent._unitstr return unit[1:-1] if unit.startswith('(') else unit def new_like(self, reps, length, metadata_conflicts='warn', name=None): """ Return a new instance like ``reps`` with ``length`` rows. 
This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- reps : list List of input representations or differentials. length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : `BaseRepresentation` or `BaseDifferential` subclass instance Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes(reps, metadata_conflicts, name, ('meta', 'description')) # Make a new representation or differential with the desired length # using the _apply / __getitem__ machinery to effectively return # rep0[[0, 0, ..., 0, 0]]. This will have the right shape, and # include possible differentials. indexes = np.zeros(length, dtype=np.int64) out = reps[0][indexes] # Use __setitem__ machinery to check whether all representations # can represent themselves as this one without loss of information. for rep in reps[1:]: try: out[0] = rep[0] except Exception as err: raise ValueError(f'input representations are inconsistent.') from err # Set (merged) info attributes. for attr in ('name', 'meta', 'description'): if attr in attrs: setattr(out.info, attr, attrs[attr]) return out class BaseRepresentationOrDifferential(ShapedLikeNDArray): """3D coordinate representations and differentials. Parameters ---------- comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass The components of the 3D point or differential. The names are the keys and the subclasses the values of the ``attr_classes`` attribute. copy : bool, optional If `True` (default), arrays will be copied; if `False`, they will be broadcast together but not use new memory. """ # Ensure multiplication/division with ndarray or Quantity doesn't lead to # object arrays. __array_priority__ = 50000 info = BaseRepresentationOrDifferentialInfo() def __init__(self, *args, **kwargs): # make argument a list, so we can pop them off. args = list(args) components = self.components if (args and isinstance(args[0], self.__class__) and all(arg is None for arg in args[1:])): rep_or_diff = args[0] copy = kwargs.pop('copy', True) attrs = [getattr(rep_or_diff, component) for component in components] if 'info' in rep_or_diff.__dict__: self.info = rep_or_diff.info if kwargs: raise TypeError(f'unexpected keyword arguments for case ' f'where class instance is passed in: {kwargs}') else: attrs = [] for component in components: try: attr = args.pop(0) if args else kwargs.pop(component) except KeyError: raise TypeError(f'__init__() missing 1 required positional ' f'argument: {component!r}') from None if attr is None: raise TypeError(f'__init__() missing 1 required positional ' f'argument: {component!r} (or first ' f'argument should be an instance of ' f'{self.__class__.__name__}).') attrs.append(attr) copy = args.pop(0) if args else kwargs.pop('copy', True) if args: raise TypeError(f'unexpected arguments: {args}') if kwargs: for component in components: if component in kwargs: raise TypeError(f"__init__() got multiple values for " f"argument {component!r}") raise TypeError(f'unexpected keyword arguments: {kwargs}') # Pass attributes through the required initializing classes. 
attrs = [self.attr_classes[component](attr, copy=copy, subok=True) for component, attr in zip(components, attrs)] try: bc_attrs = np.broadcast_arrays(*attrs, subok=True) except ValueError as err: if len(components) <= 2: c_str = ' and '.join(components) else: c_str = ', '.join(components[:2]) + ', and ' + components[2] raise ValueError(f"Input parameters {c_str} cannot be broadcast") from err # The output of np.broadcast_arrays() has limitations on writeability, so we perform # additional handling to enable writeability in most situations. This is primarily # relevant for allowing the changing of the wrap angle of longitude components. # # If the shape has changed for a given component, broadcasting is needed: # If copy=True, we make a copy of the broadcasted array to ensure writeability. # Note that array had already been copied prior to the broadcasting. # TODO: Find a way to avoid the double copy. # If copy=False, we use the broadcasted array, and writeability may still be # limited. # If the shape has not changed for a given component, we can proceed with using the # non-broadcasted array, which avoids writeability issues from np.broadcast_arrays(). attrs = [(bc_attr.copy() if copy else bc_attr) if bc_attr.shape != attr.shape else attr for attr, bc_attr in zip(attrs, bc_attrs)] # Set private attributes for the attributes. (If not defined explicitly # on the class, the metaclass will define properties to access these.) for component, attr in zip(components, attrs): setattr(self, '_' + component, attr) @classmethod def get_name(cls): """Name of the representation or differential. In lower case, with any trailing 'representation' or 'differential' removed. (E.g., 'spherical' for `~astropy.coordinates.SphericalRepresentation` or `~astropy.coordinates.SphericalDifferential`.) """ name = cls.__name__.lower() if name.endswith('representation'): name = name[:-14] elif name.endswith('differential'): name = name[:-12] return name # The two methods that any subclass has to define. @classmethod @abc.abstractmethod def from_cartesian(cls, other): """Create a representation of this class from a supplied Cartesian one. Parameters ---------- other : `CartesianRepresentation` The representation to turn into this class Returns ------- representation : `BaseRepresentation` subclass instance A new representation of this class's type. """ # Note: the above docstring gets overridden for differentials. raise NotImplementedError() @abc.abstractmethod def to_cartesian(self): """Convert the representation to its Cartesian form. Note that any differentials get dropped. Also note that orientation information at the origin is *not* preserved by conversions through Cartesian coordinates. For example, transforming an angular position defined at distance=0 through cartesian coordinates and back will lose the original angular coordinates:: >>> import astropy.units as u >>> import astropy.coordinates as coord >>> rep = coord.SphericalRepresentation( ... lon=15*u.deg, ... lat=-11*u.deg, ... distance=0*u.pc) >>> rep.to_cartesian().represent_as(coord.SphericalRepresentation) <SphericalRepresentation (lon, lat, distance) in (rad, rad, pc) (0., 0., 0.)> Returns ------- cartrepr : `CartesianRepresentation` The representation in Cartesian form. """ # Note: the above docstring gets overridden for differentials. 
raise NotImplementedError() @property def components(self): """A tuple with the in-order names of the coordinate components.""" return tuple(self.attr_classes) def __eq__(self, value): """Equality operator This implements strict equality and requires that the representation classes are identical and that the representation data are exactly equal. """ if self.__class__ is not value.__class__: raise TypeError(f'cannot compare: objects must have same class: ' f'{self.__class__.__name__} vs. ' f'{value.__class__.__name__}') try: np.broadcast(self, value) except ValueError as exc: raise ValueError(f'cannot compare: {exc}') from exc out = True for comp in self.components: out &= (getattr(self, '_' + comp) == getattr(value, '_' + comp)) return out def __ne__(self, value): return np.logical_not(self == value) def _apply(self, method, *args, **kwargs): """Create a new representation or differential with ``method`` applied to the component data. In typical usage, the method is any of the shape-changing methods for `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those picking particular elements (``__getitem__``, ``take``, etc.), which are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`), with the results used to create a new instance. Internally, it is also used to apply functions to the components (in particular, `~numpy.broadcast_to`). Parameters ---------- method : str or callable If str, it is the name of a method that is applied to the internal ``components``. If callable, the function is applied. *args : tuple Any positional arguments for ``method``. **kwargs : dict Any keyword arguments for ``method``. """ if callable(method): apply_method = lambda array: method(array, *args, **kwargs) else: apply_method = operator.methodcaller(method, *args, **kwargs) new = super().__new__(self.__class__) for component in self.components: setattr(new, '_' + component, apply_method(getattr(self, component))) # Copy other 'info' attr only if it has actually been defined. # See PR #3898 for further explanation and justification, along # with Quantity.__array_finalize__ if 'info' in self.__dict__: new.info = self.info return new def __setitem__(self, item, value): if value.__class__ is not self.__class__: raise TypeError(f'can only set from object of same class: ' f'{self.__class__.__name__} vs. ' f'{value.__class__.__name__}') for component in self.components: getattr(self, '_' + component)[item] = getattr(value, '_' + component) @property def shape(self): """The shape of the instance and underlying arrays. Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a tuple. Note that if different instances share some but not all underlying data, setting the shape of one instance can make the other instance unusable. Hence, it is strongly recommended to get new, reshaped instances with the ``reshape`` method. Raises ------ ValueError If the new shape has the wrong total number of elements. AttributeError If the shape of any of the components cannot be changed without the arrays being copied. For these cases, use the ``reshape`` method (which copies any arrays that cannot be reshaped in-place). """ return getattr(self, self.components[0]).shape @shape.setter def shape(self, shape): # We keep track of arrays that were already reshaped since we may have # to return those to their original shape if a later shape-setting # fails. 
(This can happen since coordinates are broadcast together.) reshaped = [] oldshape = self.shape for component in self.components: val = getattr(self, component) if val.size > 1: try: val.shape = shape except Exception: for val2 in reshaped: val2.shape = oldshape raise else: reshaped.append(val) # Required to support multiplication and division, and defined by the base # representation and differential classes. @abc.abstractmethod def _scale_operation(self, op, *args): raise NotImplementedError() def __mul__(self, other): return self._scale_operation(operator.mul, other) def __rmul__(self, other): return self.__mul__(other) def __truediv__(self, other): return self._scale_operation(operator.truediv, other) def __neg__(self): return self._scale_operation(operator.neg) # Follow numpy convention and make an independent copy. def __pos__(self): return self.copy() # Required to support addition and subtraction, and defined by the base # representation and differential classes. @abc.abstractmethod def _combine_operation(self, op, other, reverse=False): raise NotImplementedError() def __add__(self, other): return self._combine_operation(operator.add, other) def __radd__(self, other): return self._combine_operation(operator.add, other, reverse=True) def __sub__(self, other): return self._combine_operation(operator.sub, other) def __rsub__(self, other): return self._combine_operation(operator.sub, other, reverse=True) # The following are used for repr and str @property def _values(self): """Turn the coordinates into a record array with the coordinate values. The record array fields will have the component names. """ coo_items = [(c, getattr(self, c)) for c in self.components] result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items]) for c, coo in coo_items: result[c] = coo.value return result @property def _units(self): """Return a dictionary with the units of the coordinate components.""" return dict([(component, getattr(self, component).unit) for component in self.components]) @property def _unitstr(self): units_set = set(self._units.values()) if len(units_set) == 1: unitstr = units_set.pop().to_string() else: unitstr = '({})'.format( ', '.join([self._units[component].to_string() for component in self.components])) return unitstr def __str__(self): return f'{_array2string(self._values)} {self._unitstr:s}' def __repr__(self): prefixstr = ' ' arrstr = _array2string(self._values, prefix=prefixstr) diffstr = '' if getattr(self, 'differentials', None): diffstr = '\n (has differentials w.r.t.: {})'.format( ', '.join([repr(key) for key in self.differentials.keys()])) unitstr = ('in ' + self._unitstr) if self._unitstr else '[dimensionless]' return '<{} ({}) {:s}\n{}{}{}>'.format( self.__class__.__name__, ', '.join(self.components), unitstr, prefixstr, arrstr, diffstr) def _make_getter(component): """Make an attribute getter for use in a property. Parameters ---------- component : str The name of the component that should be accessed. This assumes the actual value is stored in an attribute of that name prefixed by '_'. """ # This has to be done in a function to ensure the reference to component # is not lost/redirected. 
component = '_' + component def get_component(self): return getattr(self, component) return get_component class RepresentationInfo(BaseRepresentationOrDifferentialInfo): @property def _represent_as_dict_attrs(self): attrs = super()._represent_as_dict_attrs if self._parent._differentials: attrs += ('differentials',) return attrs def _represent_as_dict(self, attrs=None): out = super()._represent_as_dict(attrs) for key, value in out.pop('differentials', {}).items(): out[f'differentials.{key}'] = value return out def _construct_from_dict(self, map): differentials = {} for key in list(map.keys()): if key.startswith('differentials.'): differentials[key[14:]] = map.pop(key) map['differentials'] = differentials return super()._construct_from_dict(map) class BaseRepresentation(BaseRepresentationOrDifferential): """Base for representing a point in a 3D coordinate system. Parameters ---------- comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass The components of the 3D points. The names are the keys and the subclasses the values of the ``attr_classes`` attribute. differentials : dict, `~astropy.coordinates.BaseDifferential`, optional Any differential classes that should be associated with this representation. The input must either be a single `~astropy.coordinates.BaseDifferential` subclass instance, or a dictionary with keys set to a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. Notes ----- All representation classes should subclass this base representation class, and define an ``attr_classes`` attribute, a `dict` which maps component names to the class that creates them. They must also define a ``to_cartesian`` method and a ``from_cartesian`` class method. By default, transformations are done via the cartesian system, but classes that want to define a smarter transformation path can overload the ``represent_as`` method. If one wants to use an associated differential class, one should also define ``unit_vectors`` and ``scale_factors`` methods (see those methods for details). """ info = RepresentationInfo() def __init_subclass__(cls, **kwargs): # Register representation name (except for BaseRepresentation) if cls.__name__ == 'BaseRepresentation': return if not hasattr(cls, 'attr_classes'): raise NotImplementedError('Representations must have an ' '"attr_classes" class attribute.') repr_name = cls.get_name() # first time a duplicate is added # remove first entry and add both using their qualnames if repr_name in REPRESENTATION_CLASSES: DUPLICATE_REPRESENTATIONS.add(repr_name) fqn_cls = _fqn_class(cls) existing = REPRESENTATION_CLASSES[repr_name] fqn_existing = _fqn_class(existing) if fqn_cls == fqn_existing: raise ValueError(f'Representation "{fqn_cls}" already defined') msg = ( f'Representation "{repr_name}" already defined, removing it to avoid confusion.' 
f'Use qualnames "{fqn_cls}" and "{fqn_existing}" or class instances directly' ) warnings.warn(msg, DuplicateRepresentationWarning) del REPRESENTATION_CLASSES[repr_name] REPRESENTATION_CLASSES[fqn_existing] = existing repr_name = fqn_cls # further definitions with the same name, just add qualname elif repr_name in DUPLICATE_REPRESENTATIONS: fqn_cls = _fqn_class(cls) warnings.warn(f'Representation "{repr_name}" already defined, using qualname ' f'"{fqn_cls}".') repr_name = fqn_cls if repr_name in REPRESENTATION_CLASSES: raise ValueError( f'Representation "{repr_name}" already defined' ) REPRESENTATION_CLASSES[repr_name] = cls _invalidate_reprdiff_cls_hash() # define getters for any component that does not yet have one. for component in cls.attr_classes: if not hasattr(cls, component): setattr(cls, component, property(_make_getter(component), doc=f"The '{component}' component of the points(s).")) super().__init_subclass__(**kwargs) def __init__(self, *args, differentials=None, **kwargs): # Handle any differentials passed in. super().__init__(*args, **kwargs) if (differentials is None and args and isinstance(args[0], self.__class__)): differentials = args[0]._differentials self._differentials = self._validate_differentials(differentials) def _validate_differentials(self, differentials): """ Validate that the provided differentials are appropriate for this representation and recast/reshape as necessary and then return. Note that this does *not* set the differentials on ``self._differentials``, but rather leaves that for the caller. """ # Now handle the actual validation of any specified differential classes if differentials is None: differentials = dict() elif isinstance(differentials, BaseDifferential): # We can't handle auto-determining the key for this combo if (isinstance(differentials, RadialDifferential) and isinstance(self, UnitSphericalRepresentation)): raise ValueError("To attach a RadialDifferential to a " "UnitSphericalRepresentation, you must supply " "a dictionary with an appropriate key.") key = differentials._get_deriv_key(self) differentials = {key: differentials} for key in differentials: try: diff = differentials[key] except TypeError as err: raise TypeError("'differentials' argument must be a " "dictionary-like object") from err diff._check_base(self) if (isinstance(diff, RadialDifferential) and isinstance(self, UnitSphericalRepresentation)): # We trust the passing of a key for a RadialDifferential # attached to a UnitSphericalRepresentation because it will not # have a paired component name (UnitSphericalRepresentation has # no .distance) to automatically determine the expected key pass else: expected_key = diff._get_deriv_key(self) if key != expected_key: raise ValueError("For differential object '{}', expected " "unit key = '{}' but received key = '{}'" .format(repr(diff), expected_key, key)) # For now, we are very rigid: differentials must have the same shape # as the representation. This makes it easier to handle __getitem__ # and any other shape-changing operations on representations that # have associated differentials if diff.shape != self.shape: # TODO: message of IncompatibleShapeError is not customizable, # so use a valueerror instead? 
raise ValueError("Shape of differentials must be the same " "as the shape of the representation ({} vs " "{})".format(diff.shape, self.shape)) return differentials def _raise_if_has_differentials(self, op_name): """ Used to raise a consistent exception for any operation that is not supported when a representation has differentials attached. """ if self.differentials: raise TypeError("Operation '{}' is not supported when " "differentials are attached to a {}." .format(op_name, self.__class__.__name__)) @classproperty def _compatible_differentials(cls): return [DIFFERENTIAL_CLASSES[cls.get_name()]] @property def differentials(self): """A dictionary of differential class instances. The keys of this dictionary must be a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. """ return self._differentials # We do not make unit_vectors and scale_factors abstract methods, since # they are only necessary if one also defines an associated Differential. # Also, doing so would break pre-differential representation subclasses. def unit_vectors(self): r"""Cartesian unit vectors in the direction of each component. Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`, a change in one component of :math:`\delta c` corresponds to a change in representation of :math:`\delta c \times f_c \times \hat{e}_c`. Returns ------- unit_vectors : dict of `CartesianRepresentation` The keys are the component names. """ raise NotImplementedError(f"{type(self)} has not implemented unit vectors") def scale_factors(self): r"""Scale factors for each component's direction. Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`, a change in one component of :math:`\delta c` corresponds to a change in representation of :math:`\delta c \times f_c \times \hat{e}_c`. Returns ------- scale_factors : dict of `~astropy.units.Quantity` The keys are the component names. """ raise NotImplementedError(f"{type(self)} has not implemented scale factors.") def _re_represent_differentials(self, new_rep, differential_class): """Re-represent the differentials to the specified classes. This returns a new dictionary with the same keys but with the attached differentials converted to the new differential classes. """ if differential_class is None: return dict() if not self.differentials and differential_class: raise ValueError("No differentials associated with this " "representation!") elif (len(self.differentials) == 1 and inspect.isclass(differential_class) and issubclass(differential_class, BaseDifferential)): # TODO: is there a better way to do this? 
differential_class = { list(self.differentials.keys())[0]: differential_class } elif differential_class.keys() != self.differentials.keys(): raise ValueError("Desired differential classes must be passed in " "as a dictionary with keys equal to a string " "representation of the unit of the derivative " "for each differential stored with this " "representation object ({0})" .format(self.differentials)) new_diffs = dict() for k in self.differentials: diff = self.differentials[k] try: new_diffs[k] = diff.represent_as(differential_class[k], base=self) except Exception as err: if (differential_class[k] not in new_rep._compatible_differentials): raise TypeError("Desired differential class {} is not " "compatible with the desired " "representation class {}" .format(differential_class[k], new_rep.__class__)) from err else: raise return new_diffs def represent_as(self, other_class, differential_class=None): """Convert coordinates to another representation. If the instance is of the requested class, it is returned unmodified. By default, conversion is done via Cartesian coordinates. Also note that orientation information at the origin is *not* preserved by conversions through Cartesian coordinates. See the docstring for :meth:`~astropy.coordinates.BaseRepresentationOrDifferential.to_cartesian` for an example. Parameters ---------- other_class : `~astropy.coordinates.BaseRepresentation` subclass The type of representation to turn the coordinates into. differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional Classes in which the differentials should be represented. Can be a single class if only a single differential is attached, otherwise it should be a `dict` keyed by the same keys as the differentials. """ if other_class is self.__class__ and not differential_class: return self.without_differentials() else: if isinstance(other_class, str): raise ValueError("Input to a representation's represent_as " "must be a class, not a string. For " "strings, use frame objects") if other_class is not self.__class__: # The default is to convert via cartesian coordinates new_rep = other_class.from_cartesian(self.to_cartesian()) else: new_rep = self new_rep._differentials = self._re_represent_differentials( new_rep, differential_class) return new_rep def transform(self, matrix): """Transform coordinates using a 3x3 matrix in a Cartesian basis. This returns a new representation and does not modify the original one. Any differentials attached to this representation will also be transformed. Parameters ---------- matrix : (3,3) array-like A 3x3 (or stack thereof) matrix, such as a rotation matrix. """ # route transformation through Cartesian difs_cls = {k: CartesianDifferential for k in self.differentials.keys()} crep = self.represent_as(CartesianRepresentation, differential_class=difs_cls ).transform(matrix) # move back to original representation difs_cls = {k: diff.__class__ for k, diff in self.differentials.items()} rep = crep.represent_as(self.__class__, difs_cls) return rep def with_differentials(self, differentials): """ Create a new representation with the same positions as this representation, but with these new differentials. Differential keys that already exist in this object's differential dict are overwritten. Parameters ---------- differentials : sequence of `~astropy.coordinates.BaseDifferential` subclass instance The differentials for the new representation to have. 
        Returns
        -------
        `~astropy.coordinates.BaseRepresentation` subclass instance
            A copy of this representation, but with the ``differentials`` as
            its differentials.
        """
        if not differentials:
            return self

        args = [getattr(self, component) for component in self.components]

        # We shallow copy the differentials dictionary so we don't update the
        # current object's dictionary when adding new keys.
        new_rep = self.__class__(*args, differentials=self.differentials.copy(),
                                 copy=False)
        new_rep._differentials.update(
            new_rep._validate_differentials(differentials))

        return new_rep

    def without_differentials(self):
        """Return a copy of the representation without attached differentials.

        Returns
        -------
        `~astropy.coordinates.BaseRepresentation` subclass instance
            A shallow copy of this representation, without any differentials.
            If no differentials were present, no copy is made.
        """
        if not self._differentials:
            return self

        args = [getattr(self, component) for component in self.components]
        return self.__class__(*args, copy=False)

    @classmethod
    def from_representation(cls, representation):
        """Create a new instance of this representation from another one.

        Parameters
        ----------
        representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
        """
        return representation.represent_as(cls)

    def __eq__(self, value):
        """Equality operator for BaseRepresentation.

        This implements strict equality and requires that the representation
        classes are identical, the differentials are identical, and that the
        representation data are exactly equal.
        """
        # BaseRepresentationOrDifferential checks classes and compares components.
        out = super().__eq__(value)

        # super() checks that the classes are identical, but two instances of
        # the same class may still carry different differentials.
        if self._differentials.keys() != value._differentials.keys():
            raise ValueError('cannot compare: objects must have same differentials')

        for self_diff, value_diff in zip(self._differentials.values(),
                                         value._differentials.values()):
            out &= (self_diff == value_diff)

        return out

    def __ne__(self, value):
        return np.logical_not(self == value)

    def _apply(self, method, *args, **kwargs):
        """Create a new representation with ``method`` applied to the component
        data.

        This is not a simple inherit from ``BaseRepresentationOrDifferential``
        because we need to call ``._apply()`` on any associated differential
        classes.

        See docstring for `BaseRepresentationOrDifferential._apply`.

        Parameters
        ----------
        method : str or callable
            If str, it is the name of a method that is applied to the internal
            ``components``. If callable, the function is applied.
        *args : tuple
            Any positional arguments for ``method``.
        **kwargs : dict
            Any keyword arguments for ``method``.
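
        Examples
        --------
        An illustrative sketch (the values here are made up); applying a
        method also applies it to any attached differentials:

        >>> import astropy.units as u
        >>> from astropy.coordinates import (CartesianDifferential,
        ...                                  CartesianRepresentation)
        >>> rep = CartesianRepresentation(
        ...     [1, 2, 3] * u.pc, [4, 5, 6] * u.pc, [7, 8, 9] * u.pc,
        ...     differentials=CartesianDifferential(
        ...         [1, 2, 3] * u.km / u.s, [4, 5, 6] * u.km / u.s,
        ...         [7, 8, 9] * u.km / u.s))
        >>> reshaped = rep._apply('reshape', (3, 1))
        >>> reshaped.shape, reshaped.differentials['s'].shape
        ((3, 1), (3, 1))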
""" rep = super()._apply(method, *args, **kwargs) rep._differentials = dict( [(k, diff._apply(method, *args, **kwargs)) for k, diff in self._differentials.items()]) return rep def __setitem__(self, item, value): if not isinstance(value, BaseRepresentation): raise TypeError(f'value must be a representation instance, ' f'not {type(value)}.') if not (isinstance(value, self.__class__) or len(value.attr_classes) == len(self.attr_classes)): raise ValueError( f'value must be representable as {self.__class__.__name__} ' f'without loss of information.') diff_classes = {} if self._differentials: if self._differentials.keys() != value._differentials.keys(): raise ValueError('value must have the same differentials.') for key, self_diff in self._differentials.items(): diff_classes[key] = self_diff_cls = self_diff.__class__ value_diff_cls = value._differentials[key].__class__ if not (isinstance(value_diff_cls, self_diff_cls) or (len(value_diff_cls.attr_classes) == len(self_diff_cls.attr_classes))): raise ValueError( f'value differential {key!r} must be representable as ' f'{self_diff.__class__.__name__} without loss of information.') value = value.represent_as(self.__class__, diff_classes) super().__setitem__(item, value) for key, differential in self._differentials.items(): differential[item] = value._differentials[key] def _scale_operation(self, op, *args): """Scale all non-angular components, leaving angular ones unchanged. Parameters ---------- op : `~operator` callable Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc. *args Any arguments required for the operator (typically, what is to be multiplied with, divided by). """ results = [] for component, cls in self.attr_classes.items(): value = getattr(self, component) if issubclass(cls, Angle): results.append(value) else: results.append(op(value, *args)) # try/except catches anything that cannot initialize the class, such # as operations that returned NotImplemented or a representation # instead of a quantity (as would happen for, e.g., rep * rep). try: result = self.__class__(*results) except Exception: return NotImplemented for key, differential in self.differentials.items(): diff_result = differential._scale_operation(op, *args, scaled_base=True) result.differentials[key] = diff_result return result def _combine_operation(self, op, other, reverse=False): """Combine two representation. By default, operate on the cartesian representations of both. Parameters ---------- op : `~operator` callable Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc. other : `~astropy.coordinates.BaseRepresentation` subclass instance The other representation. reverse : bool Whether the operands should be reversed (e.g., as we got here via ``self.__rsub__`` because ``self`` is a subclass of ``other``). 
""" self._raise_if_has_differentials(op.__name__) result = self.to_cartesian()._combine_operation(op, other, reverse) if result is NotImplemented: return NotImplemented else: return self.from_cartesian(result) # We need to override this setter to support differentials @BaseRepresentationOrDifferential.shape.setter def shape(self, shape): orig_shape = self.shape # See: https://stackoverflow.com/questions/3336767/ for an example BaseRepresentationOrDifferential.shape.fset(self, shape) # also try to perform shape-setting on any associated differentials try: for k in self.differentials: self.differentials[k].shape = shape except Exception: BaseRepresentationOrDifferential.shape.fset(self, orig_shape) for k in self.differentials: self.differentials[k].shape = orig_shape raise def norm(self): """Vector norm. The norm is the standard Frobenius norm, i.e., the square root of the sum of the squares of all components with non-angular units. Note that any associated differentials will be dropped during this operation. Returns ------- norm : `astropy.units.Quantity` Vector norm, with the same shape as the representation. """ return np.sqrt(functools.reduce( operator.add, (getattr(self, component)**2 for component, cls in self.attr_classes.items() if not issubclass(cls, Angle)))) def mean(self, *args, **kwargs): """Vector mean. Averaging is done by converting the representation to cartesian, and taking the mean of the x, y, and z components. The result is converted back to the same representation as the input. Refer to `~numpy.mean` for full documentation of the arguments, noting that ``axis`` is the entry in the ``shape`` of the representation, and that the ``out`` argument cannot be used. Returns ------- mean : `~astropy.coordinates.BaseRepresentation` subclass instance Vector mean, in the same representation as that of the input. """ self._raise_if_has_differentials('mean') return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs)) def sum(self, *args, **kwargs): """Vector sum. Adding is done by converting the representation to cartesian, and summing the x, y, and z components. The result is converted back to the same representation as the input. Refer to `~numpy.sum` for full documentation of the arguments, noting that ``axis`` is the entry in the ``shape`` of the representation, and that the ``out`` argument cannot be used. Returns ------- sum : `~astropy.coordinates.BaseRepresentation` subclass instance Vector sum, in the same representation as that of the input. """ self._raise_if_has_differentials('sum') return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs)) def dot(self, other): """Dot product of two representations. The calculation is done by converting both ``self`` and ``other`` to `~astropy.coordinates.CartesianRepresentation`. Note that any associated differentials will be dropped during this operation. Parameters ---------- other : `~astropy.coordinates.BaseRepresentation` The representation to take the dot product with. Returns ------- dot_product : `~astropy.units.Quantity` The sum of the product of the x, y, and z components of the cartesian representations of ``self`` and ``other``. """ return self.to_cartesian().dot(other) def cross(self, other): """Vector cross product of two representations. The calculation is done by converting both ``self`` and ``other`` to `~astropy.coordinates.CartesianRepresentation`, and converting the result back to the type of representation of ``self``. 
Parameters ---------- other : `~astropy.coordinates.BaseRepresentation` subclass instance The representation to take the cross product with. Returns ------- cross_product : `~astropy.coordinates.BaseRepresentation` subclass instance With vectors perpendicular to both ``self`` and ``other``, in the same type of representation as ``self``. """ self._raise_if_has_differentials('cross') return self.from_cartesian(self.to_cartesian().cross(other)) class CartesianRepresentation(BaseRepresentation): """ Representation of points in 3D cartesian coordinates. Parameters ---------- x, y, z : `~astropy.units.Quantity` or array The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z`` have different shapes, they should be broadcastable. If not quantity, ``unit`` should be set. If only ``x`` is given, it is assumed that it contains an array with the 3 coordinates stored along ``xyz_axis``. unit : unit-like If given, the coordinates will be converted to this unit (or taken to be in this unit if not given. xyz_axis : int, optional The axis along which the coordinates are stored when a single array is provided rather than distinct ``x``, ``y``, and ``z`` (default: 0). differentials : dict, `CartesianDifferential`, optional Any differential classes that should be associated with this representation. The input must either be a single `CartesianDifferential` instance, or a dictionary of `CartesianDifferential` s with keys set to a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ attr_classes = {'x': u.Quantity, 'y': u.Quantity, 'z': u.Quantity} _xyz = None def __init__(self, x, y=None, z=None, unit=None, xyz_axis=None, differentials=None, copy=True): if y is None and z is None: if isinstance(x, np.ndarray) and x.dtype.kind not in 'OV': # Short-cut for 3-D array input. x = u.Quantity(x, unit, copy=copy, subok=True) # Keep a link to the array with all three coordinates # so that we can return it quickly if needed in get_xyz. 
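                # Note that _xyz keeps the input axis order; the _x/_y/_z
                # views below always have the coordinate axis moved to 0.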
self._xyz = x if xyz_axis: x = np.moveaxis(x, xyz_axis, 0) self._xyz_axis = xyz_axis else: self._xyz_axis = 0 self._x, self._y, self._z = x self._differentials = self._validate_differentials(differentials) return elif (isinstance(x, CartesianRepresentation) and unit is None and xyz_axis is None): if differentials is None: differentials = x._differentials return super().__init__(x, differentials=differentials, copy=copy) else: x, y, z = x if xyz_axis is not None: raise ValueError("xyz_axis should only be set if x, y, and z are " "in a single array passed in through x, " "i.e., y and z should not be not given.") if y is None or z is None: raise ValueError("x, y, and z are required to instantiate {}" .format(self.__class__.__name__)) if unit is not None: x = u.Quantity(x, unit, copy=copy, subok=True) y = u.Quantity(y, unit, copy=copy, subok=True) z = u.Quantity(z, unit, copy=copy, subok=True) copy = False super().__init__(x, y, z, copy=copy, differentials=differentials) if not (self._x.unit.is_equivalent(self._y.unit) and self._x.unit.is_equivalent(self._z.unit)): raise u.UnitsError("x, y, and z should have matching physical types") def unit_vectors(self): l = np.broadcast_to(1.*u.one, self.shape, subok=True) o = np.broadcast_to(0.*u.one, self.shape, subok=True) return { 'x': CartesianRepresentation(l, o, o, copy=False), 'y': CartesianRepresentation(o, l, o, copy=False), 'z': CartesianRepresentation(o, o, l, copy=False)} def scale_factors(self): l = np.broadcast_to(1.*u.one, self.shape, subok=True) return {'x': l, 'y': l, 'z': l} def get_xyz(self, xyz_axis=0): """Return a vector array of the x, y, and z coordinates. Parameters ---------- xyz_axis : int, optional The axis in the final array along which the x, y, z components should be stored (default: 0). Returns ------- xyz : `~astropy.units.Quantity` With dimension 3 along ``xyz_axis``. Note that, if possible, this will be a view. """ if self._xyz is not None: if self._xyz_axis == xyz_axis: return self._xyz else: return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis) # Create combined array. TO DO: keep it in _xyz for repeated use? # But then in-place changes have to cancel it. Likely best to # also update components. return np.stack([self._x, self._y, self._z], axis=xyz_axis) xyz = property(get_xyz) @classmethod def from_cartesian(cls, other): return other def to_cartesian(self): return self def transform(self, matrix): """ Transform the cartesian coordinates using a 3x3 matrix. This returns a new representation and does not modify the original one. Any differentials attached to this representation will also be transformed. Parameters ---------- matrix : ndarray A 3x3 transformation matrix, such as a rotation matrix. Examples -------- We can start off by creating a cartesian representation object: >>> from astropy import units as u >>> from astropy.coordinates import CartesianRepresentation >>> rep = CartesianRepresentation([1, 2] * u.pc, ... [2, 3] * u.pc, ... [3, 4] * u.pc) We now create a rotation matrix around the z axis: >>> from astropy.coordinates.matrix_utilities import rotation_matrix >>> rotation = rotation_matrix(30 * u.deg, axis='z') Finally, we can apply this transformation: >>> rep_new = rep.transform(rotation) >>> rep_new.xyz # doctest: +FLOAT_CMP <Quantity [[ 1.8660254 , 3.23205081], [ 1.23205081, 1.59807621], [ 3. , 4. ]] pc> """ # erfa rxp: Multiply a p-vector by an r-matrix. 
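        # Using xyz_axis=-1 puts the vector components last, so that a
        # (..., 3, 3) stack of matrices broadcasts against (..., 3) vectors.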
p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1)) # transformed representation rep = self.__class__(p, xyz_axis=-1, copy=False) # Handle differentials attached to this representation new_diffs = dict((k, d.transform(matrix, self, rep)) for k, d in self.differentials.items()) return rep.with_differentials(new_diffs) def _combine_operation(self, op, other, reverse=False): self._raise_if_has_differentials(op.__name__) try: other_c = other.to_cartesian() except Exception: return NotImplemented first, second = ((self, other_c) if not reverse else (other_c, self)) return self.__class__(*(op(getattr(first, component), getattr(second, component)) for component in first.components)) def norm(self): """Vector norm. The norm is the standard Frobenius norm, i.e., the square root of the sum of the squares of all components with non-angular units. Note that any associated differentials will be dropped during this operation. Returns ------- norm : `astropy.units.Quantity` Vector norm, with the same shape as the representation. """ # erfa pm: Modulus of p-vector. return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1)) def mean(self, *args, **kwargs): """Vector mean. Returns a new CartesianRepresentation instance with the means of the x, y, and z components. Refer to `~numpy.mean` for full documentation of the arguments, noting that ``axis`` is the entry in the ``shape`` of the representation, and that the ``out`` argument cannot be used. """ self._raise_if_has_differentials('mean') return self._apply('mean', *args, **kwargs) def sum(self, *args, **kwargs): """Vector sum. Returns a new CartesianRepresentation instance with the sums of the x, y, and z components. Refer to `~numpy.sum` for full documentation of the arguments, noting that ``axis`` is the entry in the ``shape`` of the representation, and that the ``out`` argument cannot be used. """ self._raise_if_has_differentials('sum') return self._apply('sum', *args, **kwargs) def dot(self, other): """Dot product of two representations. Note that any associated differentials will be dropped during this operation. Parameters ---------- other : `~astropy.coordinates.BaseRepresentation` subclass instance If not already cartesian, it is converted. Returns ------- dot_product : `~astropy.units.Quantity` The sum of the product of the x, y, and z components of ``self`` and ``other``. """ try: other_c = other.to_cartesian() except Exception as err: raise TypeError("cannot only take dot product with another " "representation, not a {} instance." .format(type(other))) from err # erfa pdp: p-vector inner (=scalar=dot) product. return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1)) def cross(self, other): """Cross product of two representations. Parameters ---------- other : `~astropy.coordinates.BaseRepresentation` subclass instance If not already cartesian, it is converted. Returns ------- cross_product : `~astropy.coordinates.CartesianRepresentation` With vectors perpendicular to both ``self`` and ``other``. """ self._raise_if_has_differentials('cross') try: other_c = other.to_cartesian() except Exception as err: raise TypeError("cannot only take cross product with another " "representation, not a {} instance." .format(type(other))) from err # erfa pxp: p-vector outer (=vector=cross) product. sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1)) return self.__class__(sxo, xyz_axis=-1) class UnitSphericalRepresentation(BaseRepresentation): """ Representation of points on a unit sphere. 
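
    The points are assumed to lie at unit distance from the origin, so only
    ``lon`` and ``lat`` are stored; operations that change the length of the
    vectors (e.g., scaling or addition) return a dimensional
    `SphericalRepresentation` instead.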
Parameters ---------- lon, lat : `~astropy.units.Quantity` ['angle'] or str The longitude and latitude of the point(s), in angular units. The latitude should be between -90 and 90 degrees, and the longitude will be wrapped to an angle between 0 and 360 degrees. These can also be instances of `~astropy.coordinates.Angle`, `~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`. differentials : dict, `~astropy.coordinates.BaseDifferential`, optional Any differential classes that should be associated with this representation. The input must either be a single `~astropy.coordinates.BaseDifferential` instance (see `._compatible_differentials` for valid types), or a dictionary of of differential instances with keys set to a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ attr_classes = {'lon': Longitude, 'lat': Latitude} @classproperty def _dimensional_representation(cls): return SphericalRepresentation def __init__(self, lon, lat=None, differentials=None, copy=True): super().__init__(lon, lat, differentials=differentials, copy=copy) @classproperty def _compatible_differentials(cls): return [UnitSphericalDifferential, UnitSphericalCosLatDifferential, SphericalDifferential, SphericalCosLatDifferential, RadialDifferential] # Could let the metaclass define these automatically, but good to have # a bit clearer docstrings. @property def lon(self): """ The longitude of the point(s). """ return self._lon @property def lat(self): """ The latitude of the point(s). """ return self._lat def unit_vectors(self): sinlon, coslon = np.sin(self.lon), np.cos(self.lon) sinlat, coslat = np.sin(self.lat), np.cos(self.lat) return { 'lon': CartesianRepresentation(-sinlon, coslon, 0., copy=False), 'lat': CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon, coslat, copy=False)} def scale_factors(self, omit_coslat=False): sf_lat = np.broadcast_to(1./u.radian, self.shape, subok=True) sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian return {'lon': sf_lon, 'lat': sf_lat} def to_cartesian(self): """ Converts spherical polar coordinates to 3D rectangular cartesian coordinates. """ # erfa s2c: Convert [unit]spherical coordinates to Cartesian. p = erfa_ufunc.s2c(self.lon, self.lat) return CartesianRepresentation(p, xyz_axis=-1, copy=False) @classmethod def from_cartesian(cls, cart): """ Converts 3D rectangular cartesian coordinates to spherical polar coordinates. """ p = cart.get_xyz(xyz_axis=-1) # erfa c2s: P-vector to [unit]spherical coordinates. return cls(*erfa_ufunc.c2s(p), copy=False) def represent_as(self, other_class, differential_class=None): # Take a short cut if the other class is a spherical representation # TODO! for differential_class. This cannot (currently) be implemented # like in the other Representations since `_re_represent_differentials` # keeps differentials' unit keys, but this can result in a mismatch # between the UnitSpherical expected key (e.g. "s") and that expected # in the other class (here "s / m"). 
        # For more info, see PR #11467
        if inspect.isclass(other_class) and not differential_class:
            if issubclass(other_class, PhysicsSphericalRepresentation):
                return other_class(phi=self.lon, theta=90 * u.deg - self.lat,
                                   r=1.0, copy=False)
            elif issubclass(other_class, SphericalRepresentation):
                return other_class(lon=self.lon, lat=self.lat, distance=1.0,
                                   copy=False)

        return super().represent_as(other_class, differential_class)

    def transform(self, matrix):
        r"""Transform the unit-spherical coordinates using a 3x3 matrix.

        This returns a new representation and does not modify the original one.
        Any differentials attached to this representation will also be
        transformed.

        Parameters
        ----------
        matrix : (3,3) array-like
            A 3x3 matrix, such as a rotation matrix (or a stack of matrices).

        Returns
        -------
        `UnitSphericalRepresentation` or `SphericalRepresentation`
            If ``matrix`` is O(3) -- :math:`M \cdot M^T = I` -- like a rotation,
            then the result is a `UnitSphericalRepresentation`.
            All other matrices will change the distance, so the dimensional
            representation is used instead.
        """
        # the transformation matrix does not need to be a rotation matrix,
        # so the unit distance is not guaranteed. For speed, we check if the
        # matrix is in O(3) and preserves lengths.
        if np.all(is_O3(matrix)):  # remain in unit-rep
            xyz = erfa_ufunc.s2c(self.lon, self.lat)
            p = erfa_ufunc.rxp(matrix, xyz)
            lon, lat = erfa_ufunc.c2s(p)
            rep = self.__class__(lon=lon, lat=lat)
            # handle differentials
            new_diffs = dict((k, d.transform(matrix, self, rep))
                             for k, d in self.differentials.items())
            rep = rep.with_differentials(new_diffs)

        else:  # switch to dimensional representation
            rep = self._dimensional_representation(
                lon=self.lon, lat=self.lat, distance=1,
                differentials=self.differentials
            ).transform(matrix)

        return rep

    def _scale_operation(self, op, *args):
        return self._dimensional_representation(
            lon=self.lon, lat=self.lat, distance=1.,
            differentials=self.differentials)._scale_operation(op, *args)

    def __neg__(self):
        if any(differential.base_representation is not self.__class__
               for differential in self.differentials.values()):
            return super().__neg__()

        result = self.__class__(self.lon + 180. * u.deg, -self.lat, copy=False)
        for key, differential in self.differentials.items():
            new_comps = (op(getattr(differential, comp))
                         for op, comp in zip((operator.pos, operator.neg),
                                             differential.components))
            result.differentials[key] = differential.__class__(*new_comps,
                                                               copy=False)
        return result

    def norm(self):
        """Vector norm.

        The norm is the standard Frobenius norm, i.e., the square root of the
        sum of the squares of all components with non-angular units, which is
        always unity for vectors on the unit sphere.

        Returns
        -------
        norm : `~astropy.units.Quantity` ['dimensionless']
            Dimensionless ones, with the same shape as the representation.
        """
        return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
                          copy=False)

    def _combine_operation(self, op, other, reverse=False):
        self._raise_if_has_differentials(op.__name__)

        result = self.to_cartesian()._combine_operation(op, other, reverse)
        if result is NotImplemented:
            return NotImplemented
        else:
            return self._dimensional_representation.from_cartesian(result)

    def mean(self, *args, **kwargs):
        """Vector mean.

        The representation is converted to cartesian, the means of the x, y,
        and z components are calculated, and the result is converted to a
        `~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting that ``axis`` is the entry in the ``shape`` of the representation, and that the ``out`` argument cannot be used. """ self._raise_if_has_differentials('mean') return self._dimensional_representation.from_cartesian( self.to_cartesian().mean(*args, **kwargs)) def sum(self, *args, **kwargs): """Vector sum. The representation is converted to cartesian, the sums of the x, y, and z components are calculated, and the result is converted to a `~astropy.coordinates.SphericalRepresentation`. Refer to `~numpy.sum` for full documentation of the arguments, noting that ``axis`` is the entry in the ``shape`` of the representation, and that the ``out`` argument cannot be used. """ self._raise_if_has_differentials('sum') return self._dimensional_representation.from_cartesian( self.to_cartesian().sum(*args, **kwargs)) def cross(self, other): """Cross product of two representations. The calculation is done by converting both ``self`` and ``other`` to `~astropy.coordinates.CartesianRepresentation`, and converting the result back to `~astropy.coordinates.SphericalRepresentation`. Parameters ---------- other : `~astropy.coordinates.BaseRepresentation` subclass instance The representation to take the cross product with. Returns ------- cross_product : `~astropy.coordinates.SphericalRepresentation` With vectors perpendicular to both ``self`` and ``other``. """ self._raise_if_has_differentials('cross') return self._dimensional_representation.from_cartesian( self.to_cartesian().cross(other)) class RadialRepresentation(BaseRepresentation): """ Representation of the distance of points from the origin. Note that this is mostly intended as an internal helper representation. It can do little else but being used as a scale in multiplication. Parameters ---------- distance : `~astropy.units.Quantity` ['length'] The distance of the point(s) from the origin. differentials : dict, `~astropy.coordinates.BaseDifferential`, optional Any differential classes that should be associated with this representation. The input must either be a single `~astropy.coordinates.BaseDifferential` instance (see `._compatible_differentials` for valid types), or a dictionary of of differential instances with keys set to a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ attr_classes = {'distance': u.Quantity} def __init__(self, distance, differentials=None, copy=True): super().__init__(distance, differentials=differentials, copy=copy) @property def distance(self): """ The distance from the origin to the point(s). """ return self._distance def unit_vectors(self): """Cartesian unit vectors are undefined for radial representation.""" raise NotImplementedError('Cartesian unit vectors are undefined for ' '{} instances'.format(self.__class__)) def scale_factors(self): l = np.broadcast_to(1.*u.one, self.shape, subok=True) return {'distance': l} def to_cartesian(self): """Cannot convert radial representation to cartesian.""" raise NotImplementedError('cannot convert {} instance to cartesian.' 
                                  .format(self.__class__))

    @classmethod
    def from_cartesian(cls, cart):
        """
        Converts 3D rectangular cartesian coordinates to radial coordinate.
        """
        return cls(distance=cart.norm(), copy=False)

    def __mul__(self, other):
        if isinstance(other, BaseRepresentation):
            return self.distance * other
        else:
            return super().__mul__(other)

    def norm(self):
        """Vector norm.

        Just the distance itself.

        Returns
        -------
        norm : `~astropy.units.Quantity` ['length']
            The distance of each point; identical to the ``distance``
            attribute, with the same shape as the representation.
        """
        return self.distance

    def _combine_operation(self, op, other, reverse=False):
        return NotImplemented

    def transform(self, matrix):
        """Radial representations can only be transformed by a scaled
        identity matrix, since they carry no directional information.

        Parameters
        ----------
        matrix : array-like
            The transformation matrix in a Cartesian basis.
            Must be a multiplication: a diagonal matrix with identical
            elements.
            Must have shape (..., 3, 3), where the last 2 indices are for the
            matrix on each other axis.
            Make sure that the matrix shape is compatible with the shape of
            this representation.

        Raises
        ------
        ValueError
            If the matrix is not a multiplication.
        """
        scl = matrix[..., 0, 0]
        # check that the matrix is a scaled identity matrix on the last 2 axes.
        if np.any(matrix != scl[..., np.newaxis, np.newaxis] * np.identity(3)):
            raise ValueError("Radial representations can only be "
                             "transformed by a scaled identity matrix")

        return self * scl


def _spherical_op_funcs(op, *args):
    """For a given operator, return functions that adjust lon, lat, distance."""
    if op is operator.neg:
        return lambda x: x + 180 * u.deg, operator.neg, operator.pos

    try:
        scale_sign = np.sign(args[0])
    except Exception:
        # This should always work, even if perhaps we get a negative distance.
        return operator.pos, operator.pos, lambda x: op(x, *args)

    scale = abs(args[0])
    return (lambda x: x + 180 * u.deg * np.signbit(scale_sign),
            lambda x: x * scale_sign,
            lambda x: op(x, scale))


class SphericalRepresentation(BaseRepresentation):
    """
    Representation of points in 3D spherical coordinates.

    Parameters
    ----------
    lon, lat : `~astropy.units.Quantity` ['angle']
        The longitude and latitude of the point(s), in angular units. The
        latitude should be between -90 and 90 degrees, and the longitude will
        be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle`,
        `~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.

    distance : `~astropy.units.Quantity` ['length']
        The distance to the point(s). If the distance is a length, it is
        passed to the :class:`~astropy.coordinates.Distance` class, otherwise
        it is passed to the :class:`~astropy.units.Quantity` class.

    differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
        Any differential classes that should be associated with this
        representation. The input must either be a single
        `~astropy.coordinates.BaseDifferential` instance (see
        `._compatible_differentials` for valid types), or a dictionary of
        differential instances with keys set to a string representation of
        the SI unit with which the differential (derivative) is taken. For
        example, for a velocity differential on a positional representation,
        the key would be ``'s'`` for seconds, indicating that the derivative
        is a time derivative.

    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
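
    Examples
    --------
    A minimal, purely illustrative construction (the values are made up):

    >>> from astropy import units as u
    >>> from astropy.coordinates import SphericalRepresentation
    >>> sph = SphericalRepresentation(lon=45 * u.deg, lat=30 * u.deg,
    ...                               distance=1 * u.kpc)
    >>> sph.distance
    <Distance 1. kpc>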
""" attr_classes = {'lon': Longitude, 'lat': Latitude, 'distance': u.Quantity} _unit_representation = UnitSphericalRepresentation def __init__(self, lon, lat=None, distance=None, differentials=None, copy=True): super().__init__(lon, lat, distance, copy=copy, differentials=differentials) if (not isinstance(self._distance, Distance) and self._distance.unit.physical_type == 'length'): try: self._distance = Distance(self._distance, copy=False) except ValueError as e: if e.args[0].startswith('distance must be >= 0'): raise ValueError("Distance must be >= 0. To allow negative " "distance values, you must explicitly pass" " in a `Distance` object with the the " "argument 'allow_negative=True'.") from e else: raise @classproperty def _compatible_differentials(cls): return [UnitSphericalDifferential, UnitSphericalCosLatDifferential, SphericalDifferential, SphericalCosLatDifferential, RadialDifferential] @property def lon(self): """ The longitude of the point(s). """ return self._lon @property def lat(self): """ The latitude of the point(s). """ return self._lat @property def distance(self): """ The distance from the origin to the point(s). """ return self._distance def unit_vectors(self): sinlon, coslon = np.sin(self.lon), np.cos(self.lon) sinlat, coslat = np.sin(self.lat), np.cos(self.lat) return { 'lon': CartesianRepresentation(-sinlon, coslon, 0., copy=False), 'lat': CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon, coslat, copy=False), 'distance': CartesianRepresentation(coslat*coslon, coslat*sinlon, sinlat, copy=False)} def scale_factors(self, omit_coslat=False): sf_lat = self.distance / u.radian sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat) sf_distance = np.broadcast_to(1.*u.one, self.shape, subok=True) return {'lon': sf_lon, 'lat': sf_lat, 'distance': sf_distance} def represent_as(self, other_class, differential_class=None): # Take a short cut if the other class is a spherical representation if inspect.isclass(other_class): if issubclass(other_class, PhysicsSphericalRepresentation): diffs = self._re_represent_differentials(other_class, differential_class) return other_class(phi=self.lon, theta=90 * u.deg - self.lat, r=self.distance, differentials=diffs, copy=False) elif issubclass(other_class, UnitSphericalRepresentation): diffs = self._re_represent_differentials(other_class, differential_class) return other_class(lon=self.lon, lat=self.lat, differentials=diffs, copy=False) return super().represent_as(other_class, differential_class) def to_cartesian(self): """ Converts spherical polar coordinates to 3D rectangular cartesian coordinates. """ # We need to convert Distance to Quantity to allow negative values. if isinstance(self.distance, Distance): d = self.distance.view(u.Quantity) else: d = self.distance # erfa s2p: Convert spherical polar coordinates to p-vector. p = erfa_ufunc.s2p(self.lon, self.lat, d) return CartesianRepresentation(p, xyz_axis=-1, copy=False) @classmethod def from_cartesian(cls, cart): """ Converts 3D rectangular cartesian coordinates to spherical polar coordinates. """ p = cart.get_xyz(xyz_axis=-1) # erfa p2s: P-vector to spherical polar coordinates. return cls(*erfa_ufunc.p2s(p), copy=False) def transform(self, matrix): """Transform the spherical coordinates using a 3x3 matrix. This returns a new representation and does not modify the original one. Any differentials attached to this representation will also be transformed. Parameters ---------- matrix : (3,3) array-like A 3x3 matrix, such as a rotation matrix (or a stack of matrices). 
""" xyz = erfa_ufunc.s2c(self.lon, self.lat) p = erfa_ufunc.rxp(matrix, xyz) lon, lat, ur = erfa_ufunc.p2s(p) rep = self.__class__(lon=lon, lat=lat, distance=self.distance * ur) # handle differentials new_diffs = dict((k, d.transform(matrix, self, rep)) for k, d in self.differentials.items()) return rep.with_differentials(new_diffs) def norm(self): """Vector norm. The norm is the standard Frobenius norm, i.e., the square root of the sum of the squares of all components with non-angular units. For spherical coordinates, this is just the absolute value of the distance. Returns ------- norm : `astropy.units.Quantity` Vector norm, with the same shape as the representation. """ return np.abs(self.distance) def _scale_operation(self, op, *args): # TODO: expand special-casing to UnitSpherical and RadialDifferential. if any(differential.base_representation is not self.__class__ for differential in self.differentials.values()): return super()._scale_operation(op, *args) lon_op, lat_op, distance_op = _spherical_op_funcs(op, *args) result = self.__class__(lon_op(self.lon), lat_op(self.lat), distance_op(self.distance), copy=False) for key, differential in self.differentials.items(): new_comps = (op(getattr(differential, comp)) for op, comp in zip( (operator.pos, lat_op, distance_op), differential.components)) result.differentials[key] = differential.__class__(*new_comps, copy=False) return result class PhysicsSphericalRepresentation(BaseRepresentation): """ Representation of points in 3D spherical coordinates (using the physics convention of using ``phi`` and ``theta`` for azimuth and inclination from the pole). Parameters ---------- phi, theta : `~astropy.units.Quantity` or str The azimuth and inclination of the point(s), in angular units. The inclination should be between 0 and 180 degrees, and the azimuth will be wrapped to an angle between 0 and 360 degrees. These can also be instances of `~astropy.coordinates.Angle`. If ``copy`` is False, `phi` will be changed inplace if it is not between 0 and 360 degrees. r : `~astropy.units.Quantity` The distance to the point(s). If the distance is a length, it is passed to the :class:`~astropy.coordinates.Distance` class, otherwise it is passed to the :class:`~astropy.units.Quantity` class. differentials : dict, `PhysicsSphericalDifferential`, optional Any differential classes that should be associated with this representation. The input must either be a single `PhysicsSphericalDifferential` instance, or a dictionary of of differential instances with keys set to a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ attr_classes = {'phi': Angle, 'theta': Angle, 'r': u.Quantity} def __init__(self, phi, theta=None, r=None, differentials=None, copy=True): super().__init__(phi, theta, r, copy=copy, differentials=differentials) # Wrap/validate phi/theta # Note that _phi already holds our own copy if copy=True. 
        self._phi.wrap_at(360 * u.deg, inplace=True)

        # This invalid catch block can be removed when the minimum numpy
        # version is >= 1.19 (NUMPY_LT_1_19)
        with np.errstate(invalid='ignore'):
            if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
                raise ValueError('Inclination angle(s) must be within '
                                 '0 deg <= angle <= 180 deg, '
                                 'got {}'.format(self._theta.to(u.degree)))

        if self._r.unit.physical_type == 'length':
            self._r = self._r.view(Distance)

    @property
    def phi(self):
        """
        The azimuth of the point(s).
        """
        return self._phi

    @property
    def theta(self):
        """
        The inclination of the point(s), measured from the pole.
        """
        return self._theta

    @property
    def r(self):
        """
        The distance from the origin to the point(s).
        """
        return self._r

    def unit_vectors(self):
        sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
        sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
        return {
            'phi': CartesianRepresentation(-sinphi, cosphi, 0., copy=False),
            'theta': CartesianRepresentation(costheta*cosphi,
                                             costheta*sinphi,
                                             -sintheta, copy=False),
            'r': CartesianRepresentation(sintheta*cosphi, sintheta*sinphi,
                                         costheta, copy=False)}

    def scale_factors(self):
        r = self.r / u.radian
        sintheta = np.sin(self.theta)
        l = np.broadcast_to(1.*u.one, self.shape, subok=True)
        return {'phi': r * sintheta,
                'theta': r,
                'r': l}

    def represent_as(self, other_class, differential_class=None):
        # Take a short cut if the other class is a spherical representation
        if inspect.isclass(other_class):
            if issubclass(other_class, SphericalRepresentation):
                diffs = self._re_represent_differentials(other_class,
                                                         differential_class)
                return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
                                   distance=self.r, differentials=diffs,
                                   copy=False)
            elif issubclass(other_class, UnitSphericalRepresentation):
                diffs = self._re_represent_differentials(other_class,
                                                         differential_class)
                return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
                                   differentials=diffs, copy=False)

        return super().represent_as(other_class, differential_class)

    def to_cartesian(self):
        """
        Converts spherical polar coordinates to 3D rectangular cartesian
        coordinates.
        """
        # We need to convert Distance to Quantity to allow negative values.
        if isinstance(self.r, Distance):
            d = self.r.view(u.Quantity)
        else:
            d = self.r

        x = d * np.sin(self.theta) * np.cos(self.phi)
        y = d * np.sin(self.theta) * np.sin(self.phi)
        z = d * np.cos(self.theta)

        return CartesianRepresentation(x=x, y=y, z=z, copy=False)

    @classmethod
    def from_cartesian(cls, cart):
        """
        Converts 3D rectangular cartesian coordinates to spherical polar
        coordinates.
        """
        s = np.hypot(cart.x, cart.y)
        r = np.hypot(s, cart.z)

        phi = np.arctan2(cart.y, cart.x)
        theta = np.arctan2(s, cart.z)

        return cls(phi=phi, theta=theta, r=r, copy=False)

    def transform(self, matrix):
        """Transform the spherical coordinates using a 3x3 matrix.

        This returns a new representation and does not modify the original one.
        Any differentials attached to this representation will also be
        transformed.

        Parameters
        ----------
        matrix : (3,3) array-like
            A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
""" # apply transformation in unit-spherical coordinates xyz = erfa_ufunc.s2c(self.phi, 90*u.deg-self.theta) p = erfa_ufunc.rxp(matrix, xyz) lon, lat, ur = erfa_ufunc.p2s(p) # `ur` is transformed unit-`r` # create transformed physics-spherical representation, # reapplying the distance scaling rep = self.__class__(phi=lon, theta=90*u.deg-lat, r=self.r * ur) new_diffs = dict((k, d.transform(matrix, self, rep)) for k, d in self.differentials.items()) return rep.with_differentials(new_diffs) def norm(self): """Vector norm. The norm is the standard Frobenius norm, i.e., the square root of the sum of the squares of all components with non-angular units. For spherical coordinates, this is just the absolute value of the radius. Returns ------- norm : `astropy.units.Quantity` Vector norm, with the same shape as the representation. """ return np.abs(self.r) def _scale_operation(self, op, *args): if any(differential.base_representation is not self.__class__ for differential in self.differentials.values()): return super()._scale_operation(op, *args) phi_op, adjust_theta_sign, r_op = _spherical_op_funcs(op, *args) # Also run phi_op on theta to ensure theta remains between 0 and 180: # any time the scale is negative, we do -theta + 180 degrees. result = self.__class__(phi_op(self.phi), phi_op(adjust_theta_sign(self.theta)), r_op(self.r), copy=False) for key, differential in self.differentials.items(): new_comps = (op(getattr(differential, comp)) for op, comp in zip( (operator.pos, adjust_theta_sign, r_op), differential.components)) result.differentials[key] = differential.__class__(*new_comps, copy=False) return result class CylindricalRepresentation(BaseRepresentation): """ Representation of points in 3D cylindrical coordinates. Parameters ---------- rho : `~astropy.units.Quantity` The distance from the z axis to the point(s). phi : `~astropy.units.Quantity` or str The azimuth of the point(s), in angular units, which will be wrapped to an angle between 0 and 360 degrees. This can also be instances of `~astropy.coordinates.Angle`, z : `~astropy.units.Quantity` The z coordinate(s) of the point(s) differentials : dict, `CylindricalDifferential`, optional Any differential classes that should be associated with this representation. The input must either be a single `CylindricalDifferential` instance, or a dictionary of of differential instances with keys set to a string representation of the SI unit with which the differential (derivative) is taken. For example, for a velocity differential on a positional representation, the key would be ``'s'`` for seconds, indicating that the derivative is a time derivative. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ attr_classes = {'rho': u.Quantity, 'phi': Angle, 'z': u.Quantity} def __init__(self, rho, phi=None, z=None, differentials=None, copy=True): super().__init__(rho, phi, z, copy=copy, differentials=differentials) if not self._rho.unit.is_equivalent(self._z.unit): raise u.UnitsError("rho and z should have matching physical types") @property def rho(self): """ The distance of the point(s) from the z-axis. """ return self._rho @property def phi(self): """ The azimuth of the point(s). """ return self._phi @property def z(self): """ The height of the point(s). 
""" return self._z def unit_vectors(self): sinphi, cosphi = np.sin(self.phi), np.cos(self.phi) l = np.broadcast_to(1., self.shape) return { 'rho': CartesianRepresentation(cosphi, sinphi, 0, copy=False), 'phi': CartesianRepresentation(-sinphi, cosphi, 0, copy=False), 'z': CartesianRepresentation(0, 0, l, unit=u.one, copy=False)} def scale_factors(self): rho = self.rho / u.radian l = np.broadcast_to(1.*u.one, self.shape, subok=True) return {'rho': l, 'phi': rho, 'z': l} @classmethod def from_cartesian(cls, cart): """ Converts 3D rectangular cartesian coordinates to cylindrical polar coordinates. """ rho = np.hypot(cart.x, cart.y) phi = np.arctan2(cart.y, cart.x) z = cart.z return cls(rho=rho, phi=phi, z=z, copy=False) def to_cartesian(self): """ Converts cylindrical polar coordinates to 3D rectangular cartesian coordinates. """ x = self.rho * np.cos(self.phi) y = self.rho * np.sin(self.phi) z = self.z return CartesianRepresentation(x=x, y=y, z=z, copy=False) def _scale_operation(self, op, *args): if any(differential.base_representation is not self.__class__ for differential in self.differentials.values()): return super()._scale_operation(op, *args) phi_op, _, rho_op = _spherical_op_funcs(op, *args) z_op = lambda x: op(x, *args) result = self.__class__(rho_op(self.rho), phi_op(self.phi), z_op(self.z), copy=False) for key, differential in self.differentials.items(): new_comps = (op(getattr(differential, comp)) for op, comp in zip( (rho_op, operator.pos, z_op), differential.components)) result.differentials[key] = differential.__class__(*new_comps, copy=False) return result class BaseDifferential(BaseRepresentationOrDifferential): r"""A base class representing differentials of representations. These represent differences or derivatives along each component. E.g., for physics spherical coordinates, these would be :math:`\delta r, \delta \theta, \delta \phi`. Parameters ---------- d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass The components of the 3D differentials. The names are the keys and the subclasses the values of the ``attr_classes`` attribute. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. Notes ----- All differential representation classes should subclass this base class, and define an ``base_representation`` attribute with the class of the regular `~astropy.coordinates.BaseRepresentation` for which differential coordinates are provided. This will set up a default ``attr_classes`` instance with names equal to the base component names prefixed by ``d_``, and all classes set to `~astropy.units.Quantity`, plus properties to access those, and a default ``__init__`` for initialization. """ def __init_subclass__(cls, **kwargs): """Set default ``attr_classes`` and component getters on a Differential. class BaseDifferential(BaseRepresentationOrDifferential): For these, the components are those of the base representation prefixed by 'd_', and the class is `~astropy.units.Quantity`. """ # Don't do anything for base helper classes. if cls.__name__ in ('BaseDifferential', 'BaseSphericalDifferential', 'BaseSphericalCosLatDifferential'): return if not hasattr(cls, 'base_representation'): raise NotImplementedError('Differential representations must have a' '"base_representation" class attribute.') # If not defined explicitly, create attr_classes. 
if not hasattr(cls, 'attr_classes'): base_attr_classes = cls.base_representation.attr_classes cls.attr_classes = {'d_' + c: u.Quantity for c in base_attr_classes} repr_name = cls.get_name() if repr_name in DIFFERENTIAL_CLASSES: raise ValueError(f"Differential class {repr_name} already defined") DIFFERENTIAL_CLASSES[repr_name] = cls _invalidate_reprdiff_cls_hash() # If not defined explicitly, create properties for the components. for component in cls.attr_classes: if not hasattr(cls, component): setattr(cls, component, property(_make_getter(component), doc=f"Component '{component}' of the Differential.")) super().__init_subclass__(**kwargs) @classmethod def _check_base(cls, base): if cls not in base._compatible_differentials: raise TypeError(f"Differential class {cls} is not compatible with the " f"base (representation) class {base.__class__}") def _get_deriv_key(self, base): """Given a base (representation instance), determine the unit of the derivative by removing the representation unit from the component units of this differential. """ # This check is just a last resort so we don't return a strange unit key # from accidentally passing in the wrong base. self._check_base(base) for name in base.components: comp = getattr(base, name) d_comp = getattr(self, f'd_{name}', None) if d_comp is not None: d_unit = comp.unit / d_comp.unit # This is quite a bit faster than using to_system() or going # through Quantity() d_unit_si = d_unit.decompose(u.si.bases) d_unit_si._scale = 1 # remove the scale from the unit return str(d_unit_si) else: raise RuntimeError("Invalid representation-differential units! This" " likely happened because either the " "representation or the associated differential " "have non-standard units. Check that the input " "positional data have positional units, and the " "input velocity data have velocity units, or " "are both dimensionless.") @classmethod def _get_base_vectors(cls, base): """Get unit vectors and scale factors from base. Parameters ---------- base : instance of ``self.base_representation`` The points for which the unit vectors and scale factors should be retrieved. Returns ------- unit_vectors : dict of `CartesianRepresentation` In the directions of the coordinates of base. scale_factors : dict of `~astropy.units.Quantity` Scale factors for each of the coordinates Raises ------ TypeError : if the base is not of the correct type """ cls._check_base(base) return base.unit_vectors(), base.scale_factors() def to_cartesian(self, base): """Convert the differential to 3D rectangular cartesian coordinates. Parameters ---------- base : instance of ``self.base_representation`` The points for which the differentials are to be converted: each of the components is multiplied by its unit vectors and scale factors. Returns ------- `CartesianDifferential` This object, converted. """ base_e, base_sf = self._get_base_vectors(base) return functools.reduce( operator.add, (getattr(self, d_c) * base_sf[c] * base_e[c] for d_c, c in zip(self.components, base.components))) @classmethod def from_cartesian(cls, other, base): """Convert the differential from 3D rectangular cartesian coordinates to the desired class. Parameters ---------- other The object to convert into this differential. base : `BaseRepresentation` The points for which the differentials are to be converted: each of the components is multiplied by its unit vectors and scale factors. Will be converted to ``cls.base_representation`` if needed. 
Returns ------- `BaseDifferential` subclass instance A new differential object that is this class' type. """ base = base.represent_as(cls.base_representation) base_e, base_sf = cls._get_base_vectors(base) return cls(*(other.dot(e / base_sf[component]) for component, e in base_e.items()), copy=False) def represent_as(self, other_class, base): """Convert coordinates to another representation. If the instance is of the requested class, it is returned unmodified. By default, conversion is done via cartesian coordinates. Parameters ---------- other_class : `~astropy.coordinates.BaseRepresentation` subclass The type of representation to turn the coordinates into. base : instance of ``self.base_representation`` Base relative to which the differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. """ if other_class is self.__class__: return self # The default is to convert via cartesian coordinates. self_cartesian = self.to_cartesian(base) if issubclass(other_class, BaseDifferential): return other_class.from_cartesian(self_cartesian, base) else: return other_class.from_cartesian(self_cartesian) @classmethod def from_representation(cls, representation, base): """Create a new instance of this representation from another one. Parameters ---------- representation : `~astropy.coordinates.BaseRepresentation` instance The presentation that should be converted to this class. base : instance of ``cls.base_representation`` The base relative to which the differentials will be defined. If the representation is a differential itself, the base will be converted to its ``base_representation`` to help convert it. """ if isinstance(representation, BaseDifferential): cartesian = representation.to_cartesian( base.represent_as(representation.base_representation)) else: cartesian = representation.to_cartesian() return cls.from_cartesian(cartesian, base) def transform(self, matrix, base, transformed_base): """Transform differential using a 3x3 matrix in a Cartesian basis. This returns a new differential and does not modify the original one. Parameters ---------- matrix : (3,3) array-like A 3x3 (or stack thereof) matrix, such as a rotation matrix. base : instance of ``cls.base_representation`` Base relative to which the differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. transformed_base : instance of ``cls.base_representation`` Base relative to which the transformed differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. """ # route transformation through Cartesian cdiff = self.represent_as(CartesianDifferential, base=base ).transform(matrix) # move back to original representation diff = cdiff.represent_as(self.__class__, transformed_base) return diff def _scale_operation(self, op, *args, scaled_base=False): """Scale all components. Parameters ---------- op : `~operator` callable Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc. *args Any arguments required for the operator (typically, what is to be multiplied with, divided by). scaled_base : bool, optional Whether the base was scaled the same way. This affects whether differential components should be scaled. For instance, a differential in longitude should not be scaled if its spherical base is scaled in radius. 
""" scaled_attrs = [op(getattr(self, c), *args) for c in self.components] return self.__class__(*scaled_attrs, copy=False) def _combine_operation(self, op, other, reverse=False): """Combine two differentials, or a differential with a representation. If ``other`` is of the same differential type as ``self``, the components will simply be combined. If ``other`` is a representation, it will be used as a base for which to evaluate the differential, and the result is a new representation. Parameters ---------- op : `~operator` callable Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc. other : `~astropy.coordinates.BaseRepresentation` subclass instance The other differential or representation. reverse : bool Whether the operands should be reversed (e.g., as we got here via ``self.__rsub__`` because ``self`` is a subclass of ``other``). """ if isinstance(self, type(other)): first, second = (self, other) if not reverse else (other, self) return self.__class__(*[op(getattr(first, c), getattr(second, c)) for c in self.components]) else: try: self_cartesian = self.to_cartesian(other) except TypeError: return NotImplemented return other._combine_operation(op, self_cartesian, not reverse) def __sub__(self, other): # avoid "differential - representation". if isinstance(other, BaseRepresentation): return NotImplemented return super().__sub__(other) def norm(self, base=None): """Vector norm. The norm is the standard Frobenius norm, i.e., the square root of the sum of the squares of all components with non-angular units. Parameters ---------- base : instance of ``self.base_representation`` Base relative to which the differentials are defined. This is required to calculate the physical size of the differential for all but Cartesian differentials or radial differentials. Returns ------- norm : `astropy.units.Quantity` Vector norm, with the same shape as the representation. """ # RadialDifferential overrides this function, so there is no handling here if not isinstance(self, CartesianDifferential) and base is None: raise ValueError("`base` must be provided to calculate the norm of a" f" {type(self).__name__}") return self.to_cartesian(base).norm() class CartesianDifferential(BaseDifferential): """Differentials in of points in 3D cartesian coordinates. Parameters ---------- d_x, d_y, d_z : `~astropy.units.Quantity` or array The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``, and ``d_z`` have different shapes, they should be broadcastable. If not quantities, ``unit`` should be set. If only ``d_x`` is given, it is assumed that it contains an array with the 3 coordinates stored along ``xyz_axis``. unit : `~astropy.units.Unit` or str If given, the differentials will be converted to this unit (or taken to be in this unit if not given. xyz_axis : int, optional The axis along which the coordinates are stored when a single array is provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0). copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = CartesianRepresentation _d_xyz = None def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None, copy=True): if d_y is None and d_z is None: if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in 'OV': # Short-cut for 3-D array input. d_x = u.Quantity(d_x, unit, copy=copy, subok=True) # Keep a link to the array with all three coordinates # so that we can return it quickly if needed in get_xyz. 
                self._d_xyz = d_x
                if xyz_axis:
                    d_x = np.moveaxis(d_x, xyz_axis, 0)
                    self._xyz_axis = xyz_axis
                else:
                    self._xyz_axis = 0

                self._d_x, self._d_y, self._d_z = d_x
                return
            else:
                d_x, d_y, d_z = d_x

        if xyz_axis is not None:
            raise ValueError("xyz_axis should only be set if d_x, d_y, and d_z "
                             "are in a single array passed in through d_x, "
                             "i.e., d_y and d_z should not be given.")

        if d_y is None or d_z is None:
            raise ValueError("d_x, d_y, and d_z are required to instantiate {}"
                             .format(self.__class__.__name__))

        if unit is not None:
            d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
            d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
            d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
            copy = False

        super().__init__(d_x, d_y, d_z, copy=copy)
        if not (self._d_x.unit.is_equivalent(self._d_y.unit) and
                self._d_x.unit.is_equivalent(self._d_z.unit)):
            raise u.UnitsError('d_x, d_y and d_z should have equivalent units.')

    def to_cartesian(self, base=None):
        return CartesianRepresentation(*[getattr(self, c) for c
                                         in self.components])

    @classmethod
    def from_cartesian(cls, other, base=None):
        return cls(*[getattr(other, c) for c in other.components])

    def transform(self, matrix, base=None, transformed_base=None):
        """Transform differentials using a 3x3 matrix in a Cartesian basis.

        This returns a new differential and does not modify the original one.

        Parameters
        ----------
        matrix : (3,3) array-like
            A 3x3 (or stack thereof) matrix, such as a rotation matrix.
        base, transformed_base : `~astropy.coordinates.CartesianRepresentation` or None, optional
            Not used in the Cartesian transformation.
        """
        # erfa rxp: Multiply a p-vector by an r-matrix.
        p = erfa_ufunc.rxp(matrix, self.get_d_xyz(xyz_axis=-1))

        return self.__class__(p, xyz_axis=-1, copy=False)

    def get_d_xyz(self, xyz_axis=0):
        """Return a vector array of the x, y, and z coordinates.

        Parameters
        ----------
        xyz_axis : int, optional
            The axis in the final array along which the x, y, z components
            should be stored (default: 0).

        Returns
        -------
        d_xyz : `~astropy.units.Quantity`
            With dimension 3 along ``xyz_axis``.  Note that, if possible,
            this will be a view.
        """
        if self._d_xyz is not None:
            if self._xyz_axis == xyz_axis:
                return self._d_xyz
            else:
                return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)

        # Create combined array.  TO DO: keep it in _d_xyz for repeated use?
        # But then in-place changes have to cancel it. Likely best to
        # also update components.
        return np.stack([self._d_x, self._d_y, self._d_z], axis=xyz_axis)

    d_xyz = property(get_d_xyz)


class BaseSphericalDifferential(BaseDifferential):
    def _d_lon_coslat(self, base):
        """Convert longitude differential d_lon to d_lon_coslat.

        Parameters
        ----------
        base : instance of ``cls.base_representation``
            The base from which the latitude will be taken.
        """
        self._check_base(base)
        return self.d_lon * np.cos(base.lat)

    @classmethod
    def _get_d_lon(cls, d_lon_coslat, base):
        """Convert longitude differential d_lon_coslat to d_lon.

        Parameters
        ----------
        d_lon_coslat : `~astropy.units.Quantity`
            Longitude differential that includes ``cos(lat)``.
        base : instance of ``cls.base_representation``
            The base from which the latitude will be taken.
        """
        cls._check_base(base)
        return d_lon_coslat / np.cos(base.lat)

    def _combine_operation(self, op, other, reverse=False):
        """Combine two differentials, or a differential with a representation.

        If ``other`` is of the same differential type as ``self``, the
        components will simply be combined.
        If both are different parts of
        a `~astropy.coordinates.SphericalDifferential` (e.g., a
        `~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
        appropriately.

        If ``other`` is a representation, it will be used as a base for which
        to evaluate the differential, and the result is a new representation.

        Parameters
        ----------
        op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
        other : `~astropy.coordinates.BaseRepresentation` subclass instance
            The other differential or representation.
        reverse : bool
            Whether the operands should be reversed (e.g., as we got here via
            ``self.__rsub__`` because ``self`` is a subclass of ``other``).
        """
        if (isinstance(other, BaseSphericalDifferential) and
                not isinstance(self, type(other)) or
                isinstance(other, RadialDifferential)):
            all_components = set(self.components) | set(other.components)
            first, second = (self, other) if not reverse else (other, self)
            result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
                           for c in all_components}
            return SphericalDifferential(**result_args)

        return super()._combine_operation(op, other, reverse)


class UnitSphericalDifferential(BaseSphericalDifferential):
    """Differential(s) of points on a unit sphere.

    Parameters
    ----------
    d_lon, d_lat : `~astropy.units.Quantity`
        The longitude and latitude of the differentials.
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """
    base_representation = UnitSphericalRepresentation

    @classproperty
    def _dimensional_differential(cls):
        return SphericalDifferential

    def __init__(self, d_lon, d_lat=None, copy=True):
        super().__init__(d_lon, d_lat, copy=copy)
        if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
            raise u.UnitsError('d_lon and d_lat should have equivalent units.')

    @classmethod
    def from_cartesian(cls, other, base):
        # Go via the dimensional equivalent, so that the longitude and latitude
        # differentials correctly take into account the norm of the base.
        dimensional = cls._dimensional_differential.from_cartesian(other, base)
        return dimensional.represent_as(cls)

    def to_cartesian(self, base):
        if isinstance(base, SphericalRepresentation):
            scale = base.distance
        elif isinstance(base, PhysicsSphericalRepresentation):
            scale = base.r
        else:
            return super().to_cartesian(base)

        base = base.represent_as(UnitSphericalRepresentation)
        return scale * super().to_cartesian(base)

    def represent_as(self, other_class, base=None):
        # Only have enough information to represent other unit-spherical.
        if issubclass(other_class, UnitSphericalCosLatDifferential):
            return other_class(self._d_lon_coslat(base), self.d_lat)

        return super().represent_as(other_class, base)

    @classmethod
    def from_representation(cls, representation, base=None):
        # All spherical differentials can be done without going to Cartesian,
        # though CosLat needs base for the latitude.
if isinstance(representation, SphericalDifferential): return cls(representation.d_lon, representation.d_lat) elif isinstance(representation, (SphericalCosLatDifferential, UnitSphericalCosLatDifferential)): d_lon = cls._get_d_lon(representation.d_lon_coslat, base) return cls(d_lon, representation.d_lat) elif isinstance(representation, PhysicsSphericalDifferential): return cls(representation.d_phi, -representation.d_theta) return super().from_representation(representation, base) def transform(self, matrix, base, transformed_base): """Transform differential using a 3x3 matrix in a Cartesian basis. This returns a new differential and does not modify the original one. Parameters ---------- matrix : (3,3) array-like A 3x3 (or stack thereof) matrix, such as a rotation matrix. base : instance of ``cls.base_representation`` Base relative to which the differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. transformed_base : instance of ``cls.base_representation`` Base relative to which the transformed differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. """ # the transformation matrix does not need to be a rotation matrix, # so the unit-distance is not guaranteed. For speed, we check if the # matrix is in O(3) and preserves lengths. if np.all(is_O3(matrix)): # remain in unit-rep # TODO! implement without Cartesian intermediate step. # some of this can be moved to the parent class. diff = super().transform(matrix, base, transformed_base) else: # switch to dimensional representation du = self.d_lon.unit / base.lon.unit # derivative unit diff = self._dimensional_differential( d_lon=self.d_lon, d_lat=self.d_lat, d_distance=0 * du ).transform(matrix, base, transformed_base) return diff def _scale_operation(self, op, *args, scaled_base=False): if scaled_base: return self.copy() else: return super()._scale_operation(op, *args) class SphericalDifferential(BaseSphericalDifferential): """Differential(s) of points in 3D spherical coordinates. Parameters ---------- d_lon, d_lat : `~astropy.units.Quantity` The differential longitude and latitude. d_distance : `~astropy.units.Quantity` The differential distance. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = SphericalRepresentation _unit_differential = UnitSphericalDifferential def __init__(self, d_lon, d_lat=None, d_distance=None, copy=True): super().__init__(d_lon, d_lat, d_distance, copy=copy) if not self._d_lon.unit.is_equivalent(self._d_lat.unit): raise u.UnitsError('d_lon and d_lat should have equivalent units.') def represent_as(self, other_class, base=None): # All spherical differentials can be done without going to Cartesian, # though CosLat needs base for the latitude. 
        if issubclass(other_class, UnitSphericalDifferential):
            return other_class(self.d_lon, self.d_lat)
        elif issubclass(other_class, RadialDifferential):
            return other_class(self.d_distance)
        elif issubclass(other_class, SphericalCosLatDifferential):
            return other_class(self._d_lon_coslat(base), self.d_lat,
                               self.d_distance)
        elif issubclass(other_class, UnitSphericalCosLatDifferential):
            return other_class(self._d_lon_coslat(base), self.d_lat)
        elif issubclass(other_class, PhysicsSphericalDifferential):
            return other_class(self.d_lon, -self.d_lat, self.d_distance)
        else:
            return super().represent_as(other_class, base)

    @classmethod
    def from_representation(cls, representation, base=None):
        # Other spherical differentials can be done without going to Cartesian,
        # though CosLat needs base for the latitude.
        if isinstance(representation, SphericalCosLatDifferential):
            d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
            return cls(d_lon, representation.d_lat, representation.d_distance)
        elif isinstance(representation, PhysicsSphericalDifferential):
            return cls(representation.d_phi, -representation.d_theta,
                       representation.d_r)

        return super().from_representation(representation, base)

    def _scale_operation(self, op, *args, scaled_base=False):
        if scaled_base:
            return self.__class__(self.d_lon, self.d_lat,
                                  op(self.d_distance, *args))
        else:
            return super()._scale_operation(op, *args)


class BaseSphericalCosLatDifferential(BaseDifferential):
    """Differentials from points on a spherical base representation.

    With cos(lat) assumed to be included in the longitude differential.
    """
    @classmethod
    def _get_base_vectors(cls, base):
        """Get unit vectors and scale factors from (unit)spherical base.

        Parameters
        ----------
        base : instance of ``self.base_representation``
            The points for which the unit vectors and scale factors should be
            retrieved.

        Returns
        -------
        unit_vectors : dict of `CartesianRepresentation`
            In the directions of the coordinates of base.
        scale_factors : dict of `~astropy.units.Quantity`
            Scale factors for each of the coordinates.  The scale factor for
            longitude does not include the cos(lat) factor.

        Raises
        ------
        TypeError : if the base is not of the correct type
        """
        cls._check_base(base)
        return base.unit_vectors(), base.scale_factors(omit_coslat=True)

    def _d_lon(self, base):
        """Convert longitude differential with cos(lat) to one without.

        Parameters
        ----------
        base : instance of ``cls.base_representation``
            The base from which the latitude will be taken.
        """
        self._check_base(base)
        return self.d_lon_coslat / np.cos(base.lat)

    @classmethod
    def _get_d_lon_coslat(cls, d_lon, base):
        """Convert longitude differential d_lon to d_lon_coslat.

        Parameters
        ----------
        d_lon : `~astropy.units.Quantity`
            Value of the longitude differential without ``cos(lat)``.
        base : instance of ``cls.base_representation``
            The base from which the latitude will be taken.
        """
        cls._check_base(base)
        return d_lon * np.cos(base.lat)

    def _combine_operation(self, op, other, reverse=False):
        """Combine two differentials, or a differential with a representation.

        If ``other`` is of the same differential type as ``self``, the
        components will simply be combined.  If both are different parts of
        a `~astropy.coordinates.SphericalDifferential` (e.g., a
        `~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
        appropriately.

        If ``other`` is a representation, it will be used as a base for which
        to evaluate the differential, and the result is a new representation.
        Parameters
        ----------
        op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
        other : `~astropy.coordinates.BaseRepresentation` subclass instance
            The other differential or representation.
        reverse : bool
            Whether the operands should be reversed (e.g., as we got here via
            ``self.__rsub__`` because ``self`` is a subclass of ``other``).
        """
        if (isinstance(other, BaseSphericalCosLatDifferential) and
                not isinstance(self, type(other)) or
                isinstance(other, RadialDifferential)):
            all_components = set(self.components) | set(other.components)
            first, second = (self, other) if not reverse else (other, self)
            result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
                           for c in all_components}
            return SphericalCosLatDifferential(**result_args)

        return super()._combine_operation(op, other, reverse)


class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
    """Differential(s) of points on a unit sphere.

    Parameters
    ----------
    d_lon_coslat, d_lat : `~astropy.units.Quantity`
        The longitude and latitude of the differentials.
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """
    base_representation = UnitSphericalRepresentation
    attr_classes = {'d_lon_coslat': u.Quantity,
                    'd_lat': u.Quantity}

    @classproperty
    def _dimensional_differential(cls):
        return SphericalCosLatDifferential

    def __init__(self, d_lon_coslat, d_lat=None, copy=True):
        super().__init__(d_lon_coslat, d_lat, copy=copy)
        if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
            raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
                               'units.')

    @classmethod
    def from_cartesian(cls, other, base):
        # Go via the dimensional equivalent, so that the longitude and latitude
        # differentials correctly take into account the norm of the base.
        dimensional = cls._dimensional_differential.from_cartesian(other, base)
        return dimensional.represent_as(cls)

    def to_cartesian(self, base):
        if isinstance(base, SphericalRepresentation):
            scale = base.distance
        elif isinstance(base, PhysicsSphericalRepresentation):
            scale = base.r
        else:
            return super().to_cartesian(base)

        base = base.represent_as(UnitSphericalRepresentation)
        return scale * super().to_cartesian(base)

    def represent_as(self, other_class, base=None):
        # Only have enough information to represent other unit-spherical.
        if issubclass(other_class, UnitSphericalDifferential):
            return other_class(self._d_lon(base), self.d_lat)

        return super().represent_as(other_class, base)

    @classmethod
    def from_representation(cls, representation, base=None):
        # All spherical differentials can be done without going to Cartesian,
        # though conversion from those without cos(lat) needs base for the
        # latitude.
        if isinstance(representation, SphericalCosLatDifferential):
            return cls(representation.d_lon_coslat, representation.d_lat)
        elif isinstance(representation, (SphericalDifferential,
                                         UnitSphericalDifferential)):
            d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
            return cls(d_lon_coslat, representation.d_lat)
        elif isinstance(representation, PhysicsSphericalDifferential):
            d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
            return cls(d_lon_coslat, -representation.d_theta)

        return super().from_representation(representation, base)

    def transform(self, matrix, base, transformed_base):
        """Transform differential using a 3x3 matrix in a Cartesian basis.

        This returns a new differential and does not modify the original one.
Parameters ---------- matrix : (3,3) array-like A 3x3 (or stack thereof) matrix, such as a rotation matrix. base : instance of ``cls.base_representation`` Base relative to which the differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. transformed_base : instance of ``cls.base_representation`` Base relative to which the transformed differentials are defined. If the other class is a differential representation, the base will be converted to its ``base_representation``. """ # the transformation matrix does not need to be a rotation matrix, # so the unit-distance is not guaranteed. For speed, we check if the # matrix is in O(3) and preserves lengths. if np.all(is_O3(matrix)): # remain in unit-rep # TODO! implement without Cartesian intermediate step. diff = super().transform(matrix, base, transformed_base) else: # switch to dimensional representation du = self.d_lat.unit / base.lat.unit # derivative unit diff = self._dimensional_differential( d_lon_coslat=self.d_lon_coslat, d_lat=self.d_lat, d_distance=0 * du ).transform(matrix, base, transformed_base) return diff def _scale_operation(self, op, *args, scaled_base=False): if scaled_base: return self.copy() else: return super()._scale_operation(op, *args) class SphericalCosLatDifferential(BaseSphericalCosLatDifferential): """Differential(s) of points in 3D spherical coordinates. Parameters ---------- d_lon_coslat, d_lat : `~astropy.units.Quantity` The differential longitude (with cos(lat) included) and latitude. d_distance : `~astropy.units.Quantity` The differential distance. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = SphericalRepresentation _unit_differential = UnitSphericalCosLatDifferential attr_classes = {'d_lon_coslat': u.Quantity, 'd_lat': u.Quantity, 'd_distance': u.Quantity} def __init__(self, d_lon_coslat, d_lat=None, d_distance=None, copy=True): super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy) if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit): raise u.UnitsError('d_lon_coslat and d_lat should have equivalent ' 'units.') def represent_as(self, other_class, base=None): # All spherical differentials can be done without going to Cartesian, # though some need base for the latitude to remove cos(lat). if issubclass(other_class, UnitSphericalCosLatDifferential): return other_class(self.d_lon_coslat, self.d_lat) elif issubclass(other_class, RadialDifferential): return other_class(self.d_distance) elif issubclass(other_class, SphericalDifferential): return other_class(self._d_lon(base), self.d_lat, self.d_distance) elif issubclass(other_class, UnitSphericalDifferential): return other_class(self._d_lon(base), self.d_lat) elif issubclass(other_class, PhysicsSphericalDifferential): return other_class(self._d_lon(base), -self.d_lat, self.d_distance) return super().represent_as(other_class, base) @classmethod def from_representation(cls, representation, base=None): # Other spherical differentials can be done without going to Cartesian, # though we need base for the latitude to remove coslat. 
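        # (Illustrative note: with cls._get_d_lon_coslat defined above, a
        # SphericalDifferential with d_lon = 1 arcsec/yr at base.lat = 60 deg
        # maps to d_lon_coslat = cos(60 deg) * 1 = 0.5 arcsec/yr.)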
if isinstance(representation, SphericalDifferential): d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base) return cls(d_lon_coslat, representation.d_lat, representation.d_distance) elif isinstance(representation, PhysicsSphericalDifferential): d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base) return cls(d_lon_coslat, -representation.d_theta, representation.d_r) return super().from_representation(representation, base) def _scale_operation(self, op, *args, scaled_base=False): if scaled_base: return self.__class__(self.d_lon_coslat, self.d_lat, op(self.d_distance, *args)) else: return super()._scale_operation(op, *args) class RadialDifferential(BaseDifferential): """Differential(s) of radial distances. Parameters ---------- d_distance : `~astropy.units.Quantity` The differential distance. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = RadialRepresentation def to_cartesian(self, base): return self.d_distance * base.represent_as( UnitSphericalRepresentation).to_cartesian() def norm(self, base=None): return self.d_distance @classmethod def from_cartesian(cls, other, base): return cls(other.dot(base.represent_as(UnitSphericalRepresentation)), copy=False) @classmethod def from_representation(cls, representation, base=None): if isinstance(representation, (SphericalDifferential, SphericalCosLatDifferential)): return cls(representation.d_distance) elif isinstance(representation, PhysicsSphericalDifferential): return cls(representation.d_r) else: return super().from_representation(representation, base) def _combine_operation(self, op, other, reverse=False): if isinstance(other, self.base_representation): if reverse: first, second = other.distance, self.d_distance else: first, second = self.d_distance, other.distance return other.__class__(op(first, second), copy=False) elif isinstance(other, (BaseSphericalDifferential, BaseSphericalCosLatDifferential)): all_components = set(self.components) | set(other.components) first, second = (self, other) if not reverse else (other, self) result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.)) for c in all_components} return SphericalDifferential(**result_args) else: return super()._combine_operation(op, other, reverse) class PhysicsSphericalDifferential(BaseDifferential): """Differential(s) of 3D spherical coordinates using physics convention. Parameters ---------- d_phi, d_theta : `~astropy.units.Quantity` The differential azimuth and inclination. d_r : `~astropy.units.Quantity` The differential radial distance. copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ base_representation = PhysicsSphericalRepresentation def __init__(self, d_phi, d_theta=None, d_r=None, copy=True): super().__init__(d_phi, d_theta, d_r, copy=copy) if not self._d_phi.unit.is_equivalent(self._d_theta.unit): raise u.UnitsError('d_phi and d_theta should have equivalent ' 'units.') def represent_as(self, other_class, base=None): # All spherical differentials can be done without going to Cartesian, # though CosLat needs base for the latitude. For those, explicitly # do the equivalent of self._d_lon_coslat in SphericalDifferential. 
        if issubclass(other_class, SphericalDifferential):
            return other_class(self.d_phi, -self.d_theta, self.d_r)
        elif issubclass(other_class, UnitSphericalDifferential):
            return other_class(self.d_phi, -self.d_theta)
        elif issubclass(other_class, SphericalCosLatDifferential):
            self._check_base(base)
            d_lon_coslat = self.d_phi * np.sin(base.theta)
            return other_class(d_lon_coslat, -self.d_theta, self.d_r)
        elif issubclass(other_class, UnitSphericalCosLatDifferential):
            self._check_base(base)
            d_lon_coslat = self.d_phi * np.sin(base.theta)
            return other_class(d_lon_coslat, -self.d_theta)
        elif issubclass(other_class, RadialDifferential):
            return other_class(self.d_r)

        return super().represent_as(other_class, base)

    @classmethod
    def from_representation(cls, representation, base=None):
        # Other spherical differentials can be done without going to Cartesian,
        # though we need base for the latitude to remove coslat. For that case,
        # do the equivalent of cls._d_lon in SphericalDifferential.
        if isinstance(representation, SphericalDifferential):
            return cls(representation.d_lon, -representation.d_lat,
                       representation.d_distance)
        elif isinstance(representation, SphericalCosLatDifferential):
            cls._check_base(base)
            d_phi = representation.d_lon_coslat / np.sin(base.theta)
            return cls(d_phi, -representation.d_lat,
                       representation.d_distance)

        return super().from_representation(representation, base)

    def _scale_operation(self, op, *args, scaled_base=False):
        if scaled_base:
            return self.__class__(self.d_phi, self.d_theta,
                                  op(self.d_r, *args))
        else:
            return super()._scale_operation(op, *args)


class CylindricalDifferential(BaseDifferential):
    """Differential(s) of points in cylindrical coordinates.

    Parameters
    ----------
    d_rho : `~astropy.units.Quantity` ['speed']
        The differential cylindrical radius.
    d_phi : `~astropy.units.Quantity` ['angular speed']
        The differential azimuth.
    d_z : `~astropy.units.Quantity` ['speed']
        The differential height.
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """
    base_representation = CylindricalRepresentation

    def __init__(self, d_rho, d_phi=None, d_z=None, copy=True):
        super().__init__(d_rho, d_phi, d_z, copy=copy)
        if not self._d_rho.unit.is_equivalent(self._d_z.unit):
            raise u.UnitsError("d_rho and d_z should have equivalent units.")
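# A minimal usage sketch of the differential classes above (illustrative,
# doctest-style; the quoted reprs are assumptions rather than captured output):
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import CartesianDifferential
#     >>> diff = CartesianDifferential(10., 20., 30., unit=u.km / u.s)
#     >>> diff.norm()  # Cartesian differentials need no ``base`` for the norm
#     <Quantity 37.41657387 km / s>
#     >>> CartesianDifferential([10., 20., 30.] * u.km / u.s).d_x
#     <Quantity 10. km / s>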
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
This module contains convenience functions for getting a coordinate object
for a named object by querying SESAME and getting the first returned result.
Note that this is intended to be a convenience, and is very simple. If you
need precise coordinates for an object you should find the appropriate
reference for that measurement and input the coordinates manually.
"""

# Standard library
import os
import re
import socket
import urllib.request
import urllib.parse
import urllib.error

# Astropy
from astropy import units as u
from .sky_coordinate import SkyCoord
from astropy.utils import data
from astropy.utils.data import download_file, get_file_contents
from astropy.utils.state import ScienceState

__all__ = ["get_icrs_coordinates"]


class sesame_url(ScienceState):
    """
    The URL(s) to Sesame's web-queryable database.
    """
    _value = ["http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/",
              "http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/"]

    @classmethod
    def validate(cls, value):
        # TODO: Implement me
        return value


class sesame_database(ScienceState):
    """
    This specifies the default database that SESAME will query when
    using the name resolve mechanism in the coordinates
    subpackage. Default is to search all databases, but this can be
    'all', 'simbad', 'ned', or 'vizier'.
    """
    _value = 'all'

    @classmethod
    def validate(cls, value):
        if value not in ['all', 'simbad', 'ned', 'vizier']:
            raise ValueError(f"Unknown database '{value}'")
        return value


class NameResolveError(Exception):
    pass


def _parse_response(resp_data):
    """
    Given a string response from SESAME, parse out the coordinates by looking
    for a line starting with a J, meaning ICRS J2000 coordinates.

    Parameters
    ----------
    resp_data : str
        The string HTTP response from SESAME.

    Returns
    -------
    ra : str
        The string Right Ascension parsed from the HTTP response.
    dec : str
        The string Declination parsed from the HTTP response.
    """
    pattr = re.compile(r"%J\s*([0-9\.]+)\s*([\+\-\.0-9]+)")
    matched = pattr.search(resp_data)

    if matched is None:
        return None, None
    else:
        ra, dec = matched.groups()
        return ra, dec


def get_icrs_coordinates(name, parse=False, cache=False):
    """
    Retrieve an ICRS object by using an online name-resolving service to
    look up coordinates for the specified name. By default, this will
    search all available databases until a match is found. If you would like
    to specify the database, use the science state
    ``astropy.coordinates.name_resolve.sesame_database``. You can also
    specify a list of servers to use for querying Sesame using the science
    state ``astropy.coordinates.name_resolve.sesame_url``. This will try
    each one in order until a valid response is returned. By default, this
    list includes the main Sesame host and a mirror at vizier. The
    configuration item `astropy.utils.data.Conf.remote_timeout` controls the
    number of seconds to wait for a response from the server before giving
    up.

    Parameters
    ----------
    name : str
        The name of the object to get coordinates for, e.g. ``'M42'``.
    parse : bool
        Whether to attempt extracting the coordinates from the name by
        parsing with a regex. For object catalog names that have
        J-coordinates embedded in them, e.g., 'CRTS SSS100805 J194428-420209',
        this may be much faster than a Sesame query for the same object name.
        The coordinates extracted in this way may differ from the database
        coordinates by a few deci-arcseconds, so only use this option if you
        do not need sub-arcsecond accuracy for coordinates.
    cache : bool, str, optional
        Determines whether to cache the results or not. Passed through to
        `~astropy.utils.data.download_file`, so pass "update" to update the
        cached value.

    Returns
    -------
    coord : `astropy.coordinates.SkyCoord`
        The object's coordinates in the ICRS frame.
    """
    # if requested, first try to extract coordinates embedded in the object
    # name. Do this first since it may be much faster than doing the sesame query
    if parse:
        from . import jparser
        if jparser.search(name):
            return jparser.to_skycoord(name)
        else:
            # if the parser failed, fall back to sesame query.
            pass
            # maybe emit a warning instead of silently falling back to sesame?

    database = sesame_database.get()
    # The web API just takes the first letter of the database name
    db = database.upper()[0]

    # Make sure we don't have duplicates in the url list
    urls = []
    domains = []
    for url in sesame_url.get():
        domain = urllib.parse.urlparse(url).netloc

        # Check for duplicates
        if domain not in domains:
            domains.append(domain)

            # Add the query to the end of the url, add to url list
            fmt_url = os.path.join(url, "{db}?{name}")
            fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db)
            urls.append(fmt_url)

    exceptions = []
    for url in urls:
        try:
            resp_data = get_file_contents(
                download_file(url, cache=cache, show_progress=False))
            break
        except urllib.error.URLError as e:
            exceptions.append(e)
            continue
        except socket.timeout as e:
            # There are some cases where urllib does not catch socket.timeout,
            # especially while receiving response data on an already previously
            # working request
            e.reason = ("Request took longer than the allowed "
                        f"{data.conf.remote_timeout:.1f} seconds")
            exceptions.append(e)
            continue

    # All Sesame URLs failed...
    else:
        messages = [f"{url}: {e.reason}" for url, e in zip(urls, exceptions)]
        raise NameResolveError("All Sesame queries failed. Unable to "
                               "retrieve coordinates. See errors per URL "
                               f"below: \n {os.linesep.join(messages)}")

    ra, dec = _parse_response(resp_data)

    if ra is None or dec is None:
        if db == "A":
            err = f"Unable to find coordinates for name '{name}' using {url}"
        else:
            err = f"Unable to find coordinates for name '{name}' in database {database} using {url}"

        raise NameResolveError(err)

    # Return SkyCoord object
    sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
    return sc
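# A short illustrative sketch of how this function is typically used (the
# Sesame call needs network access, so this is not written as a doctest):
#
#     from astropy.coordinates.name_resolve import get_icrs_coordinates
#
#     m42 = get_icrs_coordinates('M42')  # queries Sesame over the network
#     crts = get_icrs_coordinates('CRTS SSS100805 J194428-420209',
#                                 parse=True)  # parsed locally, no query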
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains utility functions for working with angles. These are both
used internally in astropy.coordinates.angles, and of possible use externally.
"""

__all__ = ['angular_separation', 'position_angle', 'offset_by',
           'golden_spiral_grid', 'uniform_spherical_random_surface',
           'uniform_spherical_random_volume']

# Third-party
import numpy as np

# Astropy
import astropy.units as u
from astropy.coordinates.representation import (
    UnitSphericalRepresentation, SphericalRepresentation)


def angular_separation(lon1, lat1, lon2, lat2):
    """
    Angular separation between two points on a sphere.

    Parameters
    ----------
    lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
        Longitude and latitude of the two points. Quantities should be in
        angular units; floats in radians.

    Returns
    -------
    angular separation : `~astropy.units.Quantity` ['angle'] or float
        Type depends on input; ``Quantity`` in angular units, or float in
        radians.

    Notes
    -----
    The angular separation is calculated using the Vincenty formula [1]_,
    which is slightly more complex and computationally expensive than
    some alternatives, but is stable at all distances, including the
    poles and antipodes.

    .. [1] https://en.wikipedia.org/wiki/Great-circle_distance
    """
    sdlon = np.sin(lon2 - lon1)
    cdlon = np.cos(lon2 - lon1)
    slat1 = np.sin(lat1)
    slat2 = np.sin(lat2)
    clat1 = np.cos(lat1)
    clat2 = np.cos(lat2)

    num1 = clat2 * sdlon
    num2 = clat1 * slat2 - slat1 * clat2 * cdlon
    denominator = slat1 * slat2 + clat1 * clat2 * cdlon

    return np.arctan2(np.hypot(num1, num2), denominator)


def position_angle(lon1, lat1, lon2, lat2):
    """
    Position Angle (East of North) between two points on a sphere.

    Parameters
    ----------
    lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
        Longitude and latitude of the two points. Quantities should be in
        angular units; floats in radians.

    Returns
    -------
    pa : `~astropy.coordinates.Angle`
        The (positive) position angle of the vector pointing from position 1
        to position 2.  If any of the angles are arrays, this will contain an
        array following the appropriate `numpy` broadcasting rules.
    """
    from .angles import Angle

    deltalon = lon2 - lon1
    colat = np.cos(lat2)

    x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon)
    y = np.sin(deltalon) * colat

    return Angle(np.arctan2(y, x), u.radian).wrap_at(360*u.deg)


def offset_by(lon, lat, posang, distance):
    """
    Point with the given offset from the given point.

    Parameters
    ----------
    lon, lat, posang, distance : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
        Longitude and latitude of the starting point,
        position angle and distance to the final point.
        Quantities should be in angular units; floats in radians.
        Polar points at lat= +/-90 are treated as limit of +/-(90-epsilon) and same lon.

    Returns
    -------
    lon, lat : `~astropy.coordinates.Angle`
        The position of the final point.  If any of the angles are arrays,
        these will contain arrays following the appropriate `numpy` broadcasting rules.
        0 <= lon < 2pi.
    """
    from .angles import Angle

    # Calculations are done using the spherical trigonometry sine and cosine rules
    # of the triangle A at North Pole,   B at starting point,   C at final point
    # with angles     A (change in lon), B (posang),            C (not used, but negative reciprocal posang)
    # with sides      a (distance),      b (final co-latitude), c (starting colatitude)
    # B, a, c are knowns; A and b are unknowns
    # https://en.wikipedia.org/wiki/Spherical_trigonometry

    cos_a = np.cos(distance)
    sin_a = np.sin(distance)
    cos_c = np.sin(lat)
    sin_c = np.cos(lat)
    cos_B = np.cos(posang)
    sin_B = np.sin(posang)

    # cosine rule: Know two sides: a,c and included angle: B; get unknown side b
    cos_b = cos_c * cos_a + sin_c * sin_a * cos_B
    # sin_b = np.sqrt(1 - cos_b**2)
    # sine rule and cosine rule for A (using both lets arctan2 pick quadrant).
    # multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors
    # at poles.  Correct for the x=0 multiplication a few lines down.
    # sin_A/sin_a == sin_B/sin_b    # Sine rule
    xsin_A = sin_a * sin_B * sin_c
    # cos_a == cos_b * cos_c + sin_b * sin_c * cos_A  # cosine rule
    xcos_A = cos_a - cos_b * cos_c

    A = Angle(np.arctan2(xsin_A, xcos_A), u.radian)
    # Treat the poles as if they are infinitesimally far from pole but at given lon
    small_sin_c = sin_c < 1e-12
    if small_sin_c.any():
        # For the south pole (cos_c = -1), A = posang; for the north pole, A = 180 deg - posang
        A_pole = (90*u.deg + cos_c*(90*u.deg-Angle(posang, u.radian))).to(u.rad)
        if A.shape:
            # broadcast to ensure the shape is like that of A, which is also
            # affected by the (possible) shapes of lat, posang, and distance.
            small_sin_c = np.broadcast_to(small_sin_c, A.shape)
            A[small_sin_c] = A_pole[small_sin_c]
        else:
            A = A_pole

    outlon = (Angle(lon, u.radian) + A).wrap_at(360.0*u.deg).to(u.deg)
    outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg)

    return outlon, outlat


def golden_spiral_grid(size):
    """Generate a grid of points on the surface of the unit sphere using the
    Fibonacci or Golden Spiral method.

    .. seealso::

        `Evenly distributing points on a sphere <https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere>`_

    Parameters
    ----------
    size : int
        The number of points to generate.

    Returns
    -------
    rep : `~astropy.coordinates.UnitSphericalRepresentation`
        The grid of points.
    """
    golden_r = (1 + 5**0.5) / 2

    grid = np.arange(0, size, dtype=float) + 0.5
    lon = 2*np.pi / golden_r * grid * u.rad
    lat = np.arcsin(1 - 2 * grid / size) * u.rad

    return UnitSphericalRepresentation(lon, lat)


def uniform_spherical_random_surface(size=1):
    """Generate a random sampling of points on the surface of the unit sphere.

    Parameters
    ----------
    size : int
        The number of points to generate.

    Returns
    -------
    rep : `~astropy.coordinates.UnitSphericalRepresentation`
        The random points.
    """

    rng = np.random  # can maybe switch to this being an input later - see #11628

    lon = rng.uniform(0, 2*np.pi, size) * u.rad
    lat = np.arcsin(rng.uniform(-1, 1, size=size)) * u.rad

    return UnitSphericalRepresentation(lon, lat)


def uniform_spherical_random_volume(size=1, max_radius=1):
    """Generate a random sampling of points that follow a uniform volume
    density distribution within a sphere.

    Parameters
    ----------
    size : int
        The number of points to generate.
    max_radius : number, quantity-like, optional
        A dimensionless or unit-ful factor to scale the random distances.

    Returns
    -------
    rep : `~astropy.coordinates.SphericalRepresentation`
        The random points.
""" rng = np.random # can maybe switch to this being an input later - see #11628 usph = uniform_spherical_random_surface(size=size) r = np.cbrt(rng.uniform(size=size)) * u.Quantity(max_radius, copy=False) return SphericalRepresentation(usph.lon, usph.lat, r) # # below here can be deleted in v5.0 from astropy.utils.decorators import deprecated from astropy.coordinates import angle_formats __old_angle_utilities_funcs = ['check_hms_ranges', 'degrees_to_dms', 'degrees_to_string', 'dms_to_degrees', 'format_exception', 'hms_to_degrees', 'hms_to_dms', 'hms_to_hours', 'hms_to_radians', 'hours_to_decimal', 'hours_to_hms', 'hours_to_radians', 'hours_to_string', 'parse_angle', 'radians_to_degrees', 'radians_to_dms', 'radians_to_hms', 'radians_to_hours', 'sexagesimal_to_string'] for funcname in __old_angle_utilities_funcs: vars()[funcname] = deprecated(name='astropy.coordinates.angle_utilities.' + funcname, alternative='astropy.coordinates.angle_formats.' + funcname, since='v4.3')(getattr(angle_formats, funcname))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.

The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.

This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""


import heapq
import inspect
import subprocess
from warnings import warn

from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import suppress, contextmanager
from inspect import signature

import numpy as np

from astropy import units as u
from astropy.utils.exceptions import AstropyWarning

from .matrix_utilities import matrix_product

__all__ = ['TransformGraph', 'CoordinateTransform', 'FunctionTransform',
           'BaseAffineTransform', 'AffineTransform',
           'StaticMatrixTransform', 'DynamicMatrixTransform',
           'FunctionTransformWithFiniteDifference', 'CompositeTransform']


def frame_attrs_from_set(frame_set):
    """
    A `dict` of all the attributes of all frame classes in this
    `TransformGraph`.

    Broken out of the class so this can be called on a temporary frame set to
    validate new additions to the transform graph before actually adding them.
    """
    result = {}

    for frame_cls in frame_set:
        result.update(frame_cls.frame_attributes)

    return result


def frame_comps_from_set(frame_set):
    """
    A `set` of all component names ever defined within any frame class in
    this `TransformGraph`.

    Broken out of the class so this can be called on a temporary frame set to
    validate new additions to the transform graph before actually adding them.
    """
    result = set()

    for frame_cls in frame_set:
        rep_info = frame_cls._frame_specific_representation_info
        for mappings in rep_info.values():
            for rep_map in mappings:
                result.update([rep_map.framename])

    return result


class TransformGraph:
    """
    A graph representing the paths between coordinate frames.
    """

    def __init__(self):
        self._graph = defaultdict(dict)
        self.invalidate_cache()  # generates cache entries

    @property
    def _cached_names(self):
        if self._cached_names_dct is None:
            self._cached_names_dct = dct = {}
            for c in self.frame_set:
                nm = getattr(c, 'name', None)
                if nm is not None:
                    if not isinstance(nm, list):
                        nm = [nm]
                    for name in nm:
                        dct[name] = c

        return self._cached_names_dct

    @property
    def frame_set(self):
        """
        A `set` of all the frame classes present in this `TransformGraph`.
        """
        if self._cached_frame_set is None:
            self._cached_frame_set = set()
            for a in self._graph:
                self._cached_frame_set.add(a)
                for b in self._graph[a]:
                    self._cached_frame_set.add(b)

        return self._cached_frame_set.copy()

    @property
    def frame_attributes(self):
        """
        A `dict` of all the attributes of all frame classes in this
        `TransformGraph`.
        """
        if self._cached_frame_attributes is None:
            self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)

        return self._cached_frame_attributes

    @property
    def frame_component_names(self):
        """
        A `set` of all component names ever defined within any frame class in
        this `TransformGraph`.
""" if self._cached_component_names is None: self._cached_component_names = frame_comps_from_set(self.frame_set) return self._cached_component_names def invalidate_cache(self): """ Invalidates the cache that stores optimizations for traversing the transform graph. This is called automatically when transforms are added or removed, but will need to be called manually if weights on transforms are modified inplace. """ self._cached_names_dct = None self._cached_frame_set = None self._cached_frame_attributes = None self._cached_component_names = None self._shortestpaths = {} self._composite_cache = {} def add_transform(self, fromsys, tosys, transform): """ Add a new coordinate transformation to the graph. Parameters ---------- fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform into. transform : `CoordinateTransform` The transformation object. Typically a `CoordinateTransform` object, although it may be some other callable that is called with the same signature. Raises ------ TypeError If ``fromsys`` or ``tosys`` are not classes or ``transform`` is not callable. """ if not inspect.isclass(fromsys): raise TypeError('fromsys must be a class') if not inspect.isclass(tosys): raise TypeError('tosys must be a class') if not callable(transform): raise TypeError('transform must be callable') frame_set = self.frame_set.copy() frame_set.add(fromsys) frame_set.add(tosys) # Now we check to see if any attributes on the proposed frames override # *any* component names, which we can't allow for some of the logic in # the SkyCoord initializer to work attrs = set(frame_attrs_from_set(frame_set).keys()) comps = frame_comps_from_set(frame_set) invalid_attrs = attrs.intersection(comps) if invalid_attrs: invalid_frames = set() for attr in invalid_attrs: if attr in fromsys.frame_attributes: invalid_frames.update([fromsys]) if attr in tosys.frame_attributes: invalid_frames.update([tosys]) raise ValueError("Frame(s) {} contain invalid attribute names: {}" "\nFrame attributes can not conflict with *any* of" " the frame data component names (see" " `frame_transform_graph.frame_component_names`)." .format(list(invalid_frames), invalid_attrs)) self._graph[fromsys][tosys] = transform self.invalidate_cache() def remove_transform(self, fromsys, tosys, transform): """ Removes a coordinate transform from the graph. Parameters ---------- fromsys : class or None The coordinate frame *class* to start from. If `None`, ``transform`` will be searched for and removed (``tosys`` must also be `None`). tosys : class or None The coordinate frame *class* to transform into. If `None`, ``transform`` will be searched for and removed (``fromsys`` must also be `None`). transform : callable or None The transformation object to be removed or `None`. If `None` and ``tosys`` and ``fromsys`` are supplied, there will be no check to ensure the correct object is removed. 
""" if fromsys is None or tosys is None: if not (tosys is None and fromsys is None): raise ValueError('fromsys and tosys must both be None if either are') if transform is None: raise ValueError('cannot give all Nones to remove_transform') # search for the requested transform by brute force and remove it for a in self._graph: agraph = self._graph[a] for b in agraph: if agraph[b] is transform: del agraph[b] fromsys = a break # If the transform was found, need to break out of the outer for loop too if fromsys: break else: raise ValueError(f'Could not find transform {transform} in the graph') else: if transform is None: self._graph[fromsys].pop(tosys, None) else: curr = self._graph[fromsys].get(tosys, None) if curr is transform: self._graph[fromsys].pop(tosys) else: raise ValueError('Current transform from {} to {} is not ' '{}'.format(fromsys, tosys, transform)) # Remove the subgraph if it is now empty if self._graph[fromsys] == {}: self._graph.pop(fromsys) self.invalidate_cache() def find_shortest_path(self, fromsys, tosys): """ Computes the shortest distance along the transform graph from one system to another. Parameters ---------- fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform into. Returns ------- path : list of class or None The path from ``fromsys`` to ``tosys`` as an in-order sequence of classes. This list includes *both* ``fromsys`` and ``tosys``. Is `None` if there is no possible path. distance : float or int The total distance/priority from ``fromsys`` to ``tosys``. If priorities are not set this is the number of transforms needed. Is ``inf`` if there is no possible path. """ inf = float('inf') # special-case the 0 or 1-path if tosys is fromsys: if tosys not in self._graph[fromsys]: # Means there's no transform necessary to go from it to itself. return [tosys], 0 if tosys in self._graph[fromsys]: # this will also catch the case where tosys is fromsys, but has # a defined transform. t = self._graph[fromsys][tosys] return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1) # otherwise, need to construct the path: if fromsys in self._shortestpaths: # already have a cached result fpaths = self._shortestpaths[fromsys] if tosys in fpaths: return fpaths[tosys] else: return None, inf # use Dijkstra's algorithm to find shortest path in all other cases nodes = [] # first make the list of nodes for a in self._graph: if a not in nodes: nodes.append(a) for b in self._graph[a]: if b not in nodes: nodes.append(b) if fromsys not in nodes or tosys not in nodes: # fromsys or tosys are isolated or not registered, so there's # certainly no way to get from one to the other return None, inf edgeweights = {} # construct another graph that is a dict of dicts of priorities # (used as edge weights in Dijkstra's algorithm) for a in self._graph: edgeweights[a] = aew = {} agraph = self._graph[a] for b in agraph: aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1) # entries in q are [distance, count, nodeobj, pathlist] # count is needed because in py 3.x, tie-breaking fails on the nodes. 
        # this way, insertion order is preserved if the weights are the same
        q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
        q.insert(0, [0, -1, fromsys, []])

        # this dict will store the distance to node from ``fromsys`` and the path
        result = {}

        # definitely starts as a valid heap because of the insert line; from the
        # node to itself is always the shortest distance
        while len(q) > 0:
            d, orderi, n, path = heapq.heappop(q)

            if d == inf:
                # everything left is unreachable from fromsys, just copy them to
                # the results and jump out of the loop
                result[n] = (None, d)
                for d, orderi, n, path in q:
                    result[n] = (None, d)
                break
            else:
                result[n] = (path, d)
                path.append(n)
                if n not in edgeweights:
                    # this is a system that can be transformed to, but not from.
                    continue
                for n2 in edgeweights[n]:
                    if n2 not in result:  # only update nodes not yet visited
                        # find where n2 is in the heap
                        for i in range(len(q)):
                            if q[i][2] == n2:
                                break
                        else:
                            raise ValueError('n2 not in heap - this should be impossible!')

                        newd = d + edgeweights[n][n2]
                        if newd < q[i][0]:
                            q[i][0] = newd
                            q[i][3] = list(path)
                            heapq.heapify(q)

        # cache for later use
        self._shortestpaths[fromsys] = result
        return result[tosys]

    def get_transform(self, fromsys, tosys):
        """
        Generates and returns the `CompositeTransform` for a transformation
        between two coordinate systems.

        Parameters
        ----------
        fromsys : class
            The coordinate frame class to start from.
        tosys : class
            The coordinate frame class to transform into.

        Returns
        -------
        trans : `CompositeTransform` or None
            If there is a path from ``fromsys`` to ``tosys``, this is a
            transform object for that path.   If no path could be found, this is
            `None`.

        Notes
        -----
        This function always returns a `CompositeTransform`, because
        `CompositeTransform` is slightly more adaptable in the way it can be
        called than other transform classes. Specifically, it takes care of
        intermediate steps of transformations in a way that is consistent with
        1-hop transformations.
        """
        if not inspect.isclass(fromsys):
            raise TypeError('fromsys is not a class')
        if not inspect.isclass(tosys):
            raise TypeError('tosys is not a class')

        path, distance = self.find_shortest_path(fromsys, tosys)

        if path is None:
            return None

        transforms = []
        currsys = fromsys
        for p in path[1:]:  # first element is fromsys so we skip it
            transforms.append(self._graph[currsys][p])
            currsys = p

        fttuple = (fromsys, tosys)
        if fttuple not in self._composite_cache:
            comptrans = CompositeTransform(transforms, fromsys, tosys,
                                           register_graph=False)
            self._composite_cache[fttuple] = comptrans

        return self._composite_cache[fttuple]

    def lookup_name(self, name):
        """
        Tries to locate the coordinate class with the provided alias.

        Parameters
        ----------
        name : str
            The alias to look up.

        Returns
        -------
        `BaseCoordinateFrame` subclass
            The coordinate class corresponding to the ``name`` or `None` if
            no such class exists.
        """
        return self._cached_names.get(name, None)

    def get_names(self):
        """
        Returns all available transform names. They will all be
        valid arguments to `lookup_name`.

        Returns
        -------
        nms : list
            The aliases for coordinate systems.
        """
        return list(self._cached_names.keys())

    def to_dot_graph(self, priorities=True, addnodes=[], savefn=None,
                     savelayout='plain', saveformat=None, color_edges=True):
        """
        Converts this transform graph to the graphviz_ DOT format.

        Optionally saves it (requires `graphviz`_ be installed and on your path).

        .. _graphviz: http://www.graphviz.org/

        Parameters
        ----------
        priorities : bool
            If `True`, show the priority values for each transform.  Otherwise,
            they will not be included in the graph.
        addnodes : sequence of str
            Additional coordinate systems to add (this can include systems
            already in the transform graph, but they will only appear once).
        savefn : None or str
            The file name to save this graph to or `None` to not save
            to a file.
        savelayout : str
            The graphviz program to use to layout the graph (see graphviz_ for
            details) or 'plain' to just save the DOT graph content. Ignored if
            ``savefn`` is `None`.
        saveformat : str
            The graphviz output format. (e.g. the ``-Txxx`` option for
            the command line program - see graphviz docs for details).
            Ignored if ``savefn`` is `None`.
        color_edges : bool
            Color the edges between two nodes (frames) based on the type of
            transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
            blue, ``DynamicMatrixTransform``: green.

        Returns
        -------
        dotgraph : str
            A string with the DOT format graph.
        """
        nodes = []
        # find the node names
        for a in self._graph:
            if a not in nodes:
                nodes.append(a)
            for b in self._graph[a]:
                if b not in nodes:
                    nodes.append(b)
        for node in addnodes:
            if node not in nodes:
                nodes.append(node)

        nodenames = []
        invclsaliases = dict([(f, [k for k, v in self._cached_names.items() if v == f])
                              for f in self.frame_set])
        for n in nodes:
            if n in invclsaliases:
                aliases = '`\\n`'.join(invclsaliases[n])
                nodenames.append('{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, aliases))
            else:
                nodenames.append(n.__name__ + '[ shape=oval ]')

        edgenames = []
        # Now the edges
        for a in self._graph:
            agraph = self._graph[a]
            for b in agraph:
                transform = agraph[b]
                pri = transform.priority if hasattr(transform, 'priority') else 1
                color = trans_to_color[transform.__class__] if color_edges else 'black'
                edgenames.append((a.__name__, b.__name__, pri, color))

        # generate simple dot format graph
        lines = ['digraph AstropyCoordinateTransformGraph {']
        lines.append('graph [rankdir=LR]')
        lines.append('; '.join(nodenames) + ';')
        for enm1, enm2, weights, color in edgenames:
            labelstr_fmt = '[ {0} {1} ]'

            if priorities:
                priority_part = f'label = "{weights}"'
            else:
                priority_part = ''

            color_part = f'color = "{color}"'

            labelstr = labelstr_fmt.format(priority_part, color_part)
            lines.append(f'{enm1} -> {enm2}{labelstr};')

        lines.append('')
        lines.append('overlap=false')
        lines.append('}')
        dotgraph = '\n'.join(lines)

        if savefn is not None:
            if savelayout == 'plain':
                with open(savefn, 'w') as f:
                    f.write(dotgraph)
            else:
                args = [savelayout]
                if saveformat is not None:
                    args.append('-T' + saveformat)
                proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
                # graphviz expects bytes on stdin and produces bytes on stdout,
                # so encode the DOT source and write the result in binary mode.
                stdout, stderr = proc.communicate(dotgraph.encode('utf-8'))
                if proc.returncode != 0:
                    raise OSError('problem running graphviz: \n' +
                                  stderr.decode('utf-8'))

                with open(savefn, 'wb') as f:
                    f.write(stdout)

        return dotgraph

    def to_networkx_graph(self):
        """
        Converts this transform graph into a networkx graph.

        .. note::
            You must have the `networkx <https://networkx.github.io/>`_
            package installed for this to work.

        Returns
        -------
        nxgraph : ``networkx.Graph``
            This `TransformGraph` as a `networkx.Graph
            <https://networkx.github.io/documentation/stable/reference/classes/graph.html>`_.
""" import networkx as nx nxgraph = nx.Graph() # first make the nodes for a in self._graph: if a not in nxgraph: nxgraph.add_node(a) for b in self._graph[a]: if b not in nxgraph: nxgraph.add_node(b) # Now the edges for a in self._graph: agraph = self._graph[a] for b in agraph: transform = agraph[b] pri = transform.priority if hasattr(transform, 'priority') else 1 color = trans_to_color[transform.__class__] nxgraph.add_edge(a, b, weight=pri, color=color) return nxgraph def transform(self, transcls, fromsys, tosys, priority=1, **kwargs): """ A function decorator for defining transformations. .. note:: If decorating a static method of a class, ``@staticmethod`` should be added *above* this decorator. Parameters ---------- transcls : class The class of the transformation object to create. fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform into. priority : float or int The priority if this transform when finding the shortest coordinate transform path - large numbers are lower priorities. Additional keyword arguments are passed into the ``transcls`` constructor. Returns ------- deco : function A function that can be called on another function as a decorator (see example). Notes ----- This decorator assumes the first argument of the ``transcls`` initializer accepts a callable, and that the second and third are ``fromsys`` and ``tosys``. If this is not true, you should just initialize the class manually and use `add_transform` instead of using this decorator. Examples -------- :: graph = TransformGraph() class Frame1(BaseCoordinateFrame): ... class Frame2(BaseCoordinateFrame): ... @graph.transform(FunctionTransform, Frame1, Frame2) def f1_to_f2(f1_obj): ... do something with f1_obj ... return f2_obj """ def deco(func): # this doesn't do anything directly with the transform because # ``register_graph=self`` stores it in the transform graph # automatically transcls(func, fromsys, tosys, priority=priority, register_graph=self, **kwargs) return func return deco def _add_merged_transform(self, fromsys, tosys, *furthersys, priority=1): """ Add a single-step transform that encapsulates a multi-step transformation path, using the transforms that already exist in the graph. The created transform internally calls the existing transforms. If all of the transforms are affine, the merged transform is `~astropy.coordinates.transformations.DynamicMatrixTransform` (if there are no origin shifts) or `~astropy.coordinates.transformations.AffineTransform` (otherwise). If at least one of the transforms is not affine, the merged transform is `~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`. This method is primarily useful for defining loopback transformations (i.e., where ``fromsys`` and the final ``tosys`` are the same). Parameters ---------- fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform to. furthersys : class Additional coordinate frame classes to transform to in order. priority : number The priority of this transform when finding the shortest coordinate transform path - large numbers are lower priorities. Notes ----- Even though the created transform is a single step in the graph, it will still internally call the constituent transforms. Thus, there is no performance benefit for using this created transform. For Astropy's built-in frames, loopback transformations typically use `~astropy.coordinates.ICRS` to be safe. 
Transforming through an inertial frame ensures that changes in observation
        time and observer location/velocity are properly accounted for.

        An error will be raised if a direct transform between ``fromsys`` and
        ``tosys`` already exists.
        """
        frames = [fromsys, tosys, *furthersys]
        lastsys = frames[-1]
        full_path = self.get_transform(fromsys, lastsys)
        transforms = [self.get_transform(frame_a, frame_b)
                      for frame_a, frame_b in zip(frames[:-1], frames[1:])]
        if None in transforms:
            raise ValueError("This transformation path is not possible")
        if len(full_path.transforms) == 1:
            raise ValueError(f"A direct transform for {fromsys.__name__}->{lastsys.__name__} already exists")

        self.add_transform(fromsys, lastsys,
                           CompositeTransform(transforms, fromsys, lastsys,
                                              priority=priority)._as_single_transform())

    @contextmanager
    def impose_finite_difference_dt(self, dt):
        """
        Context manager to impose a finite-difference time step on all
        applicable transformations.

        For each transformation in this transformation graph that has the
        attribute ``finite_difference_dt``, that attribute is set to the
        provided value.  The only standard transformation with this attribute
        is
        `~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`.

        Parameters
        ----------
        dt : `~astropy.units.Quantity` ['time'] or callable
            If a quantity, this is the size of the differential used to do the
            finite difference.  If a callable, should accept
            ``(fromcoord, toframe)`` and return the ``dt`` value.
        """
        key = 'finite_difference_dt'
        saved_settings = []

        try:
            for to_frames in self._graph.values():
                for transform in to_frames.values():
                    if hasattr(transform, key):
                        old_setting = (transform, key, getattr(transform, key))
                        saved_settings.append(old_setting)
                        setattr(transform, key, dt)
            yield
        finally:
            for setting in saved_settings:
                setattr(*setting)


# <-------------------Define the builtin transform classes-------------------->


class CoordinateTransform(metaclass=ABCMeta):
    """
    An object that transforms a coordinate from one system to another.
    Subclasses must implement `__call__` with the provided signature.  They
    should also call this superclass's ``__init__`` in their ``__init__``.

    Parameters
    ----------
    fromsys : `~astropy.coordinates.BaseCoordinateFrame` subclass
        The coordinate frame class to start from.
    tosys : `~astropy.coordinates.BaseCoordinateFrame` subclass
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.
""" def __init__(self, fromsys, tosys, priority=1, register_graph=None): if not inspect.isclass(fromsys): raise TypeError('fromsys must be a class') if not inspect.isclass(tosys): raise TypeError('tosys must be a class') self.fromsys = fromsys self.tosys = tosys self.priority = float(priority) if register_graph: # this will do the type-checking when it adds to the graph self.register(register_graph) else: if not inspect.isclass(fromsys) or not inspect.isclass(tosys): raise TypeError('fromsys and tosys must be classes') self.overlapping_frame_attr_names = overlap = [] if (hasattr(fromsys, 'get_frame_attr_names') and hasattr(tosys, 'get_frame_attr_names')): # the if statement is there so that non-frame things might be usable # if it makes sense for from_nm in fromsys.frame_attributes.keys(): if from_nm in tosys.frame_attributes.keys(): overlap.append(from_nm) def register(self, graph): """ Add this transformation to the requested Transformation graph, replacing anything already connecting these two coordinates. Parameters ---------- graph : `TransformGraph` object The graph to register this transformation with. """ graph.add_transform(self.fromsys, self.tosys, self) def unregister(self, graph): """ Remove this transformation from the requested transformation graph. Parameters ---------- graph : a TransformGraph object The graph to unregister this transformation from. Raises ------ ValueError If this is not currently in the transform graph. """ graph.remove_transform(self.fromsys, self.tosys, self) @abstractmethod def __call__(self, fromcoord, toframe): """ Does the actual coordinate transformation from the ``fromsys`` class to the ``tosys`` class. Parameters ---------- fromcoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance An object of class matching ``fromsys`` that is to be transformed. toframe : object An object that has the attributes necessary to fully specify the frame. That is, it must have attributes with names that match the keys of the dictionary that ``tosys.get_frame_attr_names()`` returns. Typically this is of class ``tosys``, but it *might* be some other class as long as it has the appropriate attributes. Returns ------- tocoord : `BaseCoordinateFrame` subclass instance The new coordinate after the transform has been applied. """ class FunctionTransform(CoordinateTransform): """ A coordinate transformation defined by a function that accepts a coordinate object and returns the transformed coordinate object. Parameters ---------- func : callable The transformation function. Should have a call signature ``func(formcoord, toframe)``. Note that, unlike `CoordinateTransform.__call__`, ``toframe`` is assumed to be of type ``tosys`` for this function. fromsys : class The coordinate frame class to start from. tosys : class The coordinate frame class to transform into. priority : float or int The priority if this transform when finding the shortest coordinate transform path - large numbers are lower priorities. register_graph : `TransformGraph` or None A graph to register this transformation with on creation, or `None` to leave it unregistered. Raises ------ TypeError If ``func`` is not callable. ValueError If ``func`` cannot accept two arguments. 
""" def __init__(self, func, fromsys, tosys, priority=1, register_graph=None): if not callable(func): raise TypeError('func must be callable') with suppress(TypeError): sig = signature(func) kinds = [x.kind for x in sig.parameters.values()] if (len(x for x in kinds if x == sig.POSITIONAL_ONLY) != 2 and sig.VAR_POSITIONAL not in kinds): raise ValueError('provided function does not accept two arguments') self.func = func super().__init__(fromsys, tosys, priority=priority, register_graph=register_graph) def __call__(self, fromcoord, toframe): res = self.func(fromcoord, toframe) if not isinstance(res, self.tosys): raise TypeError(f'the transformation function yielded {res} but ' f'should have been of type {self.tosys}') if fromcoord.data.differentials and not res.data.differentials: warn("Applied a FunctionTransform to a coordinate frame with " "differentials, but the FunctionTransform does not handle " "differentials, so they have been dropped.", AstropyWarning) return res class FunctionTransformWithFiniteDifference(FunctionTransform): r""" A coordinate transformation that works like a `FunctionTransform`, but computes velocity shifts based on the finite-difference relative to one of the frame attributes. Note that the transform function should *not* change the differential at all in this case, as any differentials will be overridden. When a differential is in the from coordinate, the finite difference calculation has two components. The first part is simple the existing differential, but re-orientation (using finite-difference techniques) to point in the direction the velocity vector has in the *new* frame. The second component is the "induced" velocity. That is, the velocity intrinsic to the frame itself, estimated by shifting the frame using the ``finite_difference_frameattr_name`` frame attribute a small amount (``finite_difference_dt``) in time and re-calculating the position. Parameters ---------- finite_difference_frameattr_name : str or None The name of the frame attribute on the frames to use for the finite difference. Both the to and the from frame will be checked for this attribute, but only one needs to have it. If None, no velocity component induced from the frame itself will be included - only the re-orientation of any existing differential. finite_difference_dt : `~astropy.units.Quantity` ['time'] or callable If a quantity, this is the size of the differential used to do the finite difference. If a callable, should accept ``(fromcoord, toframe)`` and return the ``dt`` value. symmetric_finite_difference : bool If True, the finite difference is computed as :math:`\frac{x(t + \Delta t / 2) - x(t + \Delta t / 2)}{\Delta t}`, or if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The latter case has slightly better performance (and more stable finite difference behavior). All other parameters are identical to the initializer for `FunctionTransform`. 
""" def __init__(self, func, fromsys, tosys, priority=1, register_graph=None, finite_difference_frameattr_name='obstime', finite_difference_dt=1*u.second, symmetric_finite_difference=True): super().__init__(func, fromsys, tosys, priority, register_graph) self.finite_difference_frameattr_name = finite_difference_frameattr_name self.finite_difference_dt = finite_difference_dt self.symmetric_finite_difference = symmetric_finite_difference @property def finite_difference_frameattr_name(self): return self._finite_difference_frameattr_name @finite_difference_frameattr_name.setter def finite_difference_frameattr_name(self, value): if value is None: self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False else: diff_attr_in_fromsys = value in self.fromsys.frame_attributes diff_attr_in_tosys = value in self.tosys.frame_attributes if diff_attr_in_fromsys or diff_attr_in_tosys: self._diff_attr_in_fromsys = diff_attr_in_fromsys self._diff_attr_in_tosys = diff_attr_in_tosys else: raise ValueError('Frame attribute name {} is not a frame ' 'attribute of {} or {}'.format(value, self.fromsys, self.tosys)) self._finite_difference_frameattr_name = value def __call__(self, fromcoord, toframe): from .representation import (CartesianRepresentation, CartesianDifferential) supcall = self.func if fromcoord.data.differentials: # this is the finite difference case if callable(self.finite_difference_dt): dt = self.finite_difference_dt(fromcoord, toframe) else: dt = self.finite_difference_dt halfdt = dt/2 from_diffless = fromcoord.realize_frame(fromcoord.data.without_differentials()) reprwithoutdiff = supcall(from_diffless, toframe) # first we use the existing differential to compute an offset due to # the already-existing velocity, but in the new frame fromcoord_cart = fromcoord.cartesian if self.symmetric_finite_difference: fwdxyz = (fromcoord_cart.xyz + fromcoord_cart.differentials['s'].d_xyz*halfdt) fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe) backxyz = (fromcoord_cart.xyz - fromcoord_cart.differentials['s'].d_xyz*halfdt) back = supcall(fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe) else: fwdxyz = (fromcoord_cart.xyz + fromcoord_cart.differentials['s'].d_xyz*dt) fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe) back = reprwithoutdiff diffxyz = (fwd.cartesian - back.cartesian).xyz / dt # now we compute the "induced" velocities due to any movement in # the frame itself over time attrname = self.finite_difference_frameattr_name if attrname is not None: if self.symmetric_finite_difference: if self._diff_attr_in_fromsys: kws = {attrname: getattr(from_diffless, attrname) + halfdt} from_diffless_fwd = from_diffless.replicate(**kws) else: from_diffless_fwd = from_diffless if self._diff_attr_in_tosys: kws = {attrname: getattr(toframe, attrname) + halfdt} fwd_frame = toframe.replicate_without_data(**kws) else: fwd_frame = toframe fwd = supcall(from_diffless_fwd, fwd_frame) if self._diff_attr_in_fromsys: kws = {attrname: getattr(from_diffless, attrname) - halfdt} from_diffless_back = from_diffless.replicate(**kws) else: from_diffless_back = from_diffless if self._diff_attr_in_tosys: kws = {attrname: getattr(toframe, attrname) - halfdt} back_frame = toframe.replicate_without_data(**kws) else: back_frame = toframe back = supcall(from_diffless_back, back_frame) else: if self._diff_attr_in_fromsys: kws = {attrname: getattr(from_diffless, attrname) + dt} from_diffless_fwd = from_diffless.replicate(**kws) else: from_diffless_fwd = 
from_diffless if self._diff_attr_in_tosys: kws = {attrname: getattr(toframe, attrname) + dt} fwd_frame = toframe.replicate_without_data(**kws) else: fwd_frame = toframe fwd = supcall(from_diffless_fwd, fwd_frame) back = reprwithoutdiff diffxyz += (fwd.cartesian - back.cartesian).xyz / dt newdiff = CartesianDifferential(diffxyz) reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(newdiff) return reprwithoutdiff.realize_frame(reprwithdiff) else: return supcall(fromcoord, toframe) class BaseAffineTransform(CoordinateTransform): """Base class for common functionality between the ``AffineTransform``-type subclasses. This base class is needed because ``AffineTransform`` and the matrix transform classes share the ``__call__()`` method, but differ in how they generate the affine parameters. ``StaticMatrixTransform`` passes in a matrix stored as a class attribute, and both of the matrix transforms pass in ``None`` for the offset. Hence, user subclasses would likely want to subclass this (rather than ``AffineTransform``) if they want to provide alternative transformations using this machinery. """ def _apply_transform(self, fromcoord, matrix, offset): from .representation import (UnitSphericalRepresentation, CartesianDifferential, SphericalDifferential, SphericalCosLatDifferential, RadialDifferential) data = fromcoord.data has_velocity = 's' in data.differentials # Bail out if no transform is actually requested if matrix is None and offset is None: return data # list of unit differentials _unit_diffs = (SphericalDifferential._unit_differential, SphericalCosLatDifferential._unit_differential) unit_vel_diff = (has_velocity and isinstance(data.differentials['s'], _unit_diffs)) rad_vel_diff = (has_velocity and isinstance(data.differentials['s'], RadialDifferential)) # Some initial checking to short-circuit doing any re-representation if # we're going to fail anyways: if isinstance(data, UnitSphericalRepresentation) and offset is not None: raise TypeError("Position information stored on coordinate frame " "is insufficient to do a full-space position " "transformation (representation class: {})" .format(data.__class__)) elif (has_velocity and (unit_vel_diff or rad_vel_diff) and offset is not None and 's' in offset.differentials): # Coordinate has a velocity, but it is not a full-space velocity # that we need to do a velocity offset raise TypeError("Velocity information stored on coordinate frame " "is insufficient to do a full-space velocity " "transformation (differential class: {})" .format(data.differentials['s'].__class__)) elif len(data.differentials) > 1: # We should never get here because the frame initializer shouldn't # allow more differentials, but this just adds protection for # subclasses that somehow skip the checks raise ValueError("Representation passed to AffineTransform contains" " multiple associated differentials. Only a single" " differential with velocity units is presently" " supported (differentials: {})." 
.format(str(data.differentials))) # If the representation is a UnitSphericalRepresentation, and this is # just a MatrixTransform, we have to try to turn the differential into a # Unit version of the differential (if no radial velocity) or a # sphericaldifferential with zero proper motion (if only a radial # velocity) so that the matrix operation works if (has_velocity and isinstance(data, UnitSphericalRepresentation) and not unit_vel_diff and not rad_vel_diff): # retrieve just velocity differential unit_diff = data.differentials['s'].represent_as( data.differentials['s']._unit_differential, data) data = data.with_differentials({'s': unit_diff}) # updates key # If it's a RadialDifferential, we flat-out ignore the differentials # This is because, by this point (past the validation above), we can # only possibly be doing a rotation-only transformation, and that # won't change the radial differential. We later add it back in elif rad_vel_diff: data = data.without_differentials() # Convert the representation and differentials to cartesian without # having them attached to a frame rep = data.to_cartesian() diffs = dict([(k, diff.represent_as(CartesianDifferential, data)) for k, diff in data.differentials.items()]) rep = rep.with_differentials(diffs) # Only do transform if matrix is specified. This is for speed in # transformations that only specify an offset (e.g., LSR) if matrix is not None: # Note: this applies to both representation and differentials rep = rep.transform(matrix) # TODO: if we decide to allow arithmetic between representations that # contain differentials, this can be tidied up if offset is not None: newrep = (rep.without_differentials() + offset.without_differentials()) else: newrep = rep.without_differentials() # We need a velocity (time derivative) and, for now, are strict: the # representation can only contain a velocity differential and no others. if has_velocity and not rad_vel_diff: veldiff = rep.differentials['s'] # already in Cartesian form if offset is not None and 's' in offset.differentials: veldiff = veldiff + offset.differentials['s'] newrep = newrep.with_differentials({'s': veldiff}) if isinstance(fromcoord.data, UnitSphericalRepresentation): # Special-case this because otherwise the return object will think # it has a valid distance with the default return (a # CartesianRepresentation instance) if has_velocity and not unit_vel_diff and not rad_vel_diff: # We have to first represent as the Unit types we converted to, # then put the d_distance information back in to the # differentials and re-represent as their original forms newdiff = newrep.differentials['s'] _unit_cls = fromcoord.data.differentials['s']._unit_differential newdiff = newdiff.represent_as(_unit_cls, newrep) kwargs = dict([(comp, getattr(newdiff, comp)) for comp in newdiff.components]) kwargs['d_distance'] = fromcoord.data.differentials['s'].d_distance diffs = {'s': fromcoord.data.differentials['s'].__class__( copy=False, **kwargs)} elif has_velocity and unit_vel_diff: newdiff = newrep.differentials['s'].represent_as( fromcoord.data.differentials['s'].__class__, newrep) diffs = {'s': newdiff} else: diffs = newrep.differentials newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs newrep = newrep.with_differentials(diffs) elif has_velocity and unit_vel_diff: # Here, we're in the case where the representation is not # UnitSpherical, but the differential *is* one of the UnitSpherical # types. 
We have to convert back to that differential class or the
            # resulting frame will think it has a valid radial_velocity.  This
            # can probably be cleaned up: we currently have to go through the
            # dimensional version of the differential before representing as
            # the unit differential so that the units work out (the distance
            # length unit shouldn't appear in the resulting proper motions)
            diff_cls = fromcoord.data.differentials['s'].__class__
            newrep = newrep.represent_as(fromcoord.data.__class__,
                                         diff_cls._dimensional_differential)
            newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)

        # We pulled the radial differential off of the representation
        # earlier, so now we need to put it back. But, in order to do that, we
        # have to turn the representation into a repr that is compatible with
        # having a RadialDifferential
        if has_velocity and rad_vel_diff:
            newrep = newrep.represent_as(fromcoord.data.__class__)
            newrep = newrep.with_differentials(
                {'s': fromcoord.data.differentials['s']})

        return newrep

    def __call__(self, fromcoord, toframe):
        params = self._affine_params(fromcoord, toframe)
        newrep = self._apply_transform(fromcoord, *params)
        return toframe.realize_frame(newrep)

    @abstractmethod
    def _affine_params(self, fromcoord, toframe):
        pass


class AffineTransform(BaseAffineTransform):
    """
    A coordinate transformation specified as a function that yields a 3 x 3
    cartesian transformation matrix and a tuple of displacement vectors.

    See `~astropy.coordinates.builtin_frames.galactocentric.Galactocentric` for
    an example.

    Parameters
    ----------
    transform_func : callable
        A callable that has the signature
        ``transform_func(fromcoord, toframe)`` and returns: a (3, 3) matrix
        that operates on ``fromcoord`` in a Cartesian representation, and a
        ``CartesianRepresentation`` with (optionally) an attached velocity
        ``CartesianDifferential`` to represent a translation and offset in
        velocity to apply after the matrix operation.
    fromsys : class
        The coordinate frame class to start from.
    tosys : class
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.

    Raises
    ------
    TypeError
        If ``transform_func`` is not callable

    """
    def __init__(self, transform_func, fromsys, tosys, priority=1,
                 register_graph=None):

        if not callable(transform_func):
            raise TypeError('transform_func is not callable')
        self.transform_func = transform_func

        super().__init__(fromsys, tosys, priority=priority,
                         register_graph=register_graph)

    def _affine_params(self, fromcoord, toframe):
        return self.transform_func(fromcoord, toframe)


class StaticMatrixTransform(BaseAffineTransform):
    """
    A coordinate transformation defined as a 3 x 3 cartesian
    transformation matrix.

    This is distinct from DynamicMatrixTransform in that this kind of matrix
    is independent of frame attributes.  That is, it depends *only* on the
    class of the frame.

    Parameters
    ----------
    matrix : array-like or callable
        A 3 x 3 matrix for transforming 3-vectors. In most cases will
        be unitary (although this is not strictly required). If a callable,
        will be called *with no arguments* to get the matrix.
    fromsys : class
        The coordinate frame class to start from.
    tosys : class
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.

    Raises
    ------
    ValueError
        If the matrix is not 3 x 3

    """
    def __init__(self, matrix, fromsys, tosys, priority=1,
                 register_graph=None):
        if callable(matrix):
            matrix = matrix()
        self.matrix = np.array(matrix)

        if self.matrix.shape != (3, 3):
            raise ValueError('Provided matrix is not 3 x 3')

        super().__init__(fromsys, tosys, priority=priority,
                         register_graph=register_graph)

    def _affine_params(self, fromcoord, toframe):
        return self.matrix, None


class DynamicMatrixTransform(BaseAffineTransform):
    """
    A coordinate transformation specified as a function that yields a
    3 x 3 cartesian transformation matrix.

    This is similar to, but distinct from StaticMatrixTransform, in that the
    matrix for this class might depend on frame attributes.

    Parameters
    ----------
    matrix_func : callable
        A callable that has the signature ``matrix_func(fromcoord, toframe)``
        and returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
        representation to the new coordinate system.
    fromsys : class
        The coordinate frame class to start from.
    tosys : class
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.

    Raises
    ------
    TypeError
        If ``matrix_func`` is not callable

    """
    def __init__(self, matrix_func, fromsys, tosys, priority=1,
                 register_graph=None):
        if not callable(matrix_func):
            raise TypeError('matrix_func is not callable')
        self.matrix_func = matrix_func

        super().__init__(fromsys, tosys, priority=priority,
                         register_graph=register_graph)

    def _affine_params(self, fromcoord, toframe):
        return self.matrix_func(fromcoord, toframe), None


class CompositeTransform(CoordinateTransform):
    """
    A transformation constructed by combining together a series of single-step
    transformations.

    Note that the intermediate frame objects are constructed using any frame
    attributes in ``toframe`` or ``fromframe`` that overlap with the
    intermediate frame (``toframe`` favored over ``fromframe`` if there's a
    conflict).  Any frame attributes that are not present use the defaults.

    Parameters
    ----------
    transforms : sequence of `CoordinateTransform` object
        The sequence of transformations to apply.
    fromsys : class
        The coordinate frame class to start from.
    tosys : class
        The coordinate frame class to transform into.
    priority : float or int
        The priority of this transform when finding the shortest coordinate
        transform path - large numbers are lower priorities.
    register_graph : `TransformGraph` or None
        A graph to register this transformation with on creation, or
        `None` to leave it unregistered.
    collapse_static_mats : bool
        If `True`, consecutive `StaticMatrixTransform` objects will be
        collapsed into a single transformation to speed up the calculation.
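
    Examples
    --------
    An illustrative sketch (``trans_a_to_b``, ``trans_b_to_c``, and the frame
    classes are hypothetical, assumed to be already-constructed
    `CoordinateTransform` instances and frames)::

        composite = CompositeTransform([trans_a_to_b, trans_b_to_c],
                                       FrameA, FrameC)
        coo_c = composite(coo_a, FrameC())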
""" def __init__(self, transforms, fromsys, tosys, priority=1, register_graph=None, collapse_static_mats=True): super().__init__(fromsys, tosys, priority=priority, register_graph=register_graph) if collapse_static_mats: transforms = self._combine_statics(transforms) self.transforms = tuple(transforms) def _combine_statics(self, transforms): """ Combines together sequences of `StaticMatrixTransform`s into a single transform and returns it. """ newtrans = [] for currtrans in transforms: lasttrans = newtrans[-1] if len(newtrans) > 0 else None if (isinstance(lasttrans, StaticMatrixTransform) and isinstance(currtrans, StaticMatrixTransform)): combinedmat = matrix_product(currtrans.matrix, lasttrans.matrix) newtrans[-1] = StaticMatrixTransform(combinedmat, lasttrans.fromsys, currtrans.tosys) else: newtrans.append(currtrans) return newtrans def __call__(self, fromcoord, toframe): curr_coord = fromcoord for t in self.transforms: # build an intermediate frame with attributes taken from either # `toframe`, or if not there, `fromcoord`, or if not there, use # the defaults # TODO: caching this information when creating the transform may # speed things up a lot frattrs = {} for inter_frame_attr_nm in t.tosys.get_frame_attr_names(): if hasattr(toframe, inter_frame_attr_nm): attr = getattr(toframe, inter_frame_attr_nm) frattrs[inter_frame_attr_nm] = attr elif hasattr(fromcoord, inter_frame_attr_nm): attr = getattr(fromcoord, inter_frame_attr_nm) frattrs[inter_frame_attr_nm] = attr curr_toframe = t.tosys(**frattrs) curr_coord = t(curr_coord, curr_toframe) # this is safe even in the case where self.transforms is empty, because # coordinate objects are immutable, so copying is not needed return curr_coord def _as_single_transform(self): """ Return an encapsulated version of the composite transform so that it appears to be a single transform. The returned transform internally calls the constituent transforms. If all of the transforms are affine, the merged transform is `~astropy.coordinates.transformations.DynamicMatrixTransform` (if there are no origin shifts) or `~astropy.coordinates.transformations.AffineTransform` (otherwise). If at least one of the transforms is not affine, the merged transform is `~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`. 
""" # Create a list of the transforms including flattening any constituent CompositeTransform transforms = [t if not isinstance(t, CompositeTransform) else t._as_single_transform() for t in self.transforms] if all([isinstance(t, BaseAffineTransform) for t in transforms]): # Check if there may be an origin shift fixed_origin = all([isinstance(t, (StaticMatrixTransform, DynamicMatrixTransform)) for t in transforms]) # Dynamically define the transformation function def single_transform(from_coo, to_frame): if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame return None if fixed_origin else (None, None) # Create a merged attribute dictionary for any intermediate frames # For any attributes shared by the "from"/"to" frames, the "to" frame takes # precedence because this is the same choice implemented in __call__() merged_attr = {name: getattr(from_coo, name) for name in from_coo.frame_attributes} merged_attr.update({name: getattr(to_frame, name) for name in to_frame.frame_attributes}) affine_params = (None, None) # Step through each transform step (frame A -> frame B) for i, t in enumerate(transforms): # Extract the relevant attributes for frame A if i == 0: # If frame A is actually the initial frame, preserve its attributes a_attr = {name: getattr(from_coo, name) for name in from_coo.frame_attributes} else: a_attr = {k: v for k, v in merged_attr.items() if k in t.fromsys.frame_attributes} # Extract the relevant attributes for frame B b_attr = {k: v for k, v in merged_attr.items() if k in t.tosys.frame_attributes} # Obtain the affine parameters for the transform # Note that we insert some dummy data into frame A because the transformation # machinery requires there to be data present. Removing that limitation # is a possible TODO, but some care would need to be taken because some affine # transforms have branching code depending on the presence of differentials. next_affine_params = t._affine_params(t.fromsys(from_coo.data, **a_attr), t.tosys(**b_attr)) # Combine the affine parameters with the running set affine_params = _combine_affine_params(affine_params, next_affine_params) # If there is no origin shift, return only the matrix return affine_params[0] if fixed_origin else affine_params # The return type depends on whether there is any origin shift transform_type = DynamicMatrixTransform if fixed_origin else AffineTransform else: # Dynamically define the transformation function def single_transform(from_coo, to_frame): if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame return to_frame.realize_frame(from_coo.data) return self(from_coo, to_frame) transform_type = FunctionTransformWithFiniteDifference return transform_type(single_transform, self.fromsys, self.tosys, priority=self.priority) def _combine_affine_params(params, next_params): """ Combine two sets of affine parameters. The parameters for an affine transformation are a 3 x 3 Cartesian transformation matrix and a displacement vector, which can include an attached velocity. Either type of parameter can be ``None``. 
""" M, vec = params next_M, next_vec = next_params # Multiply the transformation matrices if they both exist if M is not None and next_M is not None: new_M = next_M @ M else: new_M = M if M is not None else next_M if vec is not None: # Transform the first displacement vector by the second transformation matrix if next_M is not None: vec = vec.transform(next_M) # Calculate the new displacement vector if next_vec is not None: if 's' in vec.differentials and 's' in next_vec.differentials: # Adding vectors with velocities takes more steps # TODO: Add support in representation.py new_vec_velocity = vec.differentials['s'] + next_vec.differentials['s'] new_vec = vec.without_differentials() + next_vec.without_differentials() new_vec = new_vec.with_differentials({'s': new_vec_velocity}) else: new_vec = vec + next_vec else: new_vec = vec else: new_vec = next_vec return new_M, new_vec # map class names to colorblind-safe colors trans_to_color = {} trans_to_color[AffineTransform] = '#555555' # gray trans_to_color[FunctionTransform] = '#783001' # dark red-ish/brown trans_to_color[FunctionTransformWithFiniteDifference] = '#d95f02' # red-ish trans_to_color[StaticMatrixTransform] = '#7570b3' # blue-ish trans_to_color[DynamicMatrixTransform] = '#1b9e77' # green-ish
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains the classes and utility functions for distance and cartesian coordinates. """ import warnings import numpy as np from astropy import units as u from astropy.utils.exceptions import AstropyWarning from .angles import Angle __all__ = ['Distance'] __doctest_requires__ = {'*': ['scipy']} class Distance(u.SpecificTypeQuantity): """ A one-dimensional distance. This can be initialized by providing one of the following: * Distance ``value`` (array or float) and a ``unit`` * |Quantity| object with dimensionality of length * Redshift and (optionally) a `~astropy.cosmology.Cosmology` * Distance modulus * Parallax Parameters ---------- value : scalar or `~astropy.units.Quantity` ['length'] The value of this distance. unit : `~astropy.units.UnitBase` ['length'] The unit for this distance. z : float A redshift for this distance. It will be converted to a distance by computing the luminosity distance for this redshift given the cosmology specified by ``cosmology``. Must be given as a keyword argument. cosmology : `~astropy.cosmology.Cosmology` or None A cosmology that will be used to compute the distance from ``z``. If `None`, the current cosmology will be used (see `astropy.cosmology` for details). distmod : float or `~astropy.units.Quantity` The distance modulus for this distance. Note that if ``unit`` is not provided, a guess will be made at the unit between AU, pc, kpc, and Mpc. parallax : `~astropy.units.Quantity` or `~astropy.coordinates.Angle` The parallax in angular units. dtype : `~numpy.dtype`, optional See `~astropy.units.Quantity`. copy : bool, optional See `~astropy.units.Quantity`. order : {'C', 'F', 'A'}, optional See `~astropy.units.Quantity`. subok : bool, optional See `~astropy.units.Quantity`. ndmin : int, optional See `~astropy.units.Quantity`. allow_negative : bool, optional Whether to allow negative distances (which are possible in some cosmologies). Default: `False`. Raises ------ `~astropy.units.UnitsError` If the ``unit`` is not a length unit. ValueError If value specified is less than 0 and ``allow_negative=False``. If ``cosmology`` is provided when ``z`` is *not* given. If either none or more than one of ``value``, ``z``, ``distmod``, or ``parallax`` were given. Examples -------- >>> from astropy import units as u >>> from astropy.cosmology import WMAP5 >>> Distance(10, u.Mpc) <Distance 10. Mpc> >>> Distance(40*u.pc, unit=u.kpc) <Distance 0.04 kpc> >>> Distance(z=0.23) # doctest: +FLOAT_CMP <Distance 1184.01657566 Mpc> >>> Distance(z=0.23, cosmology=WMAP5) # doctest: +FLOAT_CMP <Distance 1147.78831918 Mpc> >>> Distance(distmod=24.47*u.mag) # doctest: +FLOAT_CMP <Distance 783.42964277 kpc> >>> Distance(parallax=21.34*u.mas) # doctest: +FLOAT_CMP <Distance 46.86035614 pc> """ _equivalent_unit = u.m _include_easy_conversion_members = True def __new__(cls, value=None, unit=None, z=None, cosmology=None, distmod=None, parallax=None, dtype=None, copy=True, order=None, subok=False, ndmin=0, allow_negative=False): n_not_none = sum(x is not None for x in [value, z, distmod, parallax]) if n_not_none == 0: raise ValueError('none of `value`, `z`, `distmod`, or `parallax` ' 'were given to Distance constructor') elif n_not_none > 1: raise ValueError('more than one of `value`, `z`, `distmod`, or ' '`parallax` were given to Distance constructor') if value is None: # If something else but `value` was provided then a new array will # be created anyways and there is no need to copy that. 
copy = False if z is not None: if cosmology is None: from astropy.cosmology import default_cosmology cosmology = default_cosmology.get() value = cosmology.luminosity_distance(z) elif cosmology is not None: raise ValueError('a `cosmology` was given but `z` was not ' 'provided in Distance constructor') elif distmod is not None: value = cls._distmod_to_pc(distmod) if unit is None: # if the unit is not specified, guess based on the mean of # the log of the distance meanlogval = np.log10(value.value).mean() if meanlogval > 6: unit = u.Mpc elif meanlogval > 3: unit = u.kpc elif meanlogval < -3: # ~200 AU unit = u.AU else: unit = u.pc elif parallax is not None: if unit is None: unit = u.pc value = parallax.to_value(unit, equivalencies=u.parallax()) if np.any(parallax < 0): if allow_negative: warnings.warn( "negative parallaxes are converted to NaN " "distances even when `allow_negative=True`, " "because negative parallaxes cannot be transformed " "into distances. See the discussion in this paper: " "https://arxiv.org/abs/1507.02105", AstropyWarning) else: raise ValueError( "some parallaxes are negative, which are not " "interpretable as distances. See the discussion in " "this paper: https://arxiv.org/abs/1507.02105 . You " "can convert negative parallaxes to NaN distances by " "providing the `allow_negative=True` argument.") # now we have arguments like for a Quantity, so let it do the work distance = super().__new__( cls, value, unit, dtype=dtype, copy=copy, order=order, subok=subok, ndmin=ndmin) # This invalid catch block can be removed when the minimum numpy # version is >= 1.19 (NUMPY_LT_1_19) with np.errstate(invalid='ignore'): any_negative = np.any(distance.value < 0) if not allow_negative and any_negative: raise ValueError("distance must be >= 0. Use the argument " "`allow_negative=True` to allow negative values.") return distance @property def z(self): """Short for ``self.compute_z()``""" return self.compute_z() def compute_z(self, cosmology=None, **atzkw): """ The redshift for this distance assuming its physical distance is a luminosity distance. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` or None The cosmology to assume for this calculation, or `None` to use the current cosmology (see `astropy.cosmology` for details). **atzkw keyword arguments for :func:`~astropy.cosmology.z_at_value` Returns ------- z : `~astropy.units.Quantity` The redshift of this distance given the provided ``cosmology``. Warnings -------- This method can be slow for large arrays. The redshift is determined using :func:`astropy.cosmology.z_at_value`, which handles vector inputs (e.g. an array of distances) by element-wise calling of :func:`scipy.optimize.minimize_scalar`. For faster results consider using an interpolation table; :func:`astropy.cosmology.z_at_value` provides details. See Also -------- :func:`astropy.cosmology.z_at_value` Find the redshift corresponding to a :meth:`astropy.cosmology.FLRW.luminosity_distance`. """ from astropy.cosmology import z_at_value if cosmology is None: from astropy.cosmology import default_cosmology cosmology = default_cosmology.get() atzkw.setdefault("ztol", 1.e-10) return z_at_value(cosmology.luminosity_distance, self, **atzkw) @property def distmod(self): """The distance modulus as a `~astropy.units.Quantity`""" val = 5. * np.log10(self.to_value(u.pc)) - 5. 
return u.Quantity(val, u.mag, copy=False) @classmethod def _distmod_to_pc(cls, dm): dm = u.Quantity(dm, u.mag) return cls(10 ** ((dm.value + 5) / 5.), u.pc, copy=False) @property def parallax(self): """The parallax angle as an `~astropy.coordinates.Angle` object""" return Angle(self.to(u.milliarcsecond, u.parallax()))
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains convenience functions for coordinate-related functionality. This is generally just wrapping around the object-oriented coordinates framework, but it is useful for some users who are used to more functional interfaces. """ import warnings from collections.abc import Sequence import numpy as np import erfa from astropy import units as u from astropy.constants import c from astropy.io import ascii from astropy.utils import isiterable, data from .sky_coordinate import SkyCoord from .builtin_frames import GCRS, PrecessedGeocentric from .representation import SphericalRepresentation, CartesianRepresentation from .builtin_frames.utils import get_jd12 __all__ = ['cartesian_to_spherical', 'spherical_to_cartesian', 'get_sun', 'get_constellation', 'concatenate_representations', 'concatenate'] def cartesian_to_spherical(x, y, z): """ Converts 3D rectangular cartesian coordinates to spherical polar coordinates. Note that the resulting angles are latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole. .. note:: This function simply wraps functionality provided by the `~astropy.coordinates.CartesianRepresentation` and `~astropy.coordinates.SphericalRepresentation` classes. In general, for both performance and readability, we suggest using these classes directly. But for situations where a quick one-off conversion makes sense, this function is provided. Parameters ---------- x : scalar, array-like, or `~astropy.units.Quantity` The first Cartesian coordinate. y : scalar, array-like, or `~astropy.units.Quantity` The second Cartesian coordinate. z : scalar, array-like, or `~astropy.units.Quantity` The third Cartesian coordinate. Returns ------- r : `~astropy.units.Quantity` The radial coordinate (in the same units as the inputs). lat : `~astropy.units.Quantity` ['angle'] The latitude in radians lon : `~astropy.units.Quantity` ['angle'] The longitude in radians """ if not hasattr(x, 'unit'): x = x * u.dimensionless_unscaled if not hasattr(y, 'unit'): y = y * u.dimensionless_unscaled if not hasattr(z, 'unit'): z = z * u.dimensionless_unscaled cart = CartesianRepresentation(x, y, z) sph = cart.represent_as(SphericalRepresentation) return sph.distance, sph.lat, sph.lon def spherical_to_cartesian(r, lat, lon): """ Converts spherical polar coordinates to rectangular cartesian coordinates. Note that the input angles should be in latitude/longitude or elevation/azimuthal form. I.e., the origin is along the equator rather than at the north pole. .. note:: This is a low-level function used internally in `astropy.coordinates`. It is provided for users if they really want to use it, but it is recommended that you use the `astropy.coordinates` coordinate systems. Parameters ---------- r : scalar, array-like, or `~astropy.units.Quantity` The radial coordinate (in the same units as the inputs). lat : scalar, array-like, or `~astropy.units.Quantity` ['angle'] The latitude (in radians if array or scalar) lon : scalar, array-like, or `~astropy.units.Quantity` ['angle'] The longitude (in radians if array or scalar) Returns ------- x : float or array The first cartesian coordinate. y : float or array The second cartesian coordinate. z : float or array The third cartesian coordinate. 
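
    Examples
    --------
    A quick sketch of the convention (the input angles are latitude and
    longitude, so zero latitude and zero longitude lie on the +x axis)::

        import astropy.units as u
        from astropy.coordinates import spherical_to_cartesian

        x, y, z = spherical_to_cartesian(1 * u.kpc, 0 * u.deg, 45 * u.deg)
        # x = y ~ 0.707 kpc, z = 0 kpc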
""" if not hasattr(r, 'unit'): r = r * u.dimensionless_unscaled if not hasattr(lat, 'unit'): lat = lat * u.radian if not hasattr(lon, 'unit'): lon = lon * u.radian sph = SphericalRepresentation(distance=r, lat=lat, lon=lon) cart = sph.represent_as(CartesianRepresentation) return cart.x, cart.y, cart.z def get_sun(time): """ Determines the location of the sun at a given time (or times, if the input is an array `~astropy.time.Time` object), in geocentric coordinates. Parameters ---------- time : `~astropy.time.Time` The time(s) at which to compute the location of the sun. Returns ------- newsc : `~astropy.coordinates.SkyCoord` The location of the sun as a `~astropy.coordinates.SkyCoord` in the `~astropy.coordinates.GCRS` frame. Notes ----- The algorithm for determining the sun/earth relative position is based on the simplified version of VSOP2000 that is part of ERFA. Compared to JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps 250 km over the 1000-3000. """ earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, 'tdb')) # We have to manually do aberration because we're outputting directly into # GCRS earth_p = earth_pv_helio['p'] earth_v = earth_pv_bary['v'] # convert barycentric velocity to units of c, but keep as array for passing in to erfa earth_v /= c.to_value(u.au/u.d) dsun = np.sqrt(np.sum(earth_p**2, axis=-1)) invlorentz = (1-np.sum(earth_v**2, axis=-1))**0.5 properdir = erfa.ab(earth_p/dsun.reshape(dsun.shape + (1,)), -earth_v, dsun, invlorentz) cartrep = CartesianRepresentation(x=-dsun*properdir[..., 0] * u.AU, y=-dsun*properdir[..., 1] * u.AU, z=-dsun*properdir[..., 2] * u.AU) return SkyCoord(cartrep, frame=GCRS(obstime=time)) # global dictionary that caches repeatedly-needed info for get_constellation _constellation_data = {} def get_constellation(coord, short_name=False, constellation_list='iau'): """ Determines the constellation(s) a given coordinate object contains. Parameters ---------- coord : coordinate-like The object to determine the constellation of. short_name : bool If True, the returned names are the IAU-sanctioned abbreviated names. Otherwise, full names for the constellations are used. constellation_list : str The set of constellations to use. Currently only ``'iau'`` is supported, meaning the 88 "modern" constellations endorsed by the IAU. Returns ------- constellation : str or string array If ``coords`` contains a scalar coordinate, returns the name of the constellation. If it is an array coordinate object, it returns an array of names. Notes ----- To determine which constellation a point on the sky is in, this precesses to B1875, and then uses the Delporte boundaries of the 88 modern constellations, as tabulated by `Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_. 
""" if constellation_list != 'iau': raise ValueError("only 'iau' us currently supported for constellation_list") # read the data files and cache them if they haven't been already if not _constellation_data: cdata = data.get_pkg_data_contents('data/constellation_data_roman87.dat') ctable = ascii.read(cdata, names=['ral', 'rau', 'decl', 'name']) cnames = data.get_pkg_data_contents('data/constellation_names.dat', encoding='UTF8') cnames_short_to_long = dict([(l[:3], l[4:]) for l in cnames.split('\n') if not l.startswith('#')]) cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable['name']]) _constellation_data['ctable'] = ctable _constellation_data['cnames_long'] = cnames_long else: ctable = _constellation_data['ctable'] cnames_long = _constellation_data['cnames_long'] isscalar = coord.isscalar # if it is geocentric, we reproduce the frame but with the 1875 equinox, # which is where the constellations are defined # this yields a "dubious year" warning because ERFA considers the year 1875 # "dubious", probably because UTC isn't well-defined then and precession # models aren't precisely calibrated back to then. But it's plenty # sufficient for constellations with warnings.catch_warnings(): warnings.simplefilter('ignore', erfa.ErfaWarning) constel_coord = coord.transform_to(PrecessedGeocentric(equinox='B1875')) if isscalar: rah = constel_coord.ra.ravel().hour decd = constel_coord.dec.ravel().deg else: rah = constel_coord.ra.hour decd = constel_coord.dec.deg constellidx = -np.ones(len(rah), dtype=int) notided = constellidx == -1 # should be all for i, row in enumerate(ctable): msk = (row['ral'] < rah) & (rah < row['rau']) & (decd > row['decl']) constellidx[notided & msk] = i notided = constellidx == -1 if np.sum(notided) == 0: break else: raise ValueError(f'Could not find constellation for coordinates {constel_coord[notided]}') if short_name: names = ctable['name'][constellidx] else: names = cnames_long[constellidx] if isscalar: return names[0] else: return names def _concatenate_components(reps_difs, names): """ Helper function for the concatenate function below. Gets and concatenates all of the individual components for an iterable of representations or differentials. """ values = [] for name in names: unit0 = getattr(reps_difs[0], name).unit # Go via to_value because np.concatenate doesn't work with Quantity data_vals = [getattr(x, name).to_value(unit0) for x in reps_difs] concat_vals = np.concatenate(np.atleast_1d(*data_vals)) concat_vals = concat_vals << unit0 values.append(concat_vals) return values def concatenate_representations(reps): """ Combine multiple representation objects into a single instance by concatenating the data in each component. Currently, all of the input representations have to be the same type. This properly handles differential or velocity data, but all input objects must have the same differential object type as well. Parameters ---------- reps : sequence of `~astropy.coordinates.BaseRepresentation` The objects to concatenate Returns ------- rep : `~astropy.coordinates.BaseRepresentation` subclass instance A single representation object with its data set to the concatenation of all the elements of the input sequence of representations. 
""" if not isinstance(reps, (Sequence, np.ndarray)): raise TypeError('Input must be a list or iterable of representation ' 'objects.') # First, validate that the representations are the same, and # concatenate all of the positional data: rep_type = type(reps[0]) if any(type(r) != rep_type for r in reps): raise TypeError('Input representations must all have the same type.') # Construct the new representation with the concatenated data from the # representations passed in values = _concatenate_components(reps, rep_type.attr_classes.keys()) new_rep = rep_type(*values) has_diff = any('s' in rep.differentials for rep in reps) if has_diff and any('s' not in rep.differentials for rep in reps): raise ValueError('Input representations must either all contain ' 'differentials, or not contain differentials.') if has_diff: dif_type = type(reps[0].differentials['s']) if any('s' not in r.differentials or type(r.differentials['s']) != dif_type for r in reps): raise TypeError('All input representations must have the same ' 'differential type.') values = _concatenate_components([r.differentials['s'] for r in reps], dif_type.attr_classes.keys()) new_dif = dif_type(*values) new_rep = new_rep.with_differentials({'s': new_dif}) return new_rep def concatenate(coords): """ Combine multiple coordinate objects into a single `~astropy.coordinates.SkyCoord`. "Coordinate objects" here mean frame objects with data, `~astropy.coordinates.SkyCoord`, or representation objects. Currently, they must all be in the same frame, but in a future version this may be relaxed to allow inhomogeneous sequences of objects. Parameters ---------- coords : sequence of coordinate-like The objects to concatenate Returns ------- cskycoord : SkyCoord A single sky coordinate with its data set to the concatenation of all the elements in ``coords`` """ if getattr(coords, 'isscalar', False) or not isiterable(coords): raise TypeError('The argument to concatenate must be iterable') scs = [SkyCoord(coord, copy=False) for coord in coords] # Check that all frames are equivalent for sc in scs[1:]: if not sc.is_equivalent_frame(scs[0]): raise ValueError("All inputs must have equivalent frames: " "{} != {}".format(sc, scs[0])) # TODO: this can be changed to SkyCoord.from_representation() for a speed # boost when we switch to using classmethods return SkyCoord(concatenate_representations([c.data for c in coords]), frame=scs[0].frame)
# Licensed under a 3-clause BSD style license - see LICENSE.rst import re from collections.abc import Sequence import inspect import numpy as np from astropy.units import Unit, IrreducibleUnit from astropy import units as u from .baseframe import (BaseCoordinateFrame, frame_transform_graph, _get_repr_cls, _get_diff_cls) from .builtin_frames import ICRS from .representation import (BaseRepresentation, SphericalRepresentation, UnitSphericalRepresentation) """ This module contains utility functions to make the SkyCoord initializer more modular and maintainable. No functionality here should be in the public API, but rather used as part of creating SkyCoord objects. """ PLUS_MINUS_RE = re.compile(r'(\+|\-)') J_PREFIXED_RA_DEC_RE = re.compile( r"""J # J prefix ([0-9]{6,7}\.?[0-9]{0,2}) # RA as HHMMSS.ss or DDDMMSS.ss, optional decimal digits ([\+\-][0-9]{6}\.?[0-9]{0,2})\s*$ # Dec as DDMMSS.ss, optional decimal digits """, re.VERBOSE) def _get_frame_class(frame): """ Get a frame class from the input `frame`, which could be a frame name string, or frame class. """ if isinstance(frame, str): frame_names = frame_transform_graph.get_names() if frame not in frame_names: raise ValueError('Coordinate frame name "{}" is not a known ' 'coordinate frame ({})' .format(frame, sorted(frame_names))) frame_cls = frame_transform_graph.lookup_name(frame) elif inspect.isclass(frame) and issubclass(frame, BaseCoordinateFrame): frame_cls = frame else: raise ValueError("Coordinate frame must be a frame name or frame " "class, not a '{}'".format(frame.__class__.__name__)) return frame_cls _conflict_err_msg = ("Coordinate attribute '{0}'={1!r} conflicts with keyword " "argument '{0}'={2!r}. This usually means an attribute " "was set on one of the input objects and also in the " "keyword arguments to {3}") def _get_frame_without_data(args, kwargs): """ Determines the coordinate frame from input SkyCoord args and kwargs. This function extracts (removes) all frame attributes from the kwargs and determines the frame class either using the kwargs, or using the first element in the args (if a single frame object is passed in, for example). This function allows a frame to be specified as a string like 'icrs' or a frame class like ICRS, or an instance ICRS(), as long as the instance frame attributes don't conflict with kwargs passed in (which could require a three-way merge with the coordinate data possibly specified via the args). """ from .sky_coordinate import SkyCoord # We eventually (hopefully) fill and return these by extracting the frame # and frame attributes from the input: frame_cls = None frame_cls_kwargs = {} # The first place to check: the frame could be specified explicitly frame = kwargs.pop('frame', None) if frame is not None: # Here the frame was explicitly passed in as a keyword argument. # If the frame is an instance or SkyCoord, we extract the attributes # and split the instance into the frame class and an attributes dict if isinstance(frame, SkyCoord): # If the frame was passed as a SkyCoord, we also want to preserve # any extra attributes (e.g., obstime) if they are not already # specified in the kwargs. 
We preserve these extra attributes by # adding them to the kwargs dict: for attr in frame._extra_frameattr_names: if (attr in kwargs and np.any(getattr(frame, attr) != kwargs[attr])): # This SkyCoord attribute passed in with the frame= object # conflicts with an attribute passed in directly to the # SkyCoord initializer as a kwarg: raise ValueError(_conflict_err_msg .format(attr, getattr(frame, attr), kwargs[attr], 'SkyCoord')) else: kwargs[attr] = getattr(frame, attr) frame = frame.frame if isinstance(frame, BaseCoordinateFrame): # Extract any frame attributes for attr in frame.get_frame_attr_names(): # If the frame was specified as an instance, we have to make # sure that no frame attributes were specified as kwargs - this # would require a potential three-way merge: if attr in kwargs: raise ValueError("Cannot specify frame attribute '{}' " "directly as an argument to SkyCoord " "because a frame instance was passed in. " "Either pass a frame class, or modify the " "frame attributes of the input frame " "instance.".format(attr)) elif not frame.is_frame_attr_default(attr): kwargs[attr] = getattr(frame, attr) frame_cls = frame.__class__ # Make sure we propagate representation/differential _type choices, # unless these are specified directly in the kwargs: kwargs.setdefault('representation_type', frame.representation_type) kwargs.setdefault('differential_type', frame.differential_type) if frame_cls is None: # frame probably a string frame_cls = _get_frame_class(frame) # Check that the new frame doesn't conflict with existing coordinate frame # if a coordinate is supplied in the args list. If the frame still had not # been set by this point and a coordinate was supplied, then use that frame. for arg in args: # this catches the "single list passed in" case. For that case we want # to allow the first argument to set the class. That's OK because # _parse_coordinate_arg goes and checks that the frames match between # the first and all the others if (isinstance(arg, (Sequence, np.ndarray)) and len(args) == 1 and len(arg) > 0): arg = arg[0] coord_frame_obj = coord_frame_cls = None if isinstance(arg, BaseCoordinateFrame): coord_frame_obj = arg elif isinstance(arg, SkyCoord): coord_frame_obj = arg.frame if coord_frame_obj is not None: coord_frame_cls = coord_frame_obj.__class__ frame_diff = coord_frame_obj.get_representation_cls('s') if frame_diff is not None: # we do this check because otherwise if there's no default # differential (i.e. it is None), the code below chokes. but # None still gets through if the user *requests* it kwargs.setdefault('differential_type', frame_diff) for attr in coord_frame_obj.get_frame_attr_names(): if (attr in kwargs and not coord_frame_obj.is_frame_attr_default(attr) and np.any(kwargs[attr] != getattr(coord_frame_obj, attr))): raise ValueError("Frame attribute '{}' has conflicting " "values between the input coordinate data " "and either keyword arguments or the " "frame specification (frame=...): " "{} =/= {}" .format(attr, getattr(coord_frame_obj, attr), kwargs[attr])) elif (attr not in kwargs and not coord_frame_obj.is_frame_attr_default(attr)): kwargs[attr] = getattr(coord_frame_obj, attr) if coord_frame_cls is not None: if frame_cls is None: frame_cls = coord_frame_cls elif frame_cls is not coord_frame_cls: raise ValueError("Cannot override frame='{}' of input " "coordinate with new frame='{}'. Instead, " "transform the coordinate." 
.format(coord_frame_cls.__name__, frame_cls.__name__)) if frame_cls is None: frame_cls = ICRS # By now, frame_cls should be set - if it's not, something went wrong if not issubclass(frame_cls, BaseCoordinateFrame): # We should hopefully never get here... raise ValueError(f'Frame class has unexpected type: {frame_cls.__name__}') for attr in frame_cls.frame_attributes: if attr in kwargs: frame_cls_kwargs[attr] = kwargs.pop(attr) if 'representation_type' in kwargs: frame_cls_kwargs['representation_type'] = _get_repr_cls( kwargs.pop('representation_type')) differential_type = kwargs.pop('differential_type', None) if differential_type is not None: frame_cls_kwargs['differential_type'] = _get_diff_cls( differential_type) return frame_cls, frame_cls_kwargs def _parse_coordinate_data(frame, args, kwargs): """ Extract coordinate data from the args and kwargs passed to SkyCoord. By this point, we assume that all of the frame attributes have been extracted from kwargs (see _get_frame_without_data()), so all that are left are (1) extra SkyCoord attributes, and (2) the coordinate data, specified in any of the valid ways. """ valid_skycoord_kwargs = {} valid_components = {} info = None # Look through the remaining kwargs to see if any are valid attribute names # by asking the frame transform graph: attr_names = list(kwargs.keys()) for attr in attr_names: if attr in frame_transform_graph.frame_attributes: valid_skycoord_kwargs[attr] = kwargs.pop(attr) # By this point in parsing the arguments, anything left in the args and # kwargs should be data. Either as individual components, or a list of # objects, or a representation, etc. # Get units of components units = _get_representation_component_units(args, kwargs) # Grab any frame-specific attr names like `ra` or `l` or `distance` from # kwargs and move them to valid_components. valid_components.update(_get_representation_attrs(frame, units, kwargs)) # Error if anything is still left in kwargs if kwargs: # The next few lines add a more user-friendly error message to a # common and confusing situation when the user specifies, e.g., # `pm_ra` when they really should be passing `pm_ra_cosdec`. The # extra error should only turn on when the positional representation # is spherical, and when the component 'pm_<lon>' is passed. pm_message = '' if frame.representation_type == SphericalRepresentation: frame_names = list(frame.get_representation_component_names().keys()) lon_name = frame_names[0] lat_name = frame_names[1] if f'pm_{lon_name}' in list(kwargs.keys()): pm_message = ('\n\n By default, most frame classes expect ' 'the longitudinal proper motion to include ' 'the cos(latitude) term, named ' '`pm_{}_cos{}`. Did you mean to pass in ' 'this component?' .format(lon_name, lat_name)) raise ValueError('Unrecognized keyword argument(s) {}{}' .format(', '.join(f"'{key}'" for key in kwargs), pm_message)) # Finally deal with the unnamed args. This figures out what the arg[0] # is and returns a dict with appropriate key/values for initializing # frame class. Note that differentials are *never* valid args, only # kwargs. So they are not accounted for here (unless they're in a frame # or SkyCoord object) if args: if len(args) == 1: # One arg which must be a coordinate. In this case coord_kwargs # will contain keys like 'ra', 'dec', 'distance' along with any # frame attributes like equinox or obstime which were explicitly # specified in the coordinate object (i.e. non-default). 
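            # (Editor's illustration, hypothetical values) e.g. for
            # SkyCoord(ICRS(1*u.deg, 2*u.deg)) this returns roughly
            # _components == {'ra': Longitude(1 deg), 'dec': Latitude(2 deg)}
            # and an empty _skycoord_kwargs, since ICRS defines no frame
            # attributes.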
_skycoord_kwargs, _components = _parse_coordinate_arg( args[0], frame, units, kwargs) # Copy other 'info' attr only if it has actually been defined. if 'info' in getattr(args[0], '__dict__', ()): info = args[0].info elif len(args) <= 3: _skycoord_kwargs = {} _components = {} frame_attr_names = frame.representation_component_names.keys() repr_attr_names = frame.representation_component_names.values() for arg, frame_attr_name, repr_attr_name, unit in zip(args, frame_attr_names, repr_attr_names, units): attr_class = frame.representation_type.attr_classes[repr_attr_name] _components[frame_attr_name] = attr_class(arg, unit=unit) else: raise ValueError('Must supply no more than three positional arguments, got {}' .format(len(args))) # The next two loops copy the component and skycoord attribute data into # their final, respective "valid_" dictionaries. For each, we check that # there are no relevant conflicts with values specified by the user # through other means: # First validate the component data for attr, coord_value in _components.items(): if attr in valid_components: raise ValueError(_conflict_err_msg .format(attr, coord_value, valid_components[attr], 'SkyCoord')) valid_components[attr] = coord_value # Now validate the custom SkyCoord attributes for attr, value in _skycoord_kwargs.items(): if (attr in valid_skycoord_kwargs and np.any(valid_skycoord_kwargs[attr] != value)): raise ValueError(_conflict_err_msg .format(attr, value, valid_skycoord_kwargs[attr], 'SkyCoord')) valid_skycoord_kwargs[attr] = value return valid_skycoord_kwargs, valid_components, info def _get_representation_component_units(args, kwargs): """ Get the unit from kwargs for the *representation* components (not the differentials). """ if 'unit' not in kwargs: units = [None, None, None] else: units = kwargs.pop('unit') if isinstance(units, str): units = [x.strip() for x in units.split(',')] # Allow for input like unit='deg' or unit='m' if len(units) == 1: units = [units[0], units[0], units[0]] elif isinstance(units, (Unit, IrreducibleUnit)): units = [units, units, units] try: units = [(Unit(x) if x else None) for x in units] units.extend(None for x in range(3 - len(units))) if len(units) > 3: raise ValueError() except Exception as err: raise ValueError('Unit keyword must have one to three unit values as ' 'tuple or comma-separated string.') from err return units def _parse_coordinate_arg(coords, frame, units, init_kwargs): """ Single unnamed arg supplied. This must be: - Coordinate frame with data - Representation - SkyCoord - List or tuple of: - String which splits into two values - Iterable with two values - SkyCoord, frame, or representation objects. Returns a dict mapping coordinate attribute names to values (or lists of values) """ from .sky_coordinate import SkyCoord is_scalar = False # Differentiate between scalar and list input # valid_kwargs = {} # Returned dict of lon, lat, and distance (optional) components = {} skycoord_kwargs = {} frame_attr_names = list(frame.representation_component_names.keys()) repr_attr_names = list(frame.representation_component_names.values()) repr_attr_classes = list(frame.representation_type.attr_classes.values()) n_attr_names = len(repr_attr_names) # Turn a single string into a list of strings for convenience if isinstance(coords, str): is_scalar = True coords = [coords] if isinstance(coords, (SkyCoord, BaseCoordinateFrame)): # Note that during parsing of `frame` it is checked that any coordinate # args have the same frame as explicitly supplied, so don't worry here. 
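        # (Editor's note) e.g. ICRS(1*u.deg, 2*u.deg) carries a
        # UnitSphericalRepresentation, so the 'distance' component is
        # dropped below rather than invented.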
if not coords.has_data: raise ValueError('Cannot initialize from a frame without coordinate data') data = coords.data.represent_as(frame.representation_type) values = [] # List of values corresponding to representation attrs repr_attr_name_to_drop = [] for repr_attr_name in repr_attr_names: # If coords did not have an explicit distance then don't include in initializers. if (isinstance(coords.data, UnitSphericalRepresentation) and repr_attr_name == 'distance'): repr_attr_name_to_drop.append(repr_attr_name) continue # Get the value from `data` in the eventual representation values.append(getattr(data, repr_attr_name)) # drop the ones that were skipped because they were distances for nametodrop in repr_attr_name_to_drop: nameidx = repr_attr_names.index(nametodrop) del repr_attr_names[nameidx] del units[nameidx] del frame_attr_names[nameidx] del repr_attr_classes[nameidx] if coords.data.differentials and 's' in coords.data.differentials: orig_vel = coords.data.differentials['s'] vel = coords.data.represent_as(frame.representation_type, frame.get_representation_cls('s')).differentials['s'] for frname, reprname in frame.get_representation_component_names('s').items(): if (reprname == 'd_distance' and not hasattr(orig_vel, reprname) and 'unit' in orig_vel.get_name()): continue values.append(getattr(vel, reprname)) units.append(None) frame_attr_names.append(frname) repr_attr_names.append(reprname) repr_attr_classes.append(vel.attr_classes[reprname]) for attr in frame_transform_graph.frame_attributes: value = getattr(coords, attr, None) use_value = (isinstance(coords, SkyCoord) or attr not in coords.get_frame_attr_names()) if use_value and value is not None: skycoord_kwargs[attr] = value elif isinstance(coords, BaseRepresentation): if coords.differentials and 's' in coords.differentials: diffs = frame.get_representation_cls('s') data = coords.represent_as(frame.representation_type, diffs) values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names] for frname, reprname in frame.get_representation_component_names('s').items(): values.append(getattr(data.differentials['s'], reprname)) units.append(None) frame_attr_names.append(frname) repr_attr_names.append(reprname) repr_attr_classes.append(data.differentials['s'].attr_classes[reprname]) else: data = coords.represent_as(frame.representation_type) values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names] elif (isinstance(coords, np.ndarray) and coords.dtype.kind in 'if' and coords.ndim == 2 and coords.shape[1] <= 3): # 2-d array of coordinate values. Handle specially for efficiency. values = coords.transpose() # Iterates over repr attrs elif isinstance(coords, (Sequence, np.ndarray)): # Handles list-like input. vals = [] is_ra_dec_representation = ('ra' in frame.representation_component_names and 'dec' in frame.representation_component_names) coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation) if any(isinstance(coord, coord_types) for coord in coords): # this parsing path is used when there are coordinate-like objects # in the list - instead of creating lists of values, we create # SkyCoords from the list elements and then combine them. 
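            # (Editor's illustration) e.g.
            # SkyCoord([ICRS(1*u.deg, 2*u.deg), ICRS(3*u.deg, 4*u.deg)])
            # takes this path: each element becomes a SkyCoord, and the
            # per-element ra/dec values are concatenated below.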
scs = [SkyCoord(coord, **init_kwargs) for coord in coords] # Check that all frames are equivalent for sc in scs[1:]: if not sc.is_equivalent_frame(scs[0]): raise ValueError("List of inputs don't have equivalent " "frames: {} != {}".format(sc, scs[0])) # Now use the first to determine if they are all UnitSpherical allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation) # get the frame attributes from the first coord in the list, because # from the above we know it matches all the others. First copy over # the attributes that are in the frame itself, then copy over any # extras in the SkyCoord for fattrnm in scs[0].frame.frame_attributes: skycoord_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm) for fattrnm in scs[0]._extra_frameattr_names: skycoord_kwargs[fattrnm] = getattr(scs[0], fattrnm) # Now combine the values, to be used below values = [] for data_attr_name, repr_attr_name in zip(frame_attr_names, repr_attr_names): if allunitsphrepr and repr_attr_name == 'distance': # if they are *all* UnitSpherical, don't give a distance continue data_vals = [] for sc in scs: data_val = getattr(sc, data_attr_name) data_vals.append(data_val.reshape(1,) if sc.isscalar else data_val) concat_vals = np.concatenate(data_vals) # Hack because np.concatenate doesn't fully work with Quantity if isinstance(concat_vals, u.Quantity): concat_vals._unit = data_val.unit values.append(concat_vals) else: # none of the elements are "frame-like" # turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]] for coord in coords: if isinstance(coord, str): coord1 = coord.split() if len(coord1) == 6: coord = (' '.join(coord1[:3]), ' '.join(coord1[3:])) elif is_ra_dec_representation: coord = _parse_ra_dec(coord) else: coord = coord1 vals.append(coord) # Assumes coord is a sequence at this point # Do some basic validation of the list elements: all have a length and all # lengths the same try: n_coords = sorted(set(len(x) for x in vals)) except Exception as err: raise ValueError('One or more elements of input sequence ' 'does not have a length.') from err if len(n_coords) > 1: raise ValueError('Input coordinate values must have ' 'same number of elements, found {}'.format(n_coords)) n_coords = n_coords[0] # Must have no more coord inputs than representation attributes if n_coords > n_attr_names: raise ValueError('Input coordinates have {} values but ' 'representation {} only accepts {}' .format(n_coords, frame.representation_type.get_name(), n_attr_names)) # Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)] # (ok since we know it is exactly rectangular). (Note: can't just use zip(*values) # because Longitude et al distinguishes list from tuple so [a1, a2, ..] is needed # while (a1, a2, ..) doesn't work. values = [list(x) for x in zip(*vals)] if is_scalar: values = [x[0] for x in values] else: raise ValueError('Cannot parse coordinates from first argument') # Finally we have a list of values from which to create the keyword args # for the frame initialization. Validate by running through the appropriate # class initializer and supply units (which might be None). 
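    # (Editor's illustration) e.g. for an ICRS frame with
    # units == [deg, deg, None], this builds
    # components['ra'] = Longitude(values[0], unit=deg), and so on.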
try: for frame_attr_name, repr_attr_class, value, unit in zip( frame_attr_names, repr_attr_classes, values, units): components[frame_attr_name] = repr_attr_class(value, unit=unit, copy=False) except Exception as err: raise ValueError('Cannot parse first argument data "{}" for attribute ' '{}'.format(value, frame_attr_name)) from err return skycoord_kwargs, components def _get_representation_attrs(frame, units, kwargs): """ Find instances of the "representation attributes" for specifying data for this frame. Pop them off of kwargs, run through the appropriate class constructor (to validate and apply unit), and put into the output valid_kwargs. "Representation attributes" are the frame-specific aliases for the underlying data values in the representation, e.g. "ra" for "lon" for many equatorial spherical representations, or "w" for "x" in the cartesian representation of Galactic. This also gets any *differential* kwargs, because they go into the same frame initializer later on. """ frame_attr_names = frame.representation_component_names.keys() repr_attr_classes = frame.representation_type.attr_classes.values() valid_kwargs = {} for frame_attr_name, repr_attr_class, unit in zip(frame_attr_names, repr_attr_classes, units): value = kwargs.pop(frame_attr_name, None) if value is not None: try: valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit) except u.UnitConversionError as err: error_message = ( f"Unit '{unit}' ({unit.physical_type}) could not be applied to '{frame_attr_name}'. " "This can occur when passing units for some coordinate components " "when other components are specified as Quantity objects. " "Either pass a list of units for all components (and unit-less coordinate data), " "or pass Quantities for all components." ) raise u.UnitConversionError(error_message) from err # also check the differentials. They aren't included in the units keyword, # so we only look for the names. differential_type = frame.differential_type if differential_type is not None: for frame_name, repr_name in frame.get_representation_component_names('s').items(): diff_attr_class = differential_type.attr_classes[repr_name] value = kwargs.pop(frame_name, None) if value is not None: valid_kwargs[frame_name] = diff_attr_class(value) return valid_kwargs def _parse_ra_dec(coord_str): """ Parse RA and Dec values from a coordinate string. Currently the following formats are supported: * space separated 6-value format * space separated <6-value format, this requires a plus or minus sign separation between RA and Dec * sign separated format * JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits * JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits Parameters ---------- coord_str : str Coordinate string to parse. Returns ------- coord : str or list of str Parsed coordinate values. 
""" if isinstance(coord_str, str): coord1 = coord_str.split() else: # This exception should never be raised from SkyCoord raise TypeError('coord_str must be a single str') if len(coord1) == 6: coord = (' '.join(coord1[:3]), ' '.join(coord1[3:])) elif len(coord1) > 2: coord = PLUS_MINUS_RE.split(coord_str) coord = (coord[0], ' '.join(coord[1:])) elif len(coord1) == 1: match_j = J_PREFIXED_RA_DEC_RE.match(coord_str) if match_j: coord = match_j.groups() if len(coord[0].split('.')[0]) == 7: coord = (f'{coord[0][0:3]} {coord[0][3:5]} {coord[0][5:]}', f'{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}') else: coord = (f'{coord[0][0:2]} {coord[0][2:4]} {coord[0][4:]}', f'{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}') else: coord = PLUS_MINUS_RE.split(coord_str) coord = (coord[0], ' '.join(coord[1:])) else: coord = coord1 return coord
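# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module; the calls below are
# hypothetical illustrations of the private helpers above, assuming the
# regexes and unit handling defined in this file):
#
#     _parse_ra_dec('1 2 3 +4 5 6')
#     # -> ('1 2 3', '+4 5 6')               six space-separated values
#     _parse_ra_dec('J123456.78+123456.78')
#     # -> ('12 34 56.78', '+12 34 56.78')   J-prefixed HHMMSS.ss form
#
#     _get_representation_component_units((), {'unit': 'deg'})
#     # -> [Unit('deg'), Unit('deg'), Unit('deg')]   one unit is broadcast
#     _get_representation_component_units((), {'unit': 'deg,deg,kpc'})
#     # -> [Unit('deg'), Unit('deg'), Unit('kpc')]
#     _get_representation_component_units((), {})
#     # -> [None, None, None]                        no 'unit' keyword
# ---------------------------------------------------------------------------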
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Framework and base classes for coordinate frames/"low-level" coordinate
classes.
"""

# Standard library
import copy
import inspect
from collections import namedtuple, defaultdict
import warnings

# Dependencies
import numpy as np

# Project
from astropy.utils.compat.misc import override__dir__
from astropy.utils.decorators import lazyproperty, format_doc
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
from astropy import units as u
from astropy.utils import ShapedLikeNDArray, check_broadcast
from .transformations import TransformGraph
from . import representation as r
from .angles import Angle
from .attributes import Attribute

__all__ = ['BaseCoordinateFrame', 'frame_transform_graph',
           'GenericFrame', 'RepresentationMapping']


# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()


def _get_repr_cls(value):
    """
    Return a valid representation class from ``value`` or raise exception.
    """
    if value in r.REPRESENTATION_CLASSES:
        value = r.REPRESENTATION_CLASSES[value]
    elif (not isinstance(value, type)
          or not issubclass(value, r.BaseRepresentation)):
        raise ValueError(
            'Representation is {!r} but must be a BaseRepresentation class '
            'or one of the string aliases {}'.format(
                value, list(r.REPRESENTATION_CLASSES)))
    return value


def _get_diff_cls(value):
    """
    Return a valid differential class from ``value`` or raise exception.

    As originally created, this is only used in the SkyCoord initializer, so
    if that is refactored, this function may no longer be necessary.
    """
    if value in r.DIFFERENTIAL_CLASSES:
        value = r.DIFFERENTIAL_CLASSES[value]
    elif (not isinstance(value, type)
          or not issubclass(value, r.BaseDifferential)):
        raise ValueError(
            'Differential is {!r} but must be a BaseDifferential class '
            'or one of the string aliases {}'.format(
                value, list(r.DIFFERENTIAL_CLASSES)))
    return value


def _get_repr_classes(base, **differentials):
    """Get valid representation and differential classes.

    Parameters
    ----------
    base : str or `~astropy.coordinates.BaseRepresentation` subclass
        class for the representation of the base coordinates.  If a string,
        it is looked up among the known representation classes.
    **differentials : dict of str or `~astropy.coordinates.BaseDifferentials`
        Keys are like for normal differentials, i.e., 's' for a first
        derivative in time, etc.  If an item is set to `None`, it will be
        guessed from the base class.

    Returns
    -------
    repr_classes : dict of subclasses
        The base class is keyed by 'base'; the others by the keys of
        ``differentials``.
    """
    base = _get_repr_cls(base)
    repr_classes = {'base': base}

    for name, differential_type in differentials.items():
        if differential_type == 'base':
            # We don't want to fail for this case.
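            # (Editor's note) i.e. look up the differential matching the base
            # representation by name, e.g. 'spherical' ->
            # SphericalDifferential, falling back to None if none is
            # registered.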
differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None) elif differential_type in r.DIFFERENTIAL_CLASSES: differential_type = r.DIFFERENTIAL_CLASSES[differential_type] elif (differential_type is not None and (not isinstance(differential_type, type) or not issubclass(differential_type, r.BaseDifferential))): raise ValueError( 'Differential is {!r} but must be a BaseDifferential class ' 'or one of the string aliases {}'.format( differential_type, list(r.DIFFERENTIAL_CLASSES))) repr_classes[name] = differential_type return repr_classes _RepresentationMappingBase = \ namedtuple('RepresentationMapping', ('reprname', 'framename', 'defaultunit')) class RepresentationMapping(_RepresentationMappingBase): """ This `~collections.namedtuple` is used with the ``frame_specific_representation_info`` attribute to tell frames what attribute names (and default units) to use for a particular representation. ``reprname`` and ``framename`` should be strings, while ``defaultunit`` can be either an astropy unit, the string ``'recommended'`` (which is degrees for Angles, nothing otherwise), or None (to indicate that no unit mapping should be done). """ def __new__(cls, reprname, framename, defaultunit='recommended'): # this trick just provides some defaults return super().__new__(cls, reprname, framename, defaultunit) base_doc = """{__doc__} Parameters ---------- data : `~astropy.coordinates.BaseRepresentation` subclass instance A representation object or ``None`` to have no data (or use the coordinate component arguments, see below). {components} representation_type : `~astropy.coordinates.BaseRepresentation` subclass, str, optional A representation class or string name of a representation class. This sets the expected input representation class, thereby changing the expected keyword arguments for the data passed in. For example, passing ``representation_type='cartesian'`` will make the classes expect position data with cartesian names, i.e. ``x, y, z`` in most cases unless overridden via ``frame_specific_representation_info``. To see this frame's names, check out ``<this frame>().representation_info``. differential_type : `~astropy.coordinates.BaseDifferential` subclass, str, dict, optional A differential class or dictionary of differential classes (currently only a velocity differential with key 's' is supported). This sets the expected input differential class, thereby changing the expected keyword arguments of the data passed in. For example, passing ``differential_type='cartesian'`` will make the classes expect velocity data with the argument names ``v_x, v_y, v_z`` unless overridden via ``frame_specific_representation_info``. To see this frame's names, check out ``<this frame>().representation_info``. copy : bool, optional If `True` (default), make copies of the input coordinate arrays. Can only be passed in as a keyword argument. {footer} """ _components = """ *args, **kwargs Coordinate components, with names that depend on the subclass. """ @format_doc(base_doc, components=_components, footer="") class BaseCoordinateFrame(ShapedLikeNDArray): """ The base class for coordinate frames. This class is intended to be subclassed to create instances of specific systems. Subclasses can implement the following attributes: * `default_representation` A subclass of `~astropy.coordinates.BaseRepresentation` that will be treated as the default representation of this frame. This is the representation assumed by default when the frame is created. 
* `default_differential` A subclass of `~astropy.coordinates.BaseDifferential` that will be treated as the default differential class of this frame. This is the differential class assumed by default when the frame is created. * `~astropy.coordinates.Attribute` class attributes Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined using a descriptor class. See the narrative documentation or built-in classes code for details. * `frame_specific_representation_info` A dictionary mapping the name or class of a representation to a list of `~astropy.coordinates.RepresentationMapping` objects that tell what names and default units should be used on this frame for the components of that representation. Unless overridden via `frame_specific_representation_info`, velocity name defaults are: * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `SphericalCosLatDifferential` proper motion components * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper motion components * ``radial_velocity`` for any ``d_distance`` component * ``v_{x,y,z}`` for `CartesianDifferential` velocity components where ``{lon}`` and ``{lat}`` are the frame names of the angular components. """ default_representation = None default_differential = None # Specifies special names and units for representation and differential # attributes. frame_specific_representation_info = {} frame_attributes = {} # Default empty frame_attributes dict def __init_subclass__(cls, **kwargs): # We first check for explicitly set values for these: default_repr = getattr(cls, 'default_representation', None) default_diff = getattr(cls, 'default_differential', None) repr_info = getattr(cls, 'frame_specific_representation_info', None) # Then, to make sure this works for subclasses-of-subclasses, we also # have to check for cases where the attribute names have already been # replaced by underscore-prefaced equivalents by the logic below: if default_repr is None or isinstance(default_repr, property): default_repr = getattr(cls, '_default_representation', None) if default_diff is None or isinstance(default_diff, property): default_diff = getattr(cls, '_default_differential', None) if repr_info is None or isinstance(repr_info, property): repr_info = getattr(cls, '_frame_specific_representation_info', None) repr_info = cls._infer_repr_info(repr_info) # Make read-only properties for the frame class attributes that should # be read-only to make them immutable after creation. # We copy attributes instead of linking to make sure there's no # accidental cross-talk between classes cls._create_readonly_property('default_representation', default_repr, 'Default representation for position data') cls._create_readonly_property('default_differential', default_diff, 'Default representation for differential data ' '(e.g., velocity)') cls._create_readonly_property('frame_specific_representation_info', copy.deepcopy(repr_info), 'Mapping for frame-specific component names') # Set the frame attributes. We first construct the attributes from # superclasses, going in reverse order to keep insertion order, # and then add any attributes from the frame now being defined # (if any old definitions are overridden, this keeps the order). # Note that we cannot simply start with the inherited frame_attributes # since we could be a mixin between multiple coordinate frames. # TODO: Should this be made to use readonly_prop_factory as well or # would it be inconvenient for getting the frame_attributes from # classes? 
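        # (Editor's note) bases are scanned in reverse below, so for
        # class C(A, B) the attributes from A override those from B, and
        # C's own attributes override both.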
frame_attrs = {} for basecls in reversed(cls.__bases__): if issubclass(basecls, BaseCoordinateFrame): frame_attrs.update(basecls.frame_attributes) for k, v in cls.__dict__.items(): if isinstance(v, Attribute): frame_attrs[k] = v cls.frame_attributes = frame_attrs # Deal with setting the name of the frame: if not hasattr(cls, 'name'): cls.name = cls.__name__.lower() elif (BaseCoordinateFrame not in cls.__bases__ and cls.name in [getattr(base, 'name', None) for base in cls.__bases__]): # This may be a subclass of a subclass of BaseCoordinateFrame, # like ICRS(BaseRADecFrame). In this case, cls.name will have been # set by init_subclass cls.name = cls.__name__.lower() # A cache that *must be unique to each frame class* - it is # insufficient to share them with superclasses, hence the need to put # them in the meta cls._frame_class_cache = {} super().__init_subclass__(**kwargs) def __init__(self, *args, copy=True, representation_type=None, differential_type=None, **kwargs): self._attr_names_with_defaults = [] self._representation = self._infer_representation(representation_type, differential_type) self._data = self._infer_data(args, copy, kwargs) # possibly None. # Set frame attributes, if any values = {} for fnm, fdefault in self.get_frame_attr_names().items(): # Read-only frame attributes are defined as FrameAttribute # descriptors which are not settable, so set 'real' attributes as # the name prefaced with an underscore. if fnm in kwargs: value = kwargs.pop(fnm) setattr(self, '_' + fnm, value) # Validate attribute by getting it. If the instance has data, # this also checks its shape is OK. If not, we do it below. values[fnm] = getattr(self, fnm) else: setattr(self, '_' + fnm, fdefault) self._attr_names_with_defaults.append(fnm) if kwargs: raise TypeError( f'Coordinate frame {self.__class__.__name__} got unexpected ' f'keywords: {list(kwargs)}') # We do ``is None`` because self._data might evaluate to false for # empty arrays or data == 0 if self._data is None: # No data: we still need to check that any non-scalar attributes # have consistent shapes. Collect them for all attributes with # size > 1 (which should be array-like and thus have a shape). shapes = {fnm: value.shape for fnm, value in values.items() if getattr(value, 'shape', ())} if shapes: if len(shapes) > 1: try: self._no_data_shape = check_broadcast(*shapes.values()) except ValueError as err: raise ValueError( f"non-scalar attributes with inconsistent shapes: {shapes}") from err # Above, we checked that it is possible to broadcast all # shapes. By getting and thus validating the attributes, # we verify that the attributes can in fact be broadcast. for fnm in shapes: getattr(self, fnm) else: self._no_data_shape = shapes.popitem()[1] else: self._no_data_shape = () # The logic of this block is not related to the previous one if self._data is not None: # This makes the cache keys backwards-compatible, but also adds # support for having differentials attached to the frame data # representation object. if 's' in self._data.differentials: # TODO: assumes a velocity unit differential key = (self._data.__class__.__name__, self._data.differentials['s'].__class__.__name__, False) else: key = (self._data.__class__.__name__, False) # Set up representation cache. 
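            # (Editor's illustration) the key looks like
            # ('UnitSphericalRepresentation',
            #  'UnitSphericalCosLatDifferential', False) when velocities are
            # attached, or ('UnitSphericalRepresentation', False) otherwise.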
self.cache['representation'][key] = self._data def _infer_representation(self, representation_type, differential_type): if representation_type is None and differential_type is None: return {'base': self.default_representation, 's': self.default_differential} if representation_type is None: representation_type = self.default_representation if (inspect.isclass(differential_type) and issubclass(differential_type, r.BaseDifferential)): # TODO: assumes the differential class is for the velocity # differential differential_type = {'s': differential_type} elif isinstance(differential_type, str): # TODO: assumes the differential class is for the velocity # differential diff_cls = r.DIFFERENTIAL_CLASSES[differential_type] differential_type = {'s': diff_cls} elif differential_type is None: if representation_type == self.default_representation: differential_type = {'s': self.default_differential} else: differential_type = {'s': 'base'} # see set_representation_cls() return _get_repr_classes(representation_type, **differential_type) def _infer_data(self, args, copy, kwargs): # if not set below, this is a frame with no data representation_data = None differential_data = None args = list(args) # need to be able to pop them if (len(args) > 0) and (isinstance(args[0], r.BaseRepresentation) or args[0] is None): representation_data = args.pop(0) # This can still be None if len(args) > 0: raise TypeError( 'Cannot create a frame with both a representation object ' 'and other positional arguments') if representation_data is not None: diffs = representation_data.differentials differential_data = diffs.get('s', None) if ((differential_data is None and len(diffs) > 0) or (differential_data is not None and len(diffs) > 1)): raise ValueError('Multiple differentials are associated ' 'with the representation object passed in ' 'to the frame initializer. Only a single ' 'velocity differential is supported. Got: ' '{}'.format(diffs)) else: representation_cls = self.get_representation_cls() # Get any representation data passed in to the frame initializer # using keyword or positional arguments for the component names repr_kwargs = {} for nmkw, nmrep in self.representation_component_names.items(): if len(args) > 0: # first gather up positional args repr_kwargs[nmrep] = args.pop(0) elif nmkw in kwargs: repr_kwargs[nmrep] = kwargs.pop(nmkw) # special-case the Spherical->UnitSpherical if no `distance` if repr_kwargs: # TODO: determine how to get rid of the part before the "try" - # currently removing it has a performance regression for # unitspherical because of the try-related overhead. # Also frames have no way to indicate what the "distance" is if repr_kwargs.get('distance', True) is None: del repr_kwargs['distance'] if (issubclass(representation_cls, r.SphericalRepresentation) and 'distance' not in repr_kwargs): representation_cls = representation_cls._unit_representation try: representation_data = representation_cls(copy=copy, **repr_kwargs) except TypeError as e: # this except clause is here to make the names of the # attributes more human-readable. Without this the names # come from the representation instead of the frame's # attribute names. 
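                    # (Editor's note) e.g. a TypeError complaining about
                    # 'lon' is re-raised below mentioning 'ra' for an ICRS
                    # frame.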
try: representation_data = ( representation_cls._unit_representation( copy=copy, **repr_kwargs)) except Exception: msg = str(e) names = self.get_representation_component_names() for frame_name, repr_name in names.items(): msg = msg.replace(repr_name, frame_name) msg = msg.replace('__init__()', f'{self.__class__.__name__}()') e.args = (msg,) raise e # Now we handle the Differential data: # Get any differential data passed in to the frame initializer # using keyword or positional arguments for the component names differential_cls = self.get_representation_cls('s') diff_component_names = self.get_representation_component_names('s') diff_kwargs = {} for nmkw, nmrep in diff_component_names.items(): if len(args) > 0: # first gather up positional args diff_kwargs[nmrep] = args.pop(0) elif nmkw in kwargs: diff_kwargs[nmrep] = kwargs.pop(nmkw) if diff_kwargs: if (hasattr(differential_cls, '_unit_differential') and 'd_distance' not in diff_kwargs): differential_cls = differential_cls._unit_differential elif len(diff_kwargs) == 1 and 'd_distance' in diff_kwargs: differential_cls = r.RadialDifferential try: differential_data = differential_cls(copy=copy, **diff_kwargs) except TypeError as e: # this except clause is here to make the names of the # attributes more human-readable. Without this the names # come from the representation instead of the frame's # attribute names. msg = str(e) names = self.get_representation_component_names('s') for frame_name, repr_name in names.items(): msg = msg.replace(repr_name, frame_name) msg = msg.replace('__init__()', f'{self.__class__.__name__}()') e.args = (msg,) raise if len(args) > 0: raise TypeError( '{}.__init__ had {} remaining unhandled arguments'.format( self.__class__.__name__, len(args))) if representation_data is None and differential_data is not None: raise ValueError("Cannot pass in differential component data " "without positional (representation) data.") if differential_data: # Check that differential data provided has units compatible # with time-derivative of representation data. # NOTE: there is no dimensionless time while lengths can be # dimensionless (u.dimensionless_unscaled). 
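            # (Editor's illustration) e.g. a 'distance' in kpc must pair
            # with a 'd_distance' convertible to kpc / s (such as km / s);
            # any other physical type triggers the ValueError below.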
for comp in representation_data.components: if (diff_comp := f'd_{comp}') in differential_data.components: current_repr_unit = representation_data._units[comp] current_diff_unit = differential_data._units[diff_comp] expected_unit = current_repr_unit / u.s if not current_diff_unit.is_equivalent(expected_unit): for key, val in self.get_representation_component_names().items(): if val == comp: current_repr_name = key break for key, val in self.get_representation_component_names('s').items(): if val == diff_comp: current_diff_name = key break raise ValueError( f'{current_repr_name} has unit "{current_repr_unit}" with physical ' f'type "{current_repr_unit.physical_type}", but {current_diff_name} ' f'has incompatible unit "{current_diff_unit}" with physical type ' f'"{current_diff_unit.physical_type}" instead of the expected ' f'"{(expected_unit).physical_type}".') representation_data = representation_data.with_differentials({'s': differential_data}) return representation_data @classmethod def _infer_repr_info(cls, repr_info): # Unless overridden via `frame_specific_representation_info`, velocity # name defaults are (see also docstring for BaseCoordinateFrame): # * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for # `SphericalCosLatDifferential` proper motion components # * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper # motion components # * ``radial_velocity`` for any `d_distance` component # * ``v_{x,y,z}`` for `CartesianDifferential` velocity components # where `{lon}` and `{lat}` are the frame names of the angular # components. if repr_info is None: repr_info = {} # the tuple() call below is necessary because if it is not there, # the iteration proceeds in a difficult-to-predict manner in the # case that one of the class objects hash is such that it gets # revisited by the iteration. The tuple() call prevents this by # making the items iterated over fixed regardless of how the dict # changes for cls_or_name in tuple(repr_info.keys()): if isinstance(cls_or_name, str): # TODO: this provides a layer of backwards compatibility in # case the key is a string, but now we want explicit classes. 
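                # (Editor's illustration) e.g. a 'spherical' key is replaced
                # by the SphericalRepresentation class itself.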
_cls = _get_repr_cls(cls_or_name) repr_info[_cls] = repr_info.pop(cls_or_name) # The default spherical names are 'lon' and 'lat' repr_info.setdefault(r.SphericalRepresentation, [RepresentationMapping('lon', 'lon'), RepresentationMapping('lat', 'lat')]) sph_component_map = {m.reprname: m.framename for m in repr_info[r.SphericalRepresentation]} repr_info.setdefault(r.SphericalCosLatDifferential, [ RepresentationMapping( 'd_lon_coslat', 'pm_{lon}_cos{lat}'.format(**sph_component_map), u.mas/u.yr), RepresentationMapping('d_lat', 'pm_{lat}'.format(**sph_component_map), u.mas/u.yr), RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s) ]) repr_info.setdefault(r.SphericalDifferential, [ RepresentationMapping('d_lon', 'pm_{lon}'.format(**sph_component_map), u.mas/u.yr), RepresentationMapping('d_lat', 'pm_{lat}'.format(**sph_component_map), u.mas/u.yr), RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s) ]) repr_info.setdefault(r.CartesianDifferential, [ RepresentationMapping('d_x', 'v_x', u.km/u.s), RepresentationMapping('d_y', 'v_y', u.km/u.s), RepresentationMapping('d_z', 'v_z', u.km/u.s)]) # Unit* classes should follow the same naming conventions # TODO: this adds some unnecessary mappings for the Unit classes, so # this could be cleaned up, but in practice doesn't seem to have any # negative side effects repr_info.setdefault(r.UnitSphericalRepresentation, repr_info[r.SphericalRepresentation]) repr_info.setdefault(r.UnitSphericalCosLatDifferential, repr_info[r.SphericalCosLatDifferential]) repr_info.setdefault(r.UnitSphericalDifferential, repr_info[r.SphericalDifferential]) return repr_info @classmethod def _create_readonly_property(cls, attr_name, value, doc=None): private_attr = '_' + attr_name def getter(self): return getattr(self, private_attr) setattr(cls, private_attr, value) setattr(cls, attr_name, property(getter, doc=doc)) @lazyproperty def cache(self): """ Cache for this frame, a dict. It stores anything that should be computed from the coordinate data (*not* from the frame attributes). This can be used in functions to store anything that might be expensive to compute but might be re-used by some other function. E.g.:: if 'user_data' in myframe.cache: data = myframe.cache['user_data'] else: myframe.cache['user_data'] = data = expensive_func(myframe.lat) If in-place modifications are made to the frame data, the cache should be cleared:: myframe.cache.clear() """ return defaultdict(dict) @property def data(self): """ The coordinate data for this object. If this frame has no data, an `ValueError` will be raised. Use `has_data` to check if data is present on this frame object. """ if self._data is None: raise ValueError('The frame object "{!r}" does not have ' 'associated data'.format(self)) return self._data @property def has_data(self): """ True if this frame has `data`, False otherwise. """ return self._data is not None @property def shape(self): return self.data.shape if self.has_data else self._no_data_shape # We have to override the ShapedLikeNDArray definitions, since our shape # does not have to be that of the data. def __len__(self): return len(self.data) def __bool__(self): return self.has_data and self.size > 0 @property def size(self): return self.data.size @property def isscalar(self): return self.has_data and self.data.isscalar @classmethod def get_frame_attr_names(cls): return {name: getattr(cls, name) for name in cls.frame_attributes} def get_representation_cls(self, which='base'): """The class used for part of this frame's data. 

        Parameters
        ----------
        which : ('base', 's', `None`)
            The class of which part to return.  'base' means the class used
            to represent the coordinates; 's' the first derivative to time,
            i.e., the class representing the proper motion and/or radial
            velocity.  If `None`, return a dict with both.

        Returns
        -------
        representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
        """
        if which is not None:
            return self._representation[which]
        else:
            return self._representation

    def set_representation_cls(self, base=None, s='base'):
        """Set representation and/or differential class for this frame's data.

        Parameters
        ----------
        base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
            The name or subclass to use to represent the coordinate data.
        s : `~astropy.coordinates.BaseDifferential` subclass, optional
            The differential subclass to use to represent any velocities,
            such as proper motion and radial velocity.  If equal to 'base',
            which is the default, it will be inferred from the
            representation.  If `None`, the representation will drop any
            differentials.
        """
        if base is None:
            base = self._representation['base']
        self._representation = _get_repr_classes(base=base, s=s)

    representation_type = property(
        fget=get_representation_cls, fset=set_representation_cls,
        doc="""The representation class used for this frame's data.

        This will be a subclass from `~astropy.coordinates.BaseRepresentation`.
        Can also be *set* using the string name of the representation.  If
        you wish to set an explicit differential class (rather than have it
        be inferred), use the ``set_representation_cls`` method.
        """)

    @property
    def differential_type(self):
        """
        The differential used for this frame's data.

        This will be a subclass from `~astropy.coordinates.BaseDifferential`.
        For simultaneous setting of representation and differentials, see the
        ``set_representation_cls`` method.
        """
        return self.get_representation_cls('s')

    @differential_type.setter
    def differential_type(self, value):
        self.set_representation_cls(s=value)

    @classmethod
    def _get_representation_info(cls):
        # This exists as a class method only to support handling frame inputs
        # without units, which are deprecated and will be removed.  This can
        # be moved into the representation_info property at that time.
        # note that if so moved, the cache should be accessed as
        # self.__class__._frame_class_cache
        if cls._frame_class_cache.get('last_reprdiff_hash', None) != r.get_reprdiff_cls_hash():
            repr_attrs = {}
            for repr_diff_cls in (list(r.REPRESENTATION_CLASSES.values())
                                  + list(r.DIFFERENTIAL_CLASSES.values())):
                repr_attrs[repr_diff_cls] = {'names': [], 'units': []}
                for c, c_cls in repr_diff_cls.attr_classes.items():
                    repr_attrs[repr_diff_cls]['names'].append(c)
                    rec_unit = u.deg if issubclass(c_cls, Angle) else None
                    repr_attrs[repr_diff_cls]['units'].append(rec_unit)

            for repr_diff_cls, mappings in cls._frame_specific_representation_info.items():

                # take the 'names' and 'units' tuples from repr_attrs,
                # and then use the RepresentationMapping objects
                # to update as needed for this frame.
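                # (Editor's illustration) for ICRS this maps the spherical
                # component names ('lon', 'lat') to ('ra', 'dec').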
nms = repr_attrs[repr_diff_cls]['names'] uns = repr_attrs[repr_diff_cls]['units'] comptomap = dict([(m.reprname, m) for m in mappings]) for i, c in enumerate(repr_diff_cls.attr_classes.keys()): if c in comptomap: mapp = comptomap[c] nms[i] = mapp.framename # need the isinstance because otherwise if it's a unit it # will try to compare to the unit string representation if not (isinstance(mapp.defaultunit, str) and mapp.defaultunit == 'recommended'): uns[i] = mapp.defaultunit # else we just leave it as recommended_units says above # Convert to tuples so that this can't mess with frame internals repr_attrs[repr_diff_cls]['names'] = tuple(nms) repr_attrs[repr_diff_cls]['units'] = tuple(uns) cls._frame_class_cache['representation_info'] = repr_attrs cls._frame_class_cache['last_reprdiff_hash'] = r.get_reprdiff_cls_hash() return cls._frame_class_cache['representation_info'] @lazyproperty def representation_info(self): """ A dictionary with the information of what attribute names for this frame apply to particular representations. """ return self._get_representation_info() def get_representation_component_names(self, which='base'): out = {} repr_or_diff_cls = self.get_representation_cls(which) if repr_or_diff_cls is None: return out data_names = repr_or_diff_cls.attr_classes.keys() repr_names = self.representation_info[repr_or_diff_cls]['names'] for repr_name, data_name in zip(repr_names, data_names): out[repr_name] = data_name return out def get_representation_component_units(self, which='base'): out = {} repr_or_diff_cls = self.get_representation_cls(which) if repr_or_diff_cls is None: return out repr_attrs = self.representation_info[repr_or_diff_cls] repr_names = repr_attrs['names'] repr_units = repr_attrs['units'] for repr_name, repr_unit in zip(repr_names, repr_units): if repr_unit: out[repr_name] = repr_unit return out representation_component_names = property(get_representation_component_names) representation_component_units = property(get_representation_component_units) def _replicate(self, data, copy=False, **kwargs): """Base for replicating a frame, with possibly different attributes. Produces a new instance of the frame using the attributes of the old frame (unless overridden) and with the data given. Parameters ---------- data : `~astropy.coordinates.BaseRepresentation` or None Data to use in the new frame instance. If `None`, it will be a data-less frame. copy : bool, optional Whether data and the attributes on the old frame should be copied (default), or passed on by reference. **kwargs Any attributes that should be overridden. """ # This is to provide a slightly nicer error message if the user tries # to use frame_obj.representation instead of frame_obj.data to get the # underlying representation object [e.g., #2890] if inspect.isclass(data): raise TypeError('Class passed as data instead of a representation ' 'instance. If you called frame.representation, this' ' returns the representation class. frame.data ' 'returns the instantiated object - you may want to ' ' use this instead.') if copy and data is not None: data = data.copy() for attr in self.get_frame_attr_names(): if (attr not in self._attr_names_with_defaults and attr not in kwargs): value = getattr(self, attr) if copy: value = value.copy() kwargs[attr] = value return self.__class__(data, copy=False, **kwargs) def replicate(self, copy=False, **kwargs): """ Return a replica of the frame, optionally with new frame attributes. 
The replica is a new frame object that has the same data as this frame object and with frame attributes overridden if they are provided as extra keyword arguments to this method. If ``copy`` is set to `True` then a copy of the internal arrays will be made. Otherwise the replica will use a reference to the original arrays when possible to save memory. The internal arrays are normally not changeable by the user so in most cases it should not be necessary to set ``copy`` to `True`. Parameters ---------- copy : bool, optional If True, the resulting object is a copy of the data. When False, references are used where possible. This rule also applies to the frame attributes. Any additional keywords are treated as frame attributes to be set on the new frame object. Returns ------- frameobj : `BaseCoordinateFrame` subclass instance Replica of this object, but possibly with new frame attributes. """ return self._replicate(self.data, copy=copy, **kwargs) def replicate_without_data(self, copy=False, **kwargs): """ Return a replica without data, optionally with new frame attributes. The replica is a new frame object without data but with the same frame attributes as this object, except where overridden by extra keyword arguments to this method. The ``copy`` keyword determines if the frame attributes are truly copied vs being references (which saves memory for cases where frame attributes are large). This method is essentially the converse of `realize_frame`. Parameters ---------- copy : bool, optional If True, the resulting object has copies of the frame attributes. When False, references are used where possible. Any additional keywords are treated as frame attributes to be set on the new frame object. Returns ------- frameobj : `BaseCoordinateFrame` subclass instance Replica of this object, but without data and possibly with new frame attributes. """ return self._replicate(None, copy=copy, **kwargs) def realize_frame(self, data, **kwargs): """ Generates a new frame with new data from another frame (which may or may not have data). Roughly speaking, the converse of `replicate_without_data`. Parameters ---------- data : `~astropy.coordinates.BaseRepresentation` The representation to use as the data for the new frame. Any additional keywords are treated as frame attributes to be set on the new frame object. In particular, `representation_type` can be specified. Returns ------- frameobj : `BaseCoordinateFrame` subclass instance A new object in *this* frame, with the same frame attributes as this one, but with the ``data`` as the coordinate data. """ return self._replicate(data, **kwargs) def represent_as(self, base, s='base', in_frame_units=False): """ Generate and return a new representation of this frame's `data` as a Representation object. Note: In order to make an in-place change of the representation of a Frame or SkyCoord object, set the ``representation`` attribute of that object to the desired new representation, or use the ``set_representation_cls`` method to also set the differential. Parameters ---------- base : subclass of BaseRepresentation or string The type of representation to generate. Must be a *class* (not an instance), or the string name of the representation class. s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional Class in which any velocities should be represented. Must be a *class* (not an instance), or the string name of the differential class. If equal to 'base' (default), inferred from the base class. If `None`, all velocity information is dropped. 
in_frame_units : bool, keyword-only Force the representation units to match the specified units particular to this frame Returns ------- newrep : BaseRepresentation-derived object A new representation object of this frame's `data`. Raises ------ AttributeError If this object had no `data` Examples -------- >>> from astropy import units as u >>> from astropy.coordinates import SkyCoord, CartesianRepresentation >>> coord = SkyCoord(0*u.deg, 0*u.deg) >>> coord.represent_as(CartesianRepresentation) # doctest: +FLOAT_CMP <CartesianRepresentation (x, y, z) [dimensionless] (1., 0., 0.)> >>> coord.representation_type = CartesianRepresentation >>> coord # doctest: +FLOAT_CMP <SkyCoord (ICRS): (x, y, z) [dimensionless] (1., 0., 0.)> """ # For backwards compatibility (because in_frame_units used to be the # 2nd argument), we check to see if `new_differential` is a boolean. If # it is, we ignore the value of `new_differential` and warn about the # position change if isinstance(s, bool): warnings.warn("The argument position for `in_frame_units` in " "`represent_as` has changed. Use as a keyword " "argument if needed.", AstropyWarning) in_frame_units = s s = 'base' # In the future, we may want to support more differentials, in which # case one probably needs to define **kwargs above and use it here. # But for now, we only care about the velocity. repr_classes = _get_repr_classes(base=base, s=s) representation_cls = repr_classes['base'] # We only keep velocity information if 's' in self.data.differentials: # For the default 'base' option in which _get_repr_classes has # given us a best guess based on the representation class, we only # use it if the class we had already is incompatible. if (s == 'base' and (self.data.differentials['s'].__class__ in representation_cls._compatible_differentials)): differential_cls = self.data.differentials['s'].__class__ else: differential_cls = repr_classes['s'] elif s is None or s == 'base': differential_cls = None else: raise TypeError('Frame data has no associated differentials ' '(i.e. the frame has no velocity data) - ' 'represent_as() only accepts a new ' 'representation.') if differential_cls: cache_key = (representation_cls.__name__, differential_cls.__name__, in_frame_units) else: cache_key = (representation_cls.__name__, in_frame_units) cached_repr = self.cache['representation'].get(cache_key) if not cached_repr: if differential_cls: # Sanity check to ensure we do not just drop radial # velocity. TODO: should Representation.represent_as # allow this transformation in the first place? if (isinstance(self.data, r.UnitSphericalRepresentation) and issubclass(representation_cls, r.CartesianRepresentation) and not isinstance(self.data.differentials['s'], (r.UnitSphericalDifferential, r.UnitSphericalCosLatDifferential, r.RadialDifferential))): raise u.UnitConversionError( 'need a distance to retrieve a cartesian representation ' 'when both radial velocity and proper motion are present, ' 'since otherwise the units cannot match.') # TODO NOTE: only supports a single differential data = self.data.represent_as(representation_cls, differential_cls) diff = data.differentials['s'] # TODO: assumes velocity else: data = self.data.represent_as(representation_cls) # If the new representation is known to this frame and has a defined # set of names and units, then use that. 
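            # (Editor's note) e.g. with in_frame_units=True, angles of an
            # ICRS frame come back in the frame-declared unit (degrees),
            # whatever the unit of the underlying data.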
new_attrs = self.representation_info.get(representation_cls) if new_attrs and in_frame_units: datakwargs = dict((comp, getattr(data, comp)) for comp in data.components) for comp, new_attr_unit in zip(data.components, new_attrs['units']): if new_attr_unit: datakwargs[comp] = datakwargs[comp].to(new_attr_unit) data = data.__class__(copy=False, **datakwargs) if differential_cls: # the original differential data_diff = self.data.differentials['s'] # If the new differential is known to this frame and has a # defined set of names and units, then use that. new_attrs = self.representation_info.get(differential_cls) if new_attrs and in_frame_units: diffkwargs = dict((comp, getattr(diff, comp)) for comp in diff.components) for comp, new_attr_unit in zip(diff.components, new_attrs['units']): # Some special-casing to treat a situation where the # input data has a UnitSphericalDifferential or a # RadialDifferential. It is re-represented to the # frame's differential class (which might be, e.g., a # dimensional Differential), so we don't want to try to # convert the empty component units if (isinstance(data_diff, (r.UnitSphericalDifferential, r.UnitSphericalCosLatDifferential)) and comp not in data_diff.__class__.attr_classes): continue elif (isinstance(data_diff, r.RadialDifferential) and comp not in data_diff.__class__.attr_classes): continue # Try to convert to requested units. Since that might # not be possible (e.g., for a coordinate with proper # motion but without distance, one cannot convert to a # cartesian differential in km/s), we allow the unit # conversion to fail. See gh-7028 for discussion. if new_attr_unit and hasattr(diff, comp): try: diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit) except Exception: pass diff = diff.__class__(copy=False, **diffkwargs) # Here we have to bypass using with_differentials() because # it has a validation check. But because # .representation_type and .differential_type don't point to # the original classes, if the input differential is a # RadialDifferential, it usually gets turned into a # SphericalCosLatDifferential (or whatever the default is) # with strange units for the d_lon and d_lat attributes. # This then causes the dictionary key check to fail (i.e. # comparison against `diff._get_deriv_key()`) data._differentials.update({'s': diff}) self.cache['representation'][cache_key] = data return self.cache['representation'][cache_key] def transform_to(self, new_frame): """ Transform this object's coordinate data to a new frame. Parameters ---------- new_frame : coordinate-like or `BaseCoordinateFrame` subclass instance The frame to transform this coordinate frame into. The frame class option is deprecated. Returns ------- transframe : coordinate-like A new object with the coordinate data represented in the ``newframe`` system. Raises ------ ValueError If there is no possible transformation route. """ from .errors import ConvertError if self._data is None: raise ValueError('Cannot transform a frame with no data') if (getattr(self.data, 'differentials', None) and hasattr(self, 'obstime') and hasattr(new_frame, 'obstime') and np.any(self.obstime != new_frame.obstime)): raise NotImplementedError('You cannot transform a frame that has ' 'velocities to another frame at a ' 'different obstime. 
If you think this ' 'should (or should not) be possible, ' 'please comment at https://github.com/astropy/astropy/issues/6280') if inspect.isclass(new_frame): warnings.warn("Transforming a frame instance to a frame class (as opposed to another " "frame instance) will not be supported in the future. Either " "explicitly instantiate the target frame, or first convert the source " "frame instance to an `astropy.coordinates.SkyCoord` and use its " "`transform_to()` method.", AstropyDeprecationWarning) # Use the default frame attributes for this class new_frame = new_frame() if hasattr(new_frame, '_sky_coord_frame'): # Input new_frame is not a frame instance or class and is most # likely a SkyCoord object. new_frame = new_frame._sky_coord_frame trans = frame_transform_graph.get_transform(self.__class__, new_frame.__class__) if trans is None: if new_frame is self.__class__: # no special transform needed, but should update frame info return new_frame.realize_frame(self.data) msg = 'Cannot transform from {0} to {1}' raise ConvertError(msg.format(self.__class__, new_frame.__class__)) return trans(self, new_frame) def is_transformable_to(self, new_frame): """ Determines if this coordinate frame can be transformed to another given frame. Parameters ---------- new_frame : `BaseCoordinateFrame` subclass or instance The proposed frame to transform into. Returns ------- transformable : bool or str `True` if this can be transformed to ``new_frame``, `False` if not, or the string 'same' if ``new_frame`` is the same system as this object but no transformation is defined. Notes ----- A return value of 'same' means the transformation will work, but it will just give back a copy of this object. The intended usage is:: if coord.is_transformable_to(some_unknown_frame): coord2 = coord.transform_to(some_unknown_frame) This will work even if ``some_unknown_frame`` turns out to be the same frame class as ``coord``. This is intended for cases where the frame is the same regardless of the frame attributes (e.g. ICRS), but be aware that it *might* also indicate that someone forgot to define the transformation between two objects of the same frame class but with different attributes. """ new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__ trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls) if trans is None: if new_frame_cls is self.__class__: return 'same' else: return False else: return True def is_frame_attr_default(self, attrnm): """ Determine whether or not a frame attribute has its value because it's the default value, or because this frame was created with that value explicitly requested. Parameters ---------- attrnm : str The name of the attribute to check. Returns ------- isdefault : bool True if the attribute ``attrnm`` has its value by default, False if it was specified at creation of this frame. """ return attrnm in self._attr_names_with_defaults @staticmethod def _frameattr_equiv(left_fattr, right_fattr): """ Determine if two frame attributes are equivalent. Implemented as a staticmethod mainly as a convenient location, although conceivably it might be desirable for subclasses to override this behavior. Primary purpose is to check for equality of representations. This aspect can actually be simplified/removed now that representations have equality defined. Secondary purpose is to check for equality of coordinate attributes, which first checks whether they themselves are in equivalent frames before checking for equality in the normal fashion.
This is because checking for equality with non-equivalent frames raises an error. """ if left_fattr is right_fattr: # shortcut if it's exactly the same object return True elif left_fattr is None or right_fattr is None: # shortcut if one attribute is unspecified and the other isn't return False left_is_repr = isinstance(left_fattr, r.BaseRepresentationOrDifferential) right_is_repr = isinstance(right_fattr, r.BaseRepresentationOrDifferential) if left_is_repr and right_is_repr: # both are representations. if (getattr(left_fattr, 'differentials', False) or getattr(right_fattr, 'differentials', False)): warnings.warn('Two representation frame attributes were ' 'checked for equivalence when at least one of' ' them has differentials. This yields False ' 'even if the underlying representations are ' 'equivalent (although this may change in ' 'future versions of Astropy)', AstropyWarning) return False if isinstance(right_fattr, left_fattr.__class__): # if same representation type, compare components. return np.all([(getattr(left_fattr, comp) == getattr(right_fattr, comp)) for comp in left_fattr.components]) else: # convert to cartesian and see if they match return np.all(left_fattr.to_cartesian().xyz == right_fattr.to_cartesian().xyz) elif left_is_repr or right_is_repr: return False left_is_coord = isinstance(left_fattr, BaseCoordinateFrame) right_is_coord = isinstance(right_fattr, BaseCoordinateFrame) if left_is_coord and right_is_coord: # both are coordinates if left_fattr.is_equivalent_frame(right_fattr): return np.all(left_fattr == right_fattr) else: return False elif left_is_coord or right_is_coord: return False return np.all(left_fattr == right_fattr) def is_equivalent_frame(self, other): """ Checks if this object is the same frame as the ``other`` object. To be the same frame, two objects must be the same frame class and have the same frame attributes. Note that it does *not* matter what, if any, data either object has. Parameters ---------- other : :class:`~astropy.coordinates.BaseCoordinateFrame` the other frame to check Returns ------- isequiv : bool True if the frames are the same, False if not. Raises ------ TypeError If ``other`` isn't a `BaseCoordinateFrame` or subclass. 
""" if self.__class__ == other.__class__: for frame_attr_name in self.get_frame_attr_names(): if not self._frameattr_equiv(getattr(self, frame_attr_name), getattr(other, frame_attr_name)): return False return True elif not isinstance(other, BaseCoordinateFrame): raise TypeError("Tried to do is_equivalent_frame on something that " "isn't a frame") else: return False def __repr__(self): frameattrs = self._frame_attrs_repr() data_repr = self._data_repr() if frameattrs: frameattrs = f' ({frameattrs})' if data_repr: return f'<{self.__class__.__name__} Coordinate{frameattrs}: {data_repr}>' else: return f'<{self.__class__.__name__} Frame{frameattrs}>' def _data_repr(self): """Returns a string representation of the coordinate data.""" if not self.has_data: return '' if self.representation_type: if (hasattr(self.representation_type, '_unit_representation') and isinstance(self.data, self.representation_type._unit_representation)): rep_cls = self.data.__class__ else: rep_cls = self.representation_type if 's' in self.data.differentials: dif_cls = self.get_representation_cls('s') dif_data = self.data.differentials['s'] if isinstance(dif_data, (r.UnitSphericalDifferential, r.UnitSphericalCosLatDifferential, r.RadialDifferential)): dif_cls = dif_data.__class__ else: dif_cls = None data = self.represent_as(rep_cls, dif_cls, in_frame_units=True) data_repr = repr(data) # Generate the list of component names out of the repr string part1, _, remainder = data_repr.partition('(') if remainder != '': comp_str, _, part2 = remainder.partition(')') comp_names = comp_str.split(', ') # Swap in frame-specific component names invnames = dict([(nmrepr, nmpref) for nmpref, nmrepr in self.representation_component_names.items()]) for i, name in enumerate(comp_names): comp_names[i] = invnames.get(name, name) # Reassemble the repr string data_repr = part1 + '(' + ', '.join(comp_names) + ')' + part2 else: data = self.data data_repr = repr(self.data) if data_repr.startswith('<' + data.__class__.__name__): # remove both the leading "<" and the space after the name, as well # as the trailing ">" data_repr = data_repr[(len(data.__class__.__name__) + 2):-1] else: data_repr = 'Data:\n' + data_repr if 's' in self.data.differentials: data_repr_spl = data_repr.split('\n') if 'has differentials' in data_repr_spl[-1]: diffrepr = repr(data.differentials['s']).split('\n') if diffrepr[0].startswith('<'): diffrepr[0] = ' ' + ' '.join(diffrepr[0].split(' ')[1:]) for frm_nm, rep_nm in self.get_representation_component_names('s').items(): diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm) if diffrepr[-1].endswith('>'): diffrepr[-1] = diffrepr[-1][:-1] data_repr_spl[-1] = '\n'.join(diffrepr) data_repr = '\n'.join(data_repr_spl) return data_repr def _frame_attrs_repr(self): """ Returns a string representation of the frame's attributes, if any. """ attr_strs = [] for attribute_name in self.get_frame_attr_names(): attr = getattr(self, attribute_name) # Check to see if this object has a way of representing itself # specific to being an attribute of a frame. (Note, this is not the # Attribute class, it's the actual object). if hasattr(attr, "_astropy_repr_in_frame"): attrstr = attr._astropy_repr_in_frame() else: attrstr = str(attr) attr_strs.append(f"{attribute_name}={attrstr}") return ', '.join(attr_strs) def _apply(self, method, *args, **kwargs): """Create a new instance, applying a method to the underlying data. 
In typical usage, the method is any of the shape-changing methods for `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those picking particular elements (``__getitem__``, ``take``, etc.), which are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be applied to the underlying arrays in the representation (e.g., ``x``, ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`), as well as to any frame attributes that have a shape, with the results used to create a new instance. Internally, it is also used to apply functions to the above parts (in particular, `~numpy.broadcast_to`). Parameters ---------- method : str or callable If str, it is the name of a method that is applied to the internal ``components``. If callable, the function is applied. *args : tuple Any positional arguments for ``method``. **kwargs : dict Any keyword arguments for ``method``. """ def apply_method(value): if isinstance(value, ShapedLikeNDArray): return value._apply(method, *args, **kwargs) else: if callable(method): return method(value, *args, **kwargs) else: return getattr(value, method)(*args, **kwargs) new = super().__new__(self.__class__) if hasattr(self, '_representation'): new._representation = self._representation.copy() new._attr_names_with_defaults = self._attr_names_with_defaults.copy() for attr in self.frame_attributes: _attr = '_' + attr if attr in self._attr_names_with_defaults: setattr(new, _attr, getattr(self, _attr)) else: value = getattr(self, _attr) if getattr(value, 'shape', ()): value = apply_method(value) elif method == 'copy' or method == 'flatten': # flatten should copy also for a single element array, but # we cannot use it directly for array scalars, since it # always returns a one-dimensional array. So, just copy. value = copy.copy(value) setattr(new, _attr, value) if self.has_data: new._data = apply_method(self.data) else: new._data = None shapes = [getattr(new, '_' + attr).shape for attr in new.frame_attributes if (attr not in new._attr_names_with_defaults and getattr(getattr(new, '_' + attr), 'shape', ()))] if shapes: new._no_data_shape = (check_broadcast(*shapes) if len(shapes) > 1 else shapes[0]) else: new._no_data_shape = () return new def __setitem__(self, item, value): if self.__class__ is not value.__class__: raise TypeError(f'can only set from object of same class: ' f'{self.__class__.__name__} vs. ' f'{value.__class__.__name__}') if not self.is_equivalent_frame(value): raise ValueError('can only set frame item from an equivalent frame') if value._data is None: raise ValueError('can only set frame with value that has data') if self._data is None: raise ValueError('cannot set frame which has no data') if self.shape == (): raise TypeError(f"scalar '{self.__class__.__name__}' frame object " f"does not support item assignment") if self._data.__class__ is not value._data.__class__: raise TypeError(f'can only set from object of same class: ' f'{self._data.__class__.__name__} vs. ' f'{value._data.__class__.__name__}') if self._data._differentials: # Can this ever occur? (Same class but different differential keys). # This exception is not tested since it is not clear how to generate it.
if self._data._differentials.keys() != value._data._differentials.keys(): raise ValueError('setitem value must have same differentials') for key, self_diff in self._data._differentials.items(): if self_diff.__class__ is not value._data._differentials[key].__class__: raise TypeError(f'can only set from object of same class: ' f'{self_diff.__class__.__name__} vs. ' f'{value._data._differentials[key].__class__.__name__}') # Set representation data self._data[item] = value._data # Frame attributes required to be identical by is_equivalent_frame, # no need to set them here. self.cache.clear() @override__dir__ def __dir__(self): """ Override the builtin `dir` behavior to include representation names. TODO: dynamic representation transforms (i.e. include cylindrical et al.). """ dir_values = set(self.representation_component_names) dir_values |= set(self.get_representation_component_names('s')) return dir_values def __getattr__(self, attr): """ Allow access to attributes on the representation and differential as found via ``self.get_representation_component_names``. TODO: We should handle dynamic representation transforms here (e.g., `.cylindrical`) instead of defining properties as below. """ # attr == '_representation' is likely from the hasattr() test in the # representation property which is used for # self.representation_component_names. # # Prevent infinite recursion here. if attr.startswith('_'): return self.__getattribute__(attr) # Raise AttributeError. repr_names = self.representation_component_names if attr in repr_names: if self._data is None: self.data # this raises the "no data" error by design - doing it # this way means we don't have to replicate the error message here rep = self.represent_as(self.representation_type, in_frame_units=True) val = getattr(rep, repr_names[attr]) return val diff_names = self.get_representation_component_names('s') if attr in diff_names: if self._data is None: self.data # see above. # TODO: this doesn't work for the case when there is only # unitspherical information. The differential_type gets set to the # default_differential, which expects full information, so the # units don't work out rep = self.represent_as(in_frame_units=True, **self.get_representation_cls(None)) val = getattr(rep.differentials['s'], diff_names[attr]) return val return self.__getattribute__(attr) # Raise AttributeError. def __setattr__(self, attr, value): # Don't slow down access of private attributes! if not attr.startswith('_'): if hasattr(self, 'representation_info'): repr_attr_names = set() for representation_attr in self.representation_info.values(): repr_attr_names.update(representation_attr['names']) if attr in repr_attr_names: raise AttributeError( f'Cannot set any frame attribute {attr}') super().__setattr__(attr, value) def __eq__(self, value): """Equality operator for frame. This implements strict equality and requires that the frames are equivalent and that the representation data are exactly equal. """ is_equiv = self.is_equivalent_frame(value) if self._data is None and value._data is None: # For Frame with no data, == compare is same as is_equivalent_frame() return is_equiv if not is_equiv: raise TypeError(f'cannot compare: objects must have equivalent frames: ' f'{self.replicate_without_data()} vs. 
' f'{value.replicate_without_data()}') if ((value._data is None and self._data is not None) or (self._data is None and value._data is not None)): raise ValueError('cannot compare: one frame has data and the other ' 'does not') return self._data == value._data def __ne__(self, value): return np.logical_not(self == value) def separation(self, other): """ Computes on-sky separation between this coordinate and another. .. note:: If the ``other`` coordinate object is in a different frame, it is first transformed to the frame of this object. This can lead to unintuitive behavior if not accounted for. Particularly of note is that ``self.separation(other)`` and ``other.separation(self)`` may not give the same answer in this case. Parameters ---------- other : `~astropy.coordinates.BaseCoordinateFrame` The coordinate to get the separation to. Returns ------- sep : `~astropy.coordinates.Angle` The on-sky separation between this and the ``other`` coordinate. Notes ----- The separation is calculated using the Vincenty formula, which is stable at all locations, including poles and antipodes [1]_. .. [1] https://en.wikipedia.org/wiki/Great-circle_distance """ from .angle_utilities import angular_separation from .angles import Angle self_unit_sph = self.represent_as(r.UnitSphericalRepresentation) other_transformed = other.transform_to(self) other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation) # Get the separation as a Quantity, convert to Angle in degrees sep = angular_separation(self_unit_sph.lon, self_unit_sph.lat, other_unit_sph.lon, other_unit_sph.lat) return Angle(sep, unit=u.degree) def separation_3d(self, other): """ Computes three-dimensional separation between this coordinate and another. Parameters ---------- other : `~astropy.coordinates.BaseCoordinateFrame` The coordinate system to get the distance to. Returns ------- sep : `~astropy.coordinates.Distance` The real-space distance between these two coordinates. Raises ------ ValueError If this or the other coordinate do not have distances. """ from .distances import Distance if issubclass(self.data.__class__, r.UnitSphericalRepresentation): raise ValueError('This object does not have a distance; cannot ' 'compute 3d separation.') # do this first just in case the conversion somehow creates a distance other_in_self_system = other.transform_to(self) if issubclass(other_in_self_system.data.__class__, r.UnitSphericalRepresentation): raise ValueError('The other object does not have a distance; ' 'cannot compute 3d separation.') # drop the differentials to ensure they don't do anything odd in the # subtraction self_car = self.data.without_differentials().represent_as(r.CartesianRepresentation) other_car = other_in_self_system.data.without_differentials().represent_as(r.CartesianRepresentation) dist = (self_car - other_car).norm() if dist.unit == u.one: return dist else: return Distance(dist) @property def cartesian(self): """ Shorthand for a cartesian representation of the coordinates in this object. """ # TODO: if representations are updated to use a full transform graph, # the representation aliases should not be hard-coded like this return self.represent_as('cartesian', in_frame_units=True) @property def cylindrical(self): """ Shorthand for a cylindrical representation of the coordinates in this object.
""" # TODO: if representations are updated to use a full transform graph, # the representation aliases should not be hard-coded like this return self.represent_as('cylindrical', in_frame_units=True) @property def spherical(self): """ Shorthand for a spherical representation of the coordinates in this object. """ # TODO: if representations are updated to use a full transform graph, # the representation aliases should not be hard-coded like this return self.represent_as('spherical', in_frame_units=True) @property def sphericalcoslat(self): """ Shorthand for a spherical representation of the positional data and a `SphericalCosLatDifferential` for the velocity data in this object. """ # TODO: if representations are updated to use a full transform graph, # the representation aliases should not be hard-coded like this return self.represent_as('spherical', 'sphericalcoslat', in_frame_units=True) @property def velocity(self): """ Shorthand for retrieving the Cartesian space-motion as a `CartesianDifferential` object. This is equivalent to calling ``self.cartesian.differentials['s']``. """ if 's' not in self.data.differentials: raise ValueError('Frame has no associated velocity (Differential) ' 'data information.') return self.cartesian.differentials['s'] @property def proper_motion(self): """ Shorthand for the two-dimensional proper motion as a `~astropy.units.Quantity` object with angular velocity units. In the returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude dimension so that ``.proper_motion[0]`` is the longitudinal proper motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper motion already includes the cos(latitude) term. """ if 's' not in self.data.differentials: raise ValueError('Frame has no associated velocity (Differential) ' 'data information.') sph = self.represent_as('spherical', 'sphericalcoslat', in_frame_units=True) pm_lon = sph.differentials['s'].d_lon_coslat pm_lat = sph.differentials['s'].d_lat return np.stack((pm_lon.value, pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit @property def radial_velocity(self): """ Shorthand for the radial or line-of-sight velocity as a `~astropy.units.Quantity` object. """ if 's' not in self.data.differentials: raise ValueError('Frame has no associated velocity (Differential) ' 'data information.') sph = self.represent_as('spherical', in_frame_units=True) return sph.differentials['s'].d_distance class GenericFrame(BaseCoordinateFrame): """ A frame object that can't store data but can hold any arbitrary frame attributes. Mostly useful as a utility for the high-level class to store intermediate frame attributes. Parameters ---------- frame_attrs : dict A dictionary of attributes to be used as the frame attributes for this frame. """ name = None # it's not a "real" frame so it doesn't have a name def __init__(self, frame_attrs): self.frame_attributes = {} for name, default in frame_attrs.items(): self.frame_attributes[name] = Attribute(default) setattr(self, '_' + name, default) super().__init__(None) def __getattr__(self, name): if '_' + name in self.__dict__: return getattr(self, '_' + name) else: raise AttributeError(f'no {name}') def __setattr__(self, name, value): if name in self.get_frame_attr_names(): raise AttributeError(f"can't set frame attribute '{name}'") else: super().__setattr__(name, value)
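# ----------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the module):
# the public BaseCoordinateFrame API defined above -- ``represent_as``,
# ``transform_to``, ``is_equivalent_frame`` and ``separation`` -- as it is
# typically exercised through the built-in ICRS and Galactic frames.  The
# coordinate values are arbitrary example numbers.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    from astropy import units as u
    from astropy.coordinates import ICRS, Galactic, CartesianRepresentation

    icrs = ICRS(ra=10.68458 * u.deg, dec=41.26917 * u.deg)

    # Re-represent the data; in_frame_units=True applies the frame's
    # preferred component units (see represent_as above).
    cart = icrs.represent_as(CartesianRepresentation, in_frame_units=True)

    # Transform to another frame *instance* (passing a class is deprecated).
    gal = icrs.transform_to(Galactic())

    # Frames of different classes are never equivalent.
    assert not icrs.is_equivalent_frame(gal)

    # separation() transforms `other` into this frame first, so the same
    # point expressed in two frames is ~0 deg apart.
    print(cart)
    print(icrs.separation(gal).deg)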
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains convenience functions implementing some of the algorithms contained within Jean Meeus, 'Astronomical Algorithms', second edition, 1998, Willmann-Bell. """ import numpy as np from numpy.polynomial.polynomial import polyval import erfa from astropy.utils import deprecated from astropy import units as u from . import ICRS, SkyCoord, GeocentricTrueEcliptic from .builtin_frames.utils import get_jd12 __all__ = ["calc_moon"] # Meeus 1998: table 47.A # D M M' F l r _MOON_L_R = ( (0, 0, 1, 0, 6288774, -20905355), (2, 0, -1, 0, 1274027, -3699111), (2, 0, 0, 0, 658314, -2955968), (0, 0, 2, 0, 213618, -569925), (0, 1, 0, 0, -185116, 48888), (0, 0, 0, 2, -114332, -3149), (2, 0, -2, 0, 58793, 246158), (2, -1, -1, 0, 57066, -152138), (2, 0, 1, 0, 53322, -170733), (2, -1, 0, 0, 45758, -204586), (0, 1, -1, 0, -40923, -129620), (1, 0, 0, 0, -34720, 108743), (0, 1, 1, 0, -30383, 104755), (2, 0, 0, -2, 15327, 10321), (0, 0, 1, 2, -12528, 0), (0, 0, 1, -2, 10980, 79661), (4, 0, -1, 0, 10675, -34782), (0, 0, 3, 0, 10034, -23210), (4, 0, -2, 0, 8548, -21636), (2, 1, -1, 0, -7888, 24208), (2, 1, 0, 0, -6766, 30824), (1, 0, -1, 0, -5163, -8379), (1, 1, 0, 0, 4987, -16675), (2, -1, 1, 0, 4036, -12831), (2, 0, 2, 0, 3994, -10445), (4, 0, 0, 0, 3861, -11650), (2, 0, -3, 0, 3665, 14403), (0, 1, -2, 0, -2689, -7003), (2, 0, -1, 2, -2602, 0), (2, -1, -2, 0, 2390, 10056), (1, 0, 1, 0, -2348, 6322), (2, -2, 0, 0, 2236, -9884), (0, 1, 2, 0, -2120, 5751), (0, 2, 0, 0, -2069, 0), (2, -2, -1, 0, 2048, -4950), (2, 0, 1, -2, -1773, 4130), (2, 0, 0, 2, -1595, 0), (4, -1, -1, 0, 1215, -3958), (0, 0, 2, 2, -1110, 0), (3, 0, -1, 0, -892, 3258), (2, 1, 1, 0, -810, 2616), (4, -1, -2, 0, 759, -1897), (0, 2, -1, 0, -713, -2117), (2, 2, -1, 0, -700, 2354), (2, 1, -2, 0, 691, 0), (2, -1, 0, -2, 596, 0), (4, 0, 1, 0, 549, -1423), (0, 0, 4, 0, 537, -1117), (4, -1, 0, 0, 520, -1571), (1, 0, -2, 0, -487, -1739), (2, 1, 0, -2, -399, 0), (0, 0, 2, -2, -381, -4421), (1, 1, 1, 0, 351, 0), (3, 0, -2, 0, -340, 0), (4, 0, -3, 0, 330, 0), (2, -1, 2, 0, 327, 0), (0, 2, 1, 0, -323, 1165), (1, 1, -1, 0, 299, 0), (2, 0, 3, 0, 294, 0), (2, 0, -1, -2, 0, 8752) ) # Meeus 1998: table 47.B # D M M' F b _MOON_B = ( (0, 0, 0, 1, 5128122), (0, 0, 1, 1, 280602), (0, 0, 1, -1, 277693), (2, 0, 0, -1, 173237), (2, 0, -1, 1, 55413), (2, 0, -1, -1, 46271), (2, 0, 0, 1, 32573), (0, 0, 2, 1, 17198), (2, 0, 1, -1, 9266), (0, 0, 2, -1, 8822), (2, -1, 0, -1, 8216), (2, 0, -2, -1, 4324), (2, 0, 1, 1, 4200), (2, 1, 0, -1, -3359), (2, -1, -1, 1, 2463), (2, -1, 0, 1, 2211), (2, -1, -1, -1, 2065), (0, 1, -1, -1, -1870), (4, 0, -1, -1, 1828), (0, 1, 0, 1, -1794), (0, 0, 0, 3, -1749), (0, 1, -1, 1, -1565), (1, 0, 0, 1, -1491), (0, 1, 1, 1, -1475), (0, 1, 1, -1, -1410), (0, 1, 0, -1, -1344), (1, 0, 0, -1, -1335), (0, 0, 3, 1, 1107), (4, 0, 0, -1, 1021), (4, 0, -1, 1, 833), # second column (0, 0, 1, -3, 777), (4, 0, -2, 1, 671), (2, 0, 0, -3, 607), (2, 0, 2, -1, 596), (2, -1, 1, -1, 491), (2, 0, -2, 1, -451), (0, 0, 3, -1, 439), (2, 0, 2, 1, 422), (2, 0, -3, -1, 421), (2, 1, -1, 1, -366), (2, 1, 0, 1, -351), (4, 0, 0, 1, 331), (2, -1, 1, 1, 315), (2, -2, 0, -1, 302), (0, 0, 1, 3, -283), (2, 1, 1, -1, -229), (1, 1, 0, -1, 223), (1, 1, 0, 1, 223), (0, 1, -2, -1, -220), (2, 1, -1, -1, -220), (1, 0, 1, 1, -185), (2, -1, -2, -1, 181), (0, 1, 2, 1, -177), (4, 0, -2, -1, 176), (4, -1, -1, -1, 166), (1, 0, 1, -1, -164), (4, 0, 1, -1, 132), (1, 0, -1, -1, -119), (4, -1, 0, -1, 115), (2, -2, 0, 1, 107) 
) """ Coefficients of polynomials for various terms: Lc : Mean longitude of Moon, w.r.t mean Equinox of date D : Mean elongation of the Moon M: Sun's mean anomaly Mc : Moon's mean anomaly F : Moon's argument of latitude (mean distance of Moon from its ascending node). """ _coLc = (2.18316448e+02, 4.81267881e+05, -1.57860000e-03, 1.85583502e-06, -1.53388349e-08) _coD = (2.97850192e+02, 4.45267111e+05, -1.88190000e-03, 1.83194472e-06, -8.84447000e-09) _coM = (3.57529109e+02, 3.59990503e+04, -1.53600000e-04, 4.08329931e-08) _coMc = (1.34963396e+02, 4.77198868e+05, 8.74140000e-03, 1.43474081e-05, -6.79717238e-08) _coF = (9.32720950e+01, 4.83202018e+05, -3.65390000e-03, -2.83607487e-07, 1.15833246e-09) _coA1 = (119.75, 131.849) _coA2 = (53.09, 479264.290) _coA3 = (313.45, 481266.484) _coE = (1.0, -0.002516, -0.0000074) @deprecated(since="5.0", alternative="astropy.coordinates.get_moon", message=("The private calc_moon function has been deprecated, " "as its functionality is now available in ERFA. " "Note that the coordinate system was not interpreted " "quite correctly, leading to small inaccuracies. Please " "use the public get_moon or get_body functions instead.")) def calc_moon(t): """ Lunar position model ELP2000-82 of (Chapront-Touze' and Chapront, 1983, 124, 50) This is the simplified version of Jean Meeus, Astronomical Algorithms, second edition, 1998, Willmann-Bell. Meeus claims approximate accuracy of 10" in longitude and 4" in latitude, with no specified time range. Tests against JPL ephemerides show accuracy of 10 arcseconds and 50 km over the date range CE 1950-2050. Parameters ---------- t : `~astropy.time.Time` Time of observation. Returns ------- skycoord : `~astropy.coordinates.SkyCoord` ICRS Coordinate for the body """ # number of centuries since J2000.0. # This should strictly speaking be in Ephemeris Time, but TDB or TT # will introduce error smaller than intrinsic accuracy of algorithm. T = (t.tdb.jyear-2000.0)/100. # constants that are needed for all calculations Lc = u.Quantity(polyval(T, _coLc), u.deg) D = u.Quantity(polyval(T, _coD), u.deg) M = u.Quantity(polyval(T, _coM), u.deg) Mc = u.Quantity(polyval(T, _coMc), u.deg) F = u.Quantity(polyval(T, _coF), u.deg) A1 = u.Quantity(polyval(T, _coA1), u.deg) A2 = u.Quantity(polyval(T, _coA2), u.deg) A3 = u.Quantity(polyval(T, _coA3), u.deg) E = polyval(T, _coE) suml = sumr = 0.0 for DNum, MNum, McNum, FNum, LFac, RFac in _MOON_L_R: corr = E ** abs(MNum) suml += LFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum) sumr += RFac*corr*np.cos(D*DNum+M*MNum+Mc*McNum+F*FNum) sumb = 0.0 for DNum, MNum, McNum, FNum, BFac in _MOON_B: corr = E ** abs(MNum) sumb += BFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum) suml += (3958*np.sin(A1) + 1962*np.sin(Lc-F) + 318*np.sin(A2)) sumb += (-2235*np.sin(Lc) + 382*np.sin(A3) + 175*np.sin(A1-F) + 175*np.sin(A1+F) + 127*np.sin(Lc-Mc) - 115*np.sin(Lc+Mc)) # ensure units suml = suml*u.microdegree sumb = sumb*u.microdegree # nutation of longitude jd1, jd2 = get_jd12(t, 'tt') nut, _ = erfa.nut06a(jd1, jd2) nut = nut*u.rad # calculate ecliptic coordinates lon = Lc + suml + nut lat = sumb dist = (385000.56+sumr/1000)*u.km # Meeus algorithm gives GeocentricTrueEcliptic coordinates ecliptic_coo = GeocentricTrueEcliptic(lon, lat, distance=dist, obstime=t, equinox=t) return SkyCoord(ecliptic_coo.transform_to(ICRS()))
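# ----------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the module):
# calc_moon is deprecated in favour of the ERFA-backed public functions,
# so new code should call astropy.coordinates.get_moon (or get_body)
# instead.  The date below is an arbitrary example.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    import warnings
    from astropy.time import Time
    from astropy.coordinates import get_moon

    t = Time('2021-06-21 00:00:00', scale='utc')

    # Preferred: ERFA-based lunar ephemeris.
    moon = get_moon(t)

    # Deprecated Meeus-based path, kept here only for comparison; this
    # emits an AstropyDeprecationWarning.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        moon_meeus = calc_moon(t)

    # The two should agree at roughly the ~10 arcsecond level quoted in
    # the docstring above.
    print(moon.separation(moon_meeus).to(u.arcsec))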
# Licensed under a 3-clause BSD style license - see LICENSE.rst from warnings import warn import collections import socket import json import urllib.request import urllib.error import urllib.parse import numpy as np import erfa from astropy import units as u from astropy import constants as consts from astropy.units.quantity import QuantityInfoBase from astropy.utils import data from astropy.utils.decorators import format_doc from astropy.utils.exceptions import AstropyUserWarning from .angles import Angle, Longitude, Latitude from .representation import (BaseRepresentation, CartesianRepresentation, CartesianDifferential) from .matrix_utilities import matrix_transpose from .errors import UnknownSiteException __all__ = ['EarthLocation', 'BaseGeodeticRepresentation', 'WGS84GeodeticRepresentation', 'WGS72GeodeticRepresentation', 'GRS80GeodeticRepresentation'] GeodeticLocation = collections.namedtuple('GeodeticLocation', ['lon', 'lat', 'height']) ELLIPSOIDS = {} """Available ellipsoids (defined in erfam.h, with numbers exposed in erfa).""" # Note: they get filled by the creation of the geodetic classes. OMEGA_EARTH = ((1.002_737_811_911_354_48 * u.cycle/u.day) .to(1/u.s, u.dimensionless_angles())) """ Rotational velocity of Earth, following SOFA's pvtob. In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value in SI seconds, so multiply by the ratio of stellar to solar day. See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth Seidelmann (1992), University Science Books. The constant is the conventional, exact one (IERS conventions 2003); see http://hpiers.obspm.fr/eop-pc/index.php?index=constants. """ def _check_ellipsoid(ellipsoid=None, default='WGS84'): if ellipsoid is None: ellipsoid = default if ellipsoid not in ELLIPSOIDS: raise ValueError(f'Ellipsoid {ellipsoid} not among known ones ({ELLIPSOIDS})') return ellipsoid def _get_json_result(url, err_str, use_google): # need to do this here to prevent a series of complicated circular imports from .name_resolve import NameResolveError try: # Retrieve JSON response from Google maps API resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout) resp_data = json.loads(resp.read().decode('utf8')) except urllib.error.URLError as e: # This catches a timeout error, see: # http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python if isinstance(e.reason, socket.timeout): raise NameResolveError(err_str.format(msg="connection timed out")) from e else: raise NameResolveError(err_str.format(msg=e.reason)) from e except socket.timeout: # There are some cases where urllib2 does not catch socket.timeout # especially while receiving response data on an already previously # working request raise NameResolveError(err_str.format(msg="connection timed out")) if use_google: results = resp_data.get('results', []) if resp_data.get('status', None) != 'OK': raise NameResolveError(err_str.format(msg="unknown failure with " "Google API")) else: # OpenStreetMap returns a list results = resp_data if not results: raise NameResolveError(err_str.format(msg="no results returned")) return results class EarthLocationInfo(QuantityInfoBase): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ _represent_as_dict_attrs = ('x', 'y', 'z', 'ellipsoid') def _construct_from_dict(self, map): # Need to pop ellipsoid off and update post-instantiation. 
This is # on the to-fix list in #4261. ellipsoid = map.pop('ellipsoid') out = self._parent_cls(**map) out.ellipsoid = ellipsoid return out def new_like(self, cols, length, metadata_conflicts='warn', name=None): """ Return a new EarthLocation instance which is consistent with the input ``cols`` and has ``length`` rows. This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- cols : list List of input columns length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : EarthLocation (or subclass) Empty instance of this class consistent with ``cols`` """ # Very similar to QuantityInfo.new_like, but the creation of the # map is different enough that this needs its own routine. # Get merged info attributes shape, dtype, format, description. attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, ('meta', 'format', 'description')) # The above raises an error if the dtypes do not match, but returns # just the string representation, which is not useful, so remove. attrs.pop('dtype') # Make empty EarthLocation using the dtype and unit of the first column. # Use zeros so we do not get problems for possible conversion to # geodetic coordinates. shape = (length,) + attrs.pop('shape') data = u.Quantity(np.zeros(shape=shape, dtype=cols[0].dtype), unit=cols[0].unit, copy=False) # Get arguments needed to reconstruct class map = {key: (data[key] if key in 'xyz' else getattr(cols[-1], key)) for key in self._represent_as_dict_attrs} out = self._construct_from_dict(map) # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out class EarthLocation(u.Quantity): """ Location on the Earth. Initialization is first attempted assuming geocentric (x, y, z) coordinates are given; if that fails, another attempt is made assuming geodetic coordinates (longitude, latitude, height above a reference ellipsoid). When using the geodetic forms, longitudes are measured increasing to the east, so west longitudes are negative. Internally, the coordinates are stored as geocentric. To ensure a specific type of coordinates is used, use the corresponding class methods (`from_geocentric` and `from_geodetic`) or initialize the arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``, ``height`` for geodetic). See the class methods for details. Notes ----- This class fits into the coordinates transformation framework in that it encodes a position on the `~astropy.coordinates.ITRS` frame. To get a proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs`` property. """ _ellipsoid = 'WGS84' _location_dtype = np.dtype({'names': ['x', 'y', 'z'], 'formats': [np.float64]*3}) _array_dtype = np.dtype((np.float64, (3,))) info = EarthLocationInfo() def __new__(cls, *args, **kwargs): # TODO: needs copy argument and better dealing with inputs.
if (len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], EarthLocation)): return args[0].copy() try: self = cls.from_geocentric(*args, **kwargs) except (u.UnitsError, TypeError) as exc_geocentric: try: self = cls.from_geodetic(*args, **kwargs) except Exception as exc_geodetic: raise TypeError('Coordinates could not be parsed as either ' 'geocentric or geodetic, with respective ' 'exceptions "{}" and "{}"' .format(exc_geocentric, exc_geodetic)) return self @classmethod def from_geocentric(cls, x, y, z, unit=None): """ Location on Earth, initialized from geocentric coordinates. Parameters ---------- x, y, z : `~astropy.units.Quantity` or array-like Cartesian coordinates. If not quantities, ``unit`` should be given. unit : unit-like or None Physical unit of the coordinate values. If ``x``, ``y``, and/or ``z`` are quantities, they will be converted to this unit. Raises ------ astropy.units.UnitsError If the units on ``x``, ``y``, and ``z`` do not match or an invalid unit is given. ValueError If the shapes of ``x``, ``y``, and ``z`` do not match. TypeError If ``x`` is not a `~astropy.units.Quantity` and no unit is given. """ if unit is None: try: unit = x.unit except AttributeError: raise TypeError("Geocentric coordinates should be Quantities " "unless an explicit unit is given.") from None else: unit = u.Unit(unit) if unit.physical_type != 'length': raise u.UnitsError("Geocentric coordinates should be in " "units of length.") try: x = u.Quantity(x, unit, copy=False) y = u.Quantity(y, unit, copy=False) z = u.Quantity(z, unit, copy=False) except u.UnitsError: raise u.UnitsError("Geocentric coordinate units should all be " "consistent.") x, y, z = np.broadcast_arrays(x, y, z) struc = np.empty(x.shape, cls._location_dtype) struc['x'], struc['y'], struc['z'] = x, y, z return super().__new__(cls, struc, unit, copy=False) @classmethod def from_geodetic(cls, lon, lat, height=0., ellipsoid=None): """ Location on Earth, initialized from geodetic coordinates. Parameters ---------- lon : `~astropy.coordinates.Longitude` or float Earth East longitude. Can be anything that initialises an `~astropy.coordinates.Angle` object (if float, in degrees). lat : `~astropy.coordinates.Latitude` or float Earth latitude. Can be anything that initialises an `~astropy.coordinates.Latitude` object (if float, in degrees). height : `~astropy.units.Quantity` ['length'] or float, optional Height above reference ellipsoid (if float, in meters; default: 0). ellipsoid : str, optional Name of the reference ellipsoid to use (default: 'WGS84'). Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'. Raises ------ astropy.units.UnitsError If the units on ``lon`` and ``lat`` are inconsistent with angular ones, or that on ``height`` with a length. ValueError If ``lon``, ``lat``, and ``height`` do not have the same shape, or if ``ellipsoid`` is not recognized as among the ones implemented. Notes ----- For the conversion to geocentric coordinates, the ERFA routine ``gd2gc`` is used. See https://github.com/liberfa/erfa """ ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid) # As wrapping fails on readonly input, we do so manually lon = Angle(lon, u.degree, copy=False).wrap_at(180 * u.degree) lat = Latitude(lat, u.degree, copy=False) # don't convert to m by default, so we can use the height unit below. if not isinstance(height, u.Quantity): height = u.Quantity(height, u.m, copy=False) # get geocentric coordinates. 
geodetic = ELLIPSOIDS[ellipsoid](lon, lat, height, copy=False) xyz = geodetic.to_cartesian().get_xyz(xyz_axis=-1) << height.unit self = xyz.view(cls._location_dtype, cls).reshape(geodetic.shape) self._ellipsoid = ellipsoid return self @classmethod def of_site(cls, site_name): """ Return an object of this class for a known observatory/site by name. This is intended as a quick convenience function to get basic site information, not a fully-featured exhaustive registry of observatories and all their properties. Additional information about the site is stored in the ``.info.meta`` dictionary of sites obtained using this method (see the examples below). .. note:: When this function is called, it will attempt to download site information from the astropy data server. If you would like a site to be added, issue a pull request to the `astropy-data repository <https://github.com/astropy/astropy-data>`_ . If a site cannot be found in the registry (i.e., an internet connection is not available), it will fall back on a built-in list. In the future, this bundled list might include a version-controlled list of canonical observatories extracted from the online version, but it currently only contains the Greenwich Royal Observatory as an example case. Parameters ---------- site_name : str Name of the observatory (case-insensitive). Returns ------- site : `~astropy.coordinates.EarthLocation` (or subclass) instance The location of the observatory. The returned class will be the same as this class. Examples -------- >>> from astropy.coordinates import EarthLocation >>> keck = EarthLocation.of_site('Keck Observatory') # doctest: +REMOTE_DATA >>> keck.geodetic # doctest: +REMOTE_DATA +FLOAT_CMP GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>) >>> keck.info # doctest: +REMOTE_DATA name = W. M. Keck Observatory dtype = void192 unit = m class = EarthLocation n_bad = 0 >>> keck.info.meta # doctest: +REMOTE_DATA {'source': 'IRAF Observatory Database', 'timezone': 'US/Hawaii'} See Also -------- get_site_names : the list of sites that this function can access """ # noqa registry = cls._get_site_registry() try: el = registry[site_name] except UnknownSiteException as e: raise UnknownSiteException(e.site, 'EarthLocation.get_site_names', close_names=e.close_names) from e if cls is el.__class__: return el else: newel = cls.from_geodetic(*el.to_geodetic()) newel.info.name = el.info.name return newel @classmethod def of_address(cls, address, get_height=False, google_api_key=None): """ Return an object of this class for a given address by querying either the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding API [2]_, which requires a specified API key. This is intended as a quick convenience function to get easy access to locations. If you need to specify a precise location, you should use the initializer directly and pass in a longitude, latitude, and elevation. In the background, this just issues a web query to either of the APIs noted above. This is not meant to be abused! Both OpenStreetMap and Google use IP-based query limiting and will ban your IP if you send more than a few thousand queries per hour [2]_. .. warning:: If the query returns more than one location (e.g., searching on ``address='springfield'``), this function will use the **first** returned location. Parameters ---------- address : str The address to get the location for.
As per the Google maps API, this can be a fully specified street address (e.g., 123 Main St., New York, NY) or a city name (e.g., Danbury, CT), or etc. get_height : bool, optional This only works when using the Google API! See the ``google_api_key`` block below. Use the retrieved location to perform a second query to the Google maps elevation API to retrieve the height of the input address [3]_. google_api_key : str, optional A Google API key with the Geocoding API and (optionally) the elevation API enabled. See [4]_ for more information. Returns ------- location : `~astropy.coordinates.EarthLocation` (or subclass) instance The location of the input address. Will be type(this class) References ---------- .. [1] https://nominatim.openstreetmap.org/ .. [2] https://developers.google.com/maps/documentation/geocoding/start .. [3] https://developers.google.com/maps/documentation/elevation/start .. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key """ use_google = google_api_key is not None # Fail fast if invalid options are passed: if not use_google and get_height: raise ValueError( 'Currently, `get_height` only works when using ' 'the Google geocoding API, which requires passing ' 'a Google API key with `google_api_key`. See: ' 'https://developers.google.com/maps/documentation/geocoding/get-api-key ' 'for information on obtaining an API key.') if use_google: # Google pars = urllib.parse.urlencode({'address': address, 'key': google_api_key}) geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}" else: # OpenStreetMap pars = urllib.parse.urlencode({'q': address, 'format': 'json'}) geo_url = f"https://nominatim.openstreetmap.org/search?{pars}" # get longitude and latitude location err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}" geo_result = _get_json_result(geo_url, err_str=err_str, use_google=use_google) if use_google: loc = geo_result[0]['geometry']['location'] lat = loc['lat'] lon = loc['lng'] else: loc = geo_result[0] lat = float(loc['lat']) # strings are returned by OpenStreetMap lon = float(loc['lon']) if get_height: pars = {'locations': f'{lat:.8f},{lon:.8f}', 'key': google_api_key} pars = urllib.parse.urlencode(pars) ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}" err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}" ele_result = _get_json_result(ele_url, err_str=err_str, use_google=use_google) height = ele_result[0]['elevation']*u.meter else: height = 0. return cls.from_geodetic(lon=lon*u.deg, lat=lat*u.deg, height=height) @classmethod def get_site_names(cls): """ Get list of names of observatories for use with `~astropy.coordinates.EarthLocation.of_site`. .. note:: When this function is called, it will first attempt to download site information from the astropy data server. If it cannot (i.e., an internet connection is not available), it will fall back on the list included with astropy (which is a limited and dated set of sites). If you think a site should be added, issue a pull request to the `astropy-data repository <https://github.com/astropy/astropy-data>`_ . Returns ------- names : list of str List of valid observatory names See Also -------- of_site : Gets the actual location object for one of the sites names this returns. """ return cls._get_site_registry().names @classmethod def _get_site_registry(cls, force_download=False, force_builtin=False): """ Gets the site registry. The first time this either downloads or loads from the data file packaged with astropy. 
Subsequent calls will use the cached version unless explicitly overridden. Parameters ---------- force_download : bool or str If not False, force replacement of the cached registry with a downloaded version. If a str, that will be used as the URL to download from (if just True, the default URL will be used). force_builtin : bool If True, load from the data file bundled with astropy and set the cache to that. Returns ------- reg : astropy.coordinates.sites.SiteRegistry """ # need to do this here at the bottom to avoid circular dependencies from .sites import get_builtin_sites, get_downloaded_sites if force_builtin and force_download: raise ValueError('Cannot have both force_builtin and force_download True') if force_builtin: reg = cls._site_registry = get_builtin_sites() else: reg = getattr(cls, '_site_registry', None) if force_download or not reg: try: if isinstance(force_download, str): reg = get_downloaded_sites(force_download) else: reg = get_downloaded_sites() except OSError: if force_download: raise msg = ('Could not access the online site list. Falling ' 'back on the built-in version, which is rather ' 'limited. If you want to retry the download, do ' '{0}._get_site_registry(force_download=True)') warn(AstropyUserWarning(msg.format(cls.__name__))) reg = get_builtin_sites() cls._site_registry = reg return reg @property def ellipsoid(self): """The default ellipsoid used to convert to geodetic coordinates.""" return self._ellipsoid @ellipsoid.setter def ellipsoid(self, ellipsoid): self._ellipsoid = _check_ellipsoid(ellipsoid) @property def geodetic(self): """Convert to geodetic coordinates for the default ellipsoid.""" return self.to_geodetic() def to_geodetic(self, ellipsoid=None): """Convert to geodetic coordinates. Parameters ---------- ellipsoid : str, optional Reference ellipsoid to use. Default is the one the coordinates were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72' Returns ------- lon, lat, height : `~astropy.units.Quantity` The tuple is a ``GeodeticLocation`` namedtuple and is comprised of instances of `~astropy.coordinates.Longitude`, `~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`. Raises ------ ValueError if ``ellipsoid`` is not recognized as among the ones implemented. Notes ----- For the conversion to geodetic coordinates, the ERFA routine ``gc2gd`` is used. See https://github.com/liberfa/erfa """ ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid) xyz = self.view(self._array_dtype, u.Quantity) llh = CartesianRepresentation(xyz, xyz_axis=-1, copy=False).represent_as( ELLIPSOIDS[ellipsoid]) return GeodeticLocation( Longitude(llh.lon, u.deg, wrap_angle=180*u.deg, copy=False), llh.lat << u.deg, llh.height << self.unit) @property def lon(self): """Longitude of the location, for the default ellipsoid.""" return self.geodetic[0] @property def lat(self): """Latitude of the location, for the default ellipsoid.""" return self.geodetic[1] @property def height(self): """Height of the location, for the default ellipsoid.""" return self.geodetic[2] # mostly for symmetry with geodetic and to_geodetic. @property def geocentric(self): """Convert to a tuple with X, Y, and Z as quantities""" return self.to_geocentric() def to_geocentric(self): """Convert to a tuple with X, Y, and Z as quantities""" return (self.x, self.y, self.z) def get_itrs(self, obstime=None): """ Generates an `~astropy.coordinates.ITRS` object with the location of this object at the requested ``obstime``. 
Parameters ---------- obstime : `~astropy.time.Time` or None The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or if None, the default ``obstime`` will be used. Returns ------- itrs : `~astropy.coordinates.ITRS` The new object in the ITRS frame """ # Broadcast for a single position at multiple times, but don't attempt # to be more general here. if obstime and self.size == 1 and obstime.shape: self = np.broadcast_to(self, obstime.shape, subok=True) # do this here to prevent a series of complicated circular imports from .builtin_frames import ITRS return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime) itrs = property(get_itrs, doc="""An `~astropy.coordinates.ITRS` object with the location of this object at the default ``obstime``.""") def get_gcrs(self, obstime): """GCRS position with velocity at ``obstime`` as a GCRS coordinate. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the GCRS position/velocity at. Returns ------- gcrs : `~astropy.coordinates.GCRS` instance With velocity included. """ # do this here to prevent a series of complicated circular imports from .builtin_frames import GCRS loc, vel = self.get_gcrs_posvel(obstime) loc.differentials['s'] = CartesianDifferential.from_cartesian(vel) return GCRS(loc, obstime=obstime) def _get_gcrs_posvel(self, obstime, ref_to_itrs, gcrs_to_ref): """Calculate GCRS position and velocity given transformation matrices. The reference frame z axis must point to the Celestial Intermediate Pole (as is the case for CIRS and TETE). This private method is used in intermediate_rotation_transforms, where some of the matrices are already available for the coordinate transformation. The method is faster by an order of magnitude than just adding a zero velocity to ITRS and transforming to GCRS, because it avoids calculating the velocity via finite differencing of the results of the transformation at three separate times. """ # The simplest route is to transform to the reference frame where the # z axis is properly aligned with the Earth's rotation axis (CIRS or # TETE), then calculate the velocity, and then transform this # reference position and velocity to GCRS. For speed, though, we # transform the coordinates to GCRS in one step, and calculate the # velocities by rotating around the earth's axis transformed to GCRS. ref_to_gcrs = matrix_transpose(gcrs_to_ref) itrs_to_gcrs = ref_to_gcrs @ matrix_transpose(ref_to_itrs) # Earth's rotation vector in the ref frame is rot_vec_ref = (0,0,OMEGA_EARTH), # so in GCRS it is ref_to_gcrs[..., 2] * OMEGA_EARTH. rot_vec_gcrs = CartesianRepresentation(ref_to_gcrs[..., 2] * OMEGA_EARTH, xyz_axis=-1, copy=False) # Get the position in the GCRS frame. # Since we just need the cartesian representation of ITRS, avoid get_itrs(). itrs_cart = CartesianRepresentation(self.x, self.y, self.z, copy=False) pos = itrs_cart.transform(itrs_to_gcrs) vel = rot_vec_gcrs.cross(pos) return pos, vel def get_gcrs_posvel(self, obstime): """ Calculate the GCRS position and velocity of this object at the requested ``obstime``. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the GCRS position/velocity at. Returns ------- obsgeoloc : `~astropy.coordinates.CartesianRepresentation` The GCRS position of the object obsgeovel : `~astropy.coordinates.CartesianRepresentation` The GCRS velocity of the object """ # Local import to prevent circular imports.
from .builtin_frames.intermediate_rotation_transforms import ( cirs_to_itrs_mat, gcrs_to_cirs_mat) # Get gcrs_posvel by transforming via CIRS (slightly faster than TETE). return self._get_gcrs_posvel(obstime, cirs_to_itrs_mat(obstime), gcrs_to_cirs_mat(obstime)) def gravitational_redshift(self, obstime, bodies=['sun', 'jupiter', 'moon'], masses={}): """Return the gravitational redshift at this EarthLocation. Calculates the gravitational redshift, of order 3 m/s, due to the requested solar system bodies. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the redshift at. bodies : iterable, optional The bodies (other than the Earth) to include in the redshift calculation. List elements should be any body name `get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and the Moon. Earth is always included (because the class represents an *Earth* location). masses : dict[str, `~astropy.units.Quantity`], optional The mass or gravitational parameters (G * mass) to assume for the bodies requested in ``bodies``. Can be used to override the defaults for the Sun, Jupiter, the Moon, and the Earth, or to pass in masses for other bodies. Returns ------- redshift : `~astropy.units.Quantity` Gravitational redshift in velocity units at given obstime. """ # needs to be here to avoid circular imports from .solar_system import get_body_barycentric bodies = list(bodies) # Ensure earth is included and last in the list. if 'earth' in bodies: bodies.remove('earth') bodies.append('earth') _masses = {'sun': consts.GM_sun, 'jupiter': consts.GM_jup, 'moon': consts.G * 7.34767309e22*u.kg, 'earth': consts.GM_earth} _masses.update(masses) GMs = [] M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg)) for body in bodies: try: GMs.append(_masses[body].to(u.m**3/u.s**2, [M_GM_equivalency])) except KeyError as err: raise KeyError(f'body "{body}" does not have a mass.') from err except u.UnitsError as exc: exc.args += ('"masses" argument values must be masses or ' 'gravitational parameters.',) raise positions = [get_body_barycentric(name, obstime) for name in bodies] # Calculate distances to objects other than earth. distances = [(pos - positions[-1]).norm() for pos in positions[:-1]] # Append distance from Earth's center for Earth's contribution. distances.append(CartesianRepresentation(self.geocentric).norm()) # Get redshifts due to all objects. redshifts = [-GM / consts.c / distance for (GM, distance) in zip(GMs, distances)] # Reverse order of summing, to go from small to big, and to get # "earth" first, which gives m/s as unit. return sum(redshifts[::-1]) @property def x(self): """The X component of the geocentric coordinates.""" return self['x'] @property def y(self): """The Y component of the geocentric coordinates.""" return self['y'] @property def z(self): """The Z component of the geocentric coordinates.""" return self['z'] def __getitem__(self, item): result = super().__getitem__(item) if result.dtype is self.dtype: return result.view(self.__class__) else: return result.view(u.Quantity) def __array_finalize__(self, obj): super().__array_finalize__(obj) if hasattr(obj, '_ellipsoid'): self._ellipsoid = obj._ellipsoid def __len__(self): if self.shape == (): raise IndexError('0-d EarthLocation arrays cannot be indexed') else: return super().__len__() def _to_value(self, unit, equivalencies=[]): """Helper method for to and to_value.""" # Conversion to another unit in both ``to`` and ``to_value`` goes # via this routine. 
To make the regular quantity routines work, we # temporarily turn the structured array into a regular one. array_view = self.view(self._array_dtype, np.ndarray) if equivalencies == []: equivalencies = self._equivalencies new_array = self.unit.to(unit, array_view, equivalencies=equivalencies) return new_array.view(self.dtype).reshape(self.shape) geodetic_base_doc = """{__doc__} Parameters ---------- lon, lat : angle-like The longitude and latitude of the point(s), in angular units. The latitude should be between -90 and 90 degrees, and the longitude will be wrapped to an angle between 0 and 360 degrees. These can also be instances of `~astropy.coordinates.Angle` and either `~astropy.coordinates.Longitude` or `~astropy.coordinates.Latitude`, depending on the parameter. height : `~astropy.units.Quantity` ['length'] The height of the point(s). copy : bool, optional If `True` (default), arrays will be copied. If `False`, arrays will be references, though possibly broadcast to ensure matching shapes. """ @format_doc(geodetic_base_doc) class BaseGeodeticRepresentation(BaseRepresentation): """Base geodetic representation.""" attr_classes = {'lon': Longitude, 'lat': Latitude, 'height': u.Quantity} def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if '_ellipsoid' in cls.__dict__: ELLIPSOIDS[cls._ellipsoid] = cls def __init__(self, lon, lat=None, height=None, copy=True): if height is None and not isinstance(lon, self.__class__): height = 0 << u.m super().__init__(lon, lat, height, copy=copy) if not self.height.unit.is_equivalent(u.m): raise u.UnitTypeError(f"{self.__class__.__name__} requires " f"height with units of length.") def to_cartesian(self): """ Converts geodetic coordinates on the class's reference ellipsoid to 3D rectangular (geocentric) cartesian coordinates. """ xyz = erfa.gd2gc(getattr(erfa, self._ellipsoid), self.lon, self.lat, self.height) return CartesianRepresentation(xyz, xyz_axis=-1, copy=False) @classmethod def from_cartesian(cls, cart): """ Converts 3D rectangular cartesian coordinates (assumed geocentric) to geodetic coordinates on the class's reference ellipsoid. """ lon, lat, height = erfa.gc2gd(getattr(erfa, cls._ellipsoid), cart.get_xyz(xyz_axis=-1)) return cls(lon, lat, height, copy=False) @format_doc(geodetic_base_doc) class WGS84GeodeticRepresentation(BaseGeodeticRepresentation): """Representation of points in WGS84 3D geodetic coordinates.""" _ellipsoid = 'WGS84' @format_doc(geodetic_base_doc) class WGS72GeodeticRepresentation(BaseGeodeticRepresentation): """Representation of points in WGS72 3D geodetic coordinates.""" _ellipsoid = 'WGS72' @format_doc(geodetic_base_doc) class GRS80GeodeticRepresentation(BaseGeodeticRepresentation): """Representation of points in GRS80 3D geodetic coordinates.""" _ellipsoid = 'GRS80'
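# ----------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the module):
# constructing an EarthLocation from geodetic input and reading back the
# geocentric and geodetic forms defined above.  The coordinates are round
# example numbers, not a site-registry entry.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    from astropy.time import Time

    # Geodetic input (lon, lat, height) on the default WGS84 ellipsoid.
    loc = EarthLocation.from_geodetic(lon=-70.4 * u.deg, lat=-24.6 * u.deg,
                                      height=2650. * u.m)

    # Geocentric (x, y, z) view of the same point.
    x, y, z = loc.to_geocentric()

    # Round trip back to geodetic, optionally on another ellipsoid.
    lon, lat, height = loc.to_geodetic(ellipsoid='GRS80')

    # ITRS frame object and gravitational redshift at a given obstime.
    t = Time('2021-01-01')
    itrs = loc.get_itrs(obstime=t)
    print(loc.gravitational_redshift(t).to(u.m / u.s))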
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains the fundamental classes used for representing coordinates in astropy. """ import warnings from collections import namedtuple import numpy as np from . import angle_formats as form from astropy import units as u from astropy.utils import isiterable __all__ = ['Angle', 'Latitude', 'Longitude'] # these are used by the `hms` and `dms` attributes hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's')) dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's')) signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's')) class Angle(u.SpecificTypeQuantity): """ One or more angular value(s) with units equivalent to radians or degrees. An angle can be specified either as an array, scalar, tuple (see below), string, `~astropy.units.Quantity` or another :class:`~astropy.coordinates.Angle`. The input parser is flexible and supports a variety of formats. The examples below illustrate common ways of initializing an `Angle` object. First some imports:: >>> from astropy.coordinates import Angle >>> from astropy import units as u The angle values can now be provided:: >>> Angle('10.2345d') <Angle 10.2345 deg> >>> Angle(['10.2345d', '-20d']) <Angle [ 10.2345, -20. ] deg> >>> Angle('1:2:30.43 degrees') <Angle 1.04178611 deg> >>> Angle('1 2 0 hours') <Angle 1.03333333 hourangle> >>> Angle(np.arange(1, 8), unit=u.deg) <Angle [1., 2., 3., 4., 5., 6., 7.] deg> >>> Angle('1°2′3″') <Angle 1.03416667 deg> >>> Angle('1°2′3″N') <Angle 1.03416667 deg> >>> Angle('1d2m3.4s') <Angle 1.03427778 deg> >>> Angle('1d2m3.4sS') <Angle -1.03427778 deg> >>> Angle('-1h2m3s') <Angle -1.03416667 hourangle> >>> Angle('-1h2m3sE') <Angle -1.03416667 hourangle> >>> Angle('-1h2.5m') <Angle -1.04166667 hourangle> >>> Angle('-1h2.5mW') <Angle 1.04166667 hourangle> >>> Angle('-1:2.5', unit=u.deg) <Angle -1.04166667 deg> >>> Angle((10, 11, 12), unit='hourangle') # (h, m, s) <Angle 10.18666667 hourangle> >>> Angle((-1, 2, 3), unit=u.deg) # (d, m, s) <Angle -1.03416667 deg> >>> Angle(10.2345 * u.deg) <Angle 10.2345 deg> >>> Angle(Angle(10.2345 * u.deg)) <Angle 10.2345 deg> Parameters ---------- angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle` The angle value. If a tuple, will be interpreted as ``(h, m, s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted following the rules described above. If ``angle`` is a sequence or array of strings, the resulting values will be in the given ``unit``, or if `None` is provided, the unit will be taken from the first given value. unit : unit-like, optional The unit of the value specified for the angle. This may be any string that `~astropy.units.Unit` understands, but it is better to give an actual unit object. Must be an angular unit. dtype : `~numpy.dtype`, optional See `~astropy.units.Quantity`. copy : bool, optional See `~astropy.units.Quantity`. Raises ------ `~astropy.units.UnitsError` If a unit is not provided or it is not an angular unit. 
""" _equivalent_unit = u.radian _include_easy_conversion_members = True def __new__(cls, angle, unit=None, dtype=None, copy=True, **kwargs): if not isinstance(angle, u.Quantity): if unit is not None: unit = cls._convert_unit_to_angle_unit(u.Unit(unit)) if isinstance(angle, tuple): angle = cls._tuple_to_float(angle, unit) elif isinstance(angle, str): angle, angle_unit = form.parse_angle(angle, unit) if angle_unit is None: angle_unit = unit if isinstance(angle, tuple): angle = cls._tuple_to_float(angle, angle_unit) if angle_unit is not unit: # Possible conversion to `unit` will be done below. angle = u.Quantity(angle, angle_unit, copy=False) elif (isiterable(angle) and not (isinstance(angle, np.ndarray) and angle.dtype.kind not in 'SUVO')): angle = [Angle(x, unit, copy=False) for x in angle] return super().__new__(cls, angle, unit, dtype=dtype, copy=copy, **kwargs) @staticmethod def _tuple_to_float(angle, unit): """ Converts an angle represented as a 3-tuple or 2-tuple into a floating point number in the given unit. """ # TODO: Numpy array of tuples? if unit == u.hourangle: return form.hms_to_hours(*angle) elif unit == u.degree: return form.dms_to_degrees(*angle) else: raise u.UnitsError(f"Can not parse '{angle}' as unit '{unit}'") @staticmethod def _convert_unit_to_angle_unit(unit): return u.hourangle if unit is u.hour else unit def _set_unit(self, unit): super()._set_unit(self._convert_unit_to_angle_unit(unit)) @property def hour(self): """ The angle's value in hours (read-only property). """ return self.hourangle @property def hms(self): """ The angle's value in hours, as a named tuple with ``(h, m, s)`` members. (This is a read-only property.) """ return hms_tuple(*form.hours_to_hms(self.hourangle)) @property def dms(self): """ The angle's value in degrees, as a named tuple with ``(d, m, s)`` members. (This is a read-only property.) """ return dms_tuple(*form.degrees_to_dms(self.degree)) @property def signed_dms(self): """ The angle's value in degrees, as a named tuple with ``(sign, d, m, s)`` members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of the angle is given by ``sign``. (This is a read-only property.) This is primarily intended for use with `dms` to generate string representations of coordinates that are correct for negative angles. """ return signed_dms_tuple(np.sign(self.degree), *form.degrees_to_dms(np.abs(self.degree))) def to_string(self, unit=None, decimal=False, sep='fromunit', precision=None, alwayssign=False, pad=False, fields=3, format=None): """ A string representation of the angle. Parameters ---------- unit : `~astropy.units.UnitBase`, optional Specifies the unit. Must be an angular unit. If not provided, the unit used to initialize the angle will be used. decimal : bool, optional If `True`, a decimal representation will be used, otherwise the returned string will be in sexagesimal form. sep : str, optional The separator between numbers in a sexagesimal representation. E.g., if it is ':', the result is ``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g., ``sep='hms'`` would give the result ``'12h41m11.1241s'``, or sep='-:' would yield ``'11-21:17.124'``. Alternatively, the special string 'fromunit' means 'dms' if the unit is degrees, or 'hms' if the unit is hours. precision : int, optional The level of decimal precision. If ``decimal`` is `True`, this is the raw precision, otherwise it gives the precision of the last place of the sexagesimal representation (seconds). 
If `None`, or not provided, the number of decimal places is determined by the value, and will be between 0-8 decimal places as required. alwayssign : bool, optional If `True`, include the sign no matter what. If `False`, only include the sign if it is negative. pad : bool, optional If `True`, include leading zeros when needed to ensure a fixed number of characters for sexagesimal representation. fields : int, optional Specifies the number of fields to display when outputting sexagesimal notation. For example: - fields == 1: ``'5d'`` - fields == 2: ``'5d45m'`` - fields == 3: ``'5d45m32.5s'`` By default, all fields are displayed. format : str, optional The format of the result. If not provided, an unadorned string is returned. Supported values are: - 'latex': Return a LaTeX-formatted string - 'unicode': Return a string containing non-ASCII unicode characters, such as the degree symbol Returns ------- strrepr : str or array A string representation of the angle. If the angle is an array, this will be an array with a unicode dtype. """ if unit is None: unit = self.unit else: unit = self._convert_unit_to_angle_unit(u.Unit(unit)) separators = { None: { u.degree: 'dms', u.hourangle: 'hms'}, 'latex': { u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'], u.hourangle: [r'^{\mathrm{h}}', r'^{\mathrm{m}}', r'^{\mathrm{s}}']}, 'unicode': { u.degree: '°′″', u.hourangle: 'ʰᵐˢ'} } if sep == 'fromunit': if format not in separators: raise ValueError(f"Unknown format '{format}'") seps = separators[format] if unit in seps: sep = seps[unit] # Create an iterator so we can format each element of what # might be an array. if unit is u.degree: if decimal: values = self.degree if precision is not None: func = ("{0:0." + str(precision) + "f}").format else: func = '{:g}'.format else: if sep == 'fromunit': sep = 'dms' values = self.degree func = lambda x: form.degrees_to_string( x, precision=precision, sep=sep, pad=pad, fields=fields) elif unit is u.hourangle: if decimal: values = self.hour if precision is not None: func = ("{0:0." + str(precision) + "f}").format else: func = '{:g}'.format else: if sep == 'fromunit': sep = 'hms' values = self.hour func = lambda x: form.hours_to_string( x, precision=precision, sep=sep, pad=pad, fields=fields) elif unit.is_equivalent(u.radian): if decimal: values = self.to_value(unit) if precision is not None: func = ("{0:1." + str(precision) + "f}").format else: func = "{:g}".format elif sep == 'fromunit': values = self.to_value(unit) unit_string = unit.to_string(format=format) if format == 'latex': unit_string = unit_string[1:-1] if precision is not None: def plain_unit_format(val): return ("{0:0." + str(precision) + "f}{1}").format( val, unit_string) func = plain_unit_format else: def plain_unit_format(val): return f"{val:g}{unit_string}" func = plain_unit_format else: raise ValueError( f"'{unit.name}' can not be represented in sexagesimal notation") else: raise u.UnitsError( "The unit value provided is not an angular unit.") def do_format(val): # Check if value is not nan to avoid ValueErrors when turning it into # a hexagesimal string. if not np.isnan(val): s = func(float(val)) if alwayssign and not s.startswith('-'): s = '+' + s if format == 'latex': s = f'${s}$' return s s = f"{val}" return s format_ufunc = np.vectorize(do_format, otypes=['U']) result = format_ufunc(values) if result.ndim == 0: result = result[()] return result def _wrap_at(self, wrap_angle): """ Implementation that assumes ``angle`` is already validated and that wrapping is inplace. 
""" # Convert the wrap angle and 360 degrees to the native unit of # this Angle, then do all the math on raw Numpy arrays rather # than Quantity objects for speed. a360 = u.degree.to(self.unit, 360.0) wrap_angle = wrap_angle.to_value(self.unit) wrap_angle_floor = wrap_angle - a360 self_angle = self.view(np.ndarray) # Do the wrapping, but only if any angles need to be wrapped # # This invalid catch block is needed both for the floor division # and for the comparisons later on (latter not really needed # any more for >= 1.19 (NUMPY_LT_1_19), but former is). with np.errstate(invalid='ignore'): wraps = (self_angle - wrap_angle_floor) // a360 np.nan_to_num(wraps, copy=False) if np.any(wraps != 0): self_angle -= wraps*a360 # Rounding errors can cause problems. self_angle[self_angle >= wrap_angle] -= a360 self_angle[self_angle < wrap_angle_floor] += a360 def wrap_at(self, wrap_angle, inplace=False): """ Wrap the `~astropy.coordinates.Angle` object at the given ``wrap_angle``. This method forces all the angle values to be within a contiguous 360 degree range so that ``wrap_angle - 360d <= angle < wrap_angle``. By default a new Angle object is returned, but if the ``inplace`` argument is `True` then the `~astropy.coordinates.Angle` object is wrapped in place and nothing is returned. For instance:: >>> from astropy.coordinates import Angle >>> import astropy.units as u >>> a = Angle([-20.0, 150.0, 350.0] * u.deg) >>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP array([340., 150., 350.]) >>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP >>> a.degree # doctest: +FLOAT_CMP array([-20., 150., -10.]) Parameters ---------- wrap_angle : angle-like Specifies a single value for the wrap angle. This can be any object that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. inplace : bool If `True` then wrap the object in place instead of returning a new `~astropy.coordinates.Angle` Returns ------- out : Angle or None If ``inplace is False`` (default), return new `~astropy.coordinates.Angle` object with angles wrapped accordingly. Otherwise wrap in place and return `None`. """ wrap_angle = Angle(wrap_angle, copy=False) # Convert to an Angle if not inplace: self = self.copy() self._wrap_at(wrap_angle) return None if inplace else self def is_within_bounds(self, lower=None, upper=None): """ Check if all angle(s) satisfy ``lower <= angle < upper`` If ``lower`` is not specified (or `None`) then no lower bounds check is performed. Likewise ``upper`` can be left unspecified. For example:: >>> from astropy.coordinates import Angle >>> import astropy.units as u >>> a = Angle([-20, 150, 350] * u.deg) >>> a.is_within_bounds('0d', '360d') False >>> a.is_within_bounds(None, '360d') True >>> a.is_within_bounds(-30 * u.deg, None) True Parameters ---------- lower : angle-like or None Specifies lower bound for checking. This can be any object that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. upper : angle-like or None Specifies upper bound for checking. This can be any object that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. 
Returns ------- is_within_bounds : bool `True` if all angles satisfy ``lower <= angle < upper`` """ ok = True if lower is not None: ok &= np.all(Angle(lower) <= self) if ok and upper is not None: ok &= np.all(self < Angle(upper)) return bool(ok) def _str_helper(self, format=None): if self.isscalar: return self.to_string(format=format) def formatter(x): return x.to_string(format=format) return np.array2string(self, formatter={'all': formatter}) def __str__(self): return self._str_helper() def _repr_latex_(self): return self._str_helper(format='latex') def _no_angle_subclass(obj): """Return any Angle subclass objects as an Angle objects. This is used to ensure that Latitude and Longitude change to Angle objects when they are used in calculations (such as lon/2.) """ if isinstance(obj, tuple): return tuple(_no_angle_subclass(_obj) for _obj in obj) return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj class Latitude(Angle): """ Latitude-like angle(s) which must be in the range -90 to +90 deg. A Latitude object is distinguished from a pure :class:`~astropy.coordinates.Angle` by virtue of being constrained so that:: -90.0 * u.deg <= angle(s) <= +90.0 * u.deg Any attempt to set a value outside that range will result in a `ValueError`. The input angle(s) can be specified either as an array, list, scalar, tuple (see below), string, :class:`~astropy.units.Quantity` or another :class:`~astropy.coordinates.Angle`. The input parser is flexible and supports all of the input formats supported by :class:`~astropy.coordinates.Angle`. Parameters ---------- angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle` The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted following the rules described for :class:`~astropy.coordinates.Angle`. If ``angle`` is a sequence or array of strings, the resulting values will be in the given ``unit``, or if `None` is provided, the unit will be taken from the first given value. unit : unit-like, optional The unit of the value specified for the angle. This may be any string that `~astropy.units.Unit` understands, but it is better to give an actual unit object. Must be an angular unit. Raises ------ `~astropy.units.UnitsError` If a unit is not provided or it is not an angular unit. `TypeError` If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`. """ def __new__(cls, angle, unit=None, **kwargs): # Forbid creating a Lat from a Long. if isinstance(angle, Longitude): raise TypeError("A Latitude angle cannot be created from a Longitude angle") self = super().__new__(cls, angle, unit=unit, **kwargs) self._validate_angles() return self def _validate_angles(self, angles=None): """Check that angles are between -90 and 90 degrees. If not given, the check is done on the object itself""" # Convert the lower and upper bounds to the "native" unit of # this angle. This limits multiplication to two values, # rather than the N values in `self.value`. Also, the # comparison is performed on raw arrays, rather than Quantity # objects, for speed. 
        if angles is None:
            angles = self

        lower = u.degree.to(angles.unit, -90.0)
        upper = u.degree.to(angles.unit, 90.0)
        # This invalid catch block can be removed when the minimum numpy
        # version is >= 1.19 (NUMPY_LT_1_19)
        with np.errstate(invalid='ignore'):
            invalid_angles = (np.any(angles.value < lower) or
                              np.any(angles.value > upper))
        if invalid_angles:
            raise ValueError('Latitude angle(s) must be within -90 deg <= angle <= 90 deg, '
                             'got {}'.format(angles.to(u.degree)))

    def __setitem__(self, item, value):
        # Forbid assigning a Long to a Lat.
        if isinstance(value, Longitude):
            raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
        # first check bounds
        if value is not np.ma.masked:
            self._validate_angles(value)
        super().__setitem__(item, value)

    # Any calculation should drop to Angle
    def __array_ufunc__(self, *args, **kwargs):
        results = super().__array_ufunc__(*args, **kwargs)
        return _no_angle_subclass(results)


class LongitudeInfo(u.QuantityInfo):
    _represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ('wrap_angle',)


class Longitude(Angle):
    """
    Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.

    A ``Longitude`` object is distinguished from a pure
    :class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
    property.  The ``wrap_angle`` specifies that all angle values
    represented by the object will be in the range::

      wrap_angle - 360 * u.deg <= angle(s) < wrap_angle

    The default ``wrap_angle`` is 360 deg.  Setting ``wrap_angle=180 *
    u.deg`` would instead result in values between -180 and +180 deg.
    Setting the ``wrap_angle`` attribute of an existing ``Longitude``
    object will result in re-wrapping the angle values in-place.

    The input angle(s) can be specified either as an array, list, scalar,
    tuple, string, :class:`~astropy.units.Quantity`
    or another :class:`~astropy.coordinates.Angle`.

    The input parser is flexible and supports all of the input formats
    supported by :class:`~astropy.coordinates.Angle`.

    Parameters
    ----------
    angle : tuple or angle-like
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
        or ``(d, m, s)`` depending on ``unit``. If a string, it will be
        interpreted following the rules described for
        :class:`~astropy.coordinates.Angle`.

        If ``angle`` is a sequence or array of strings, the resulting
        values will be in the given ``unit``, or if `None` is provided,
        the unit will be taken from the first given value.

    unit : unit-like ['angle'], optional
        The unit of the value specified for the angle.  This may be
        any string that `~astropy.units.Unit` understands, but it is
        better to give an actual unit object.  Must be an angular
        unit.

    wrap_angle : angle-like or None, optional
        Angle at which to wrap back to ``wrap_angle - 360 deg``.
        If ``None`` (default), it will be taken to be 360 deg unless ``angle``
        has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
        in which case it will be taken from there.

    Raises
    ------
    `~astropy.units.UnitsError`
        If a unit is not provided or it is not an angular unit.
    `TypeError`
        If the angle parameter is an instance of
        :class:`~astropy.coordinates.Latitude`.
    """

    _wrap_angle = None
    _default_wrap_angle = Angle(360 * u.deg)
    info = LongitudeInfo()

    def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
        # Forbid creating a Long from a Lat.
if isinstance(angle, Latitude): raise TypeError("A Longitude angle cannot be created from " "a Latitude angle.") self = super().__new__(cls, angle, unit=unit, **kwargs) if wrap_angle is None: wrap_angle = getattr(angle, 'wrap_angle', self._default_wrap_angle) self.wrap_angle = wrap_angle # angle-like b/c property setter return self def __setitem__(self, item, value): # Forbid assigning a Lat to a Long. if isinstance(value, Latitude): raise TypeError("A Latitude angle cannot be assigned to a Longitude angle") super().__setitem__(item, value) self._wrap_at(self.wrap_angle) @property def wrap_angle(self): return self._wrap_angle @wrap_angle.setter def wrap_angle(self, value): self._wrap_angle = Angle(value, copy=False) self._wrap_at(self.wrap_angle) def __array_finalize__(self, obj): super().__array_finalize__(obj) self._wrap_angle = getattr(obj, '_wrap_angle', self._default_wrap_angle) # Any calculation should drop to Angle def __array_ufunc__(self, *args, **kwargs): results = super().__array_ufunc__(*args, **kwargs) return _no_angle_subclass(results)
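
# Illustrative usage sketch (not part of the module): the wrapping and
# validation behaviour of the two subclasses defined above.
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import Latitude, Longitude
# >>> lon = Longitude([350, 10, 370] * u.deg)  # wrapped into [0, 360) deg
# >>> lon.wrap_angle = 180 * u.deg             # re-wraps in place to [-180, 180)
# >>> lat = Latitude(45 * u.deg)
# >>> # Latitude(100 * u.deg) would raise ValueError: outside the
# >>> # allowed -90 deg <= angle <= 90 deg range.
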
""" Module for parsing astronomical object names to extract embedded coordinates eg: '2MASS J06495091-0737408' """ import re import numpy as np import astropy.units as u from astropy.coordinates import SkyCoord RA_REGEX = r'()([0-2]\d)([0-5]\d)([0-5]\d)\.?(\d{0,3})' DEC_REGEX = r'([+-])(\d{1,2})([0-5]\d)([0-5]\d)\.?(\d{0,3})' JCOORD_REGEX = '(.*?J)' + RA_REGEX + DEC_REGEX JPARSER = re.compile(JCOORD_REGEX) def _sexagesimal(g): # convert matched regex groups to sexigesimal array sign, h, m, s, frac = g sign = -1 if (sign == '-') else 1 s = '.'.join((s, frac)) return sign * np.array([h, m, s], float) def search(name, raise_=False): """Regex match for coordinates in name""" # extract the coordinate data from name match = JPARSER.search(name) if match is None and raise_: raise ValueError('No coordinate match found!') return match def to_ra_dec_angles(name): """get RA in hourangle and DEC in degrees by parsing name """ groups = search(name, True).groups() prefix, hms, dms = np.split(groups, [1, 6]) ra = (_sexagesimal(hms) / (1, 60, 60 * 60) * u.hourangle).sum() dec = (_sexagesimal(dms) * (u.deg, u.arcmin, u.arcsec)).sum() return ra, dec def to_skycoord(name, frame='icrs'): """Convert to `name` to `SkyCoords` object""" return SkyCoord(*to_ra_dec_angles(name), frame=frame) def shorten(name): """ Produce a shortened version of the full object name using: the prefix (usually the survey name) and RA (hour, minute), DEC (deg, arcmin) parts. e.g.: '2MASS J06495091-0737408' --> '2MASS J0649-0737' Parameters ---------- name : str Full object name with J-coords embedded. Returns ------- shortName: str """ match = search(name) return ''.join(match.group(1, 3, 4, 7, 8, 9))
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# Dependencies
import numpy as np

# Project
from astropy import units as u
from astropy.utils import ShapedLikeNDArray

__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute',
           'EarthLocationAttribute', 'CoordinateAttribute',
           'CartesianRepresentationAttribute',
           'DifferentialAttribute']


class Attribute:
    """A non-mutable data descriptor to hold a frame attribute.

    This class must be used to define frame attributes (e.g. ``equinox`` or
    ``obstime``) that are included in a frame class definition.

    Examples
    --------
    The `~astropy.coordinates.FK4` class uses the following class attributes::

      class FK4(BaseCoordinateFrame):
          equinox = TimeAttribute(default=_EQUINOX_B1950)
          obstime = TimeAttribute(default=None,
                                  secondary_attribute='equinox')

    This means that ``equinox`` and ``obstime`` are available to be set as
    keyword arguments when creating an ``FK4`` class instance and are then
    accessible as instance attributes.  The instance value for the attribute
    must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
    method.

    Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeAttribute`` class.  This subclass overrides the
    ``convert_input`` method to validate and convert inputs into a ``Time``
    object.

    Parameters
    ----------
    default : object
        Default value for the attribute if not provided
    secondary_attribute : str
        Name of a secondary instance attribute which supplies the value if
        ``default is None`` and no value was supplied during initialization.
    """

    name = '<unbound>'

    def __init__(self, default=None, secondary_attribute=''):
        self.default = default
        self.secondary_attribute = secondary_attribute
        super().__init__()

    def __set_name__(self, owner, name):
        self.name = name

    def convert_input(self, value):
        """
        Validate the input ``value`` and convert to expected attribute class.

        The base method here does nothing, but subclasses can implement this
        as needed.  The method should catch any internal exceptions and raise
        ValueError with an informative message.

        The method returns the validated input along with a boolean that
        indicates whether the input value was actually converted.  If the
        input value was already the correct type then the ``converted``
        return value should be ``False``.

        Parameters
        ----------
        value : object
            Input value to be converted.

        Returns
        -------
        output_value : object
            The ``value`` converted to the correct type (or just ``value`` if
            ``converted`` is False)
        converted : bool
            True if the conversion was actually performed, False otherwise.

        Raises
        ------
        ValueError
            If the input is not valid for this attribute.
        """
        return value, False

    def __get__(self, instance, frame_cls=None):
        if instance is None:
            out = self.default
        else:
            out = getattr(instance, '_' + self.name, self.default)
            if out is None:
                out = getattr(instance, self.secondary_attribute, self.default)

        out, converted = self.convert_input(out)
        if instance is not None:
            # None if instance (frame) has no data!
            instance_shape = getattr(instance, 'shape', None)
            if instance_shape is not None and (getattr(out, 'shape', ()) and
                                               out.shape != instance_shape):
                # If the shapes do not match, try broadcasting.
                try:
                    if isinstance(out, ShapedLikeNDArray):
                        out = out._apply(np.broadcast_to,
                                         shape=instance_shape, subok=True)
                    else:
                        out = np.broadcast_to(out, instance_shape, subok=True)
                except ValueError:
                    # raise more informative exception.
                    raise ValueError(
                        "attribute {} should be scalar or have shape {}, "
                        "but it has shape {} and could not be broadcast."
                        .format(self.name, instance_shape, out.shape))

                converted = True

            if converted:
                setattr(instance, '_' + self.name, out)

        return out

    def __set__(self, instance, val):
        raise AttributeError('Cannot set frame attribute')


class TimeAttribute(Attribute):
    """
    Frame attribute descriptor for quantities that are Time objects.
    See the `~astropy.coordinates.Attribute` API doc for further
    information.

    Parameters
    ----------
    default : object
        Default value for the attribute if not provided
    secondary_attribute : str
        Name of a secondary instance attribute which supplies the value if
        ``default is None`` and no value was supplied during initialization.
    """

    def convert_input(self, value):
        """
        Convert input value to a Time object and validate by running through
        the Time constructor.  Also check that the input was a scalar.

        Parameters
        ----------
        value : object
            Input value to be converted.

        Returns
        -------
        out, converted : correctly-typed object, boolean
            Tuple consisting of the correctly-typed object and a boolean which
            indicates if conversion was actually performed.

        Raises
        ------
        ValueError
            If the input is not valid for this attribute.
        """
        from astropy.time import Time

        if value is None:
            return None, False

        if isinstance(value, Time):
            out = value
            converted = False
        else:
            try:
                out = Time(value)
            except Exception as err:
                raise ValueError(
                    f'Invalid time input {self.name}={value!r}.') from err
            converted = True

        # Set attribute as read-only for arrays (not allowed by numpy
        # for array scalars)
        if out.shape:
            out.writeable = False
        return out, converted


class CartesianRepresentationAttribute(Attribute):
    """
    A frame attribute that is a CartesianRepresentation with specified units.

    Parameters
    ----------
    default : object
        Default value for the attribute if not provided
    secondary_attribute : str
        Name of a secondary instance attribute which supplies the value if
        ``default is None`` and no value was supplied during initialization.
    unit : unit-like or None
        Name of a unit that the input will be converted into. If None, no
        unit-checking or conversion is performed
    """

    def __init__(self, default=None, secondary_attribute='', unit=None):
        super().__init__(default, secondary_attribute)
        self.unit = unit

    def convert_input(self, value):
        """
        Checks that the input is a CartesianRepresentation with the correct
        unit, or the special value ``[0, 0, 0]``.

        Parameters
        ----------
        value : object
            Input value to be converted.

        Returns
        -------
        out : object
            The correctly-typed object.
        converted : boolean
            A boolean which indicates if conversion was actually performed.

        Raises
        ------
        ValueError
            If the input is not valid for this attribute.
        """
        if (isinstance(value, list) and len(value) == 3 and
                all(v == 0 for v in value) and self.unit is not None):
            return CartesianRepresentation(np.zeros(3) * self.unit), True
        else:
            # is it a CartesianRepresentation with correct unit?
            if hasattr(value, 'xyz') and value.xyz.unit == self.unit:
                return value, False

            converted = True
            # if it's a CartesianRepresentation, get the xyz Quantity
            value = getattr(value, 'xyz', value)
            if not hasattr(value, 'unit'):
                raise TypeError('tried to set a {} with something that does '
                                'not have a unit.'
                                .format(self.__class__.__name__))

            value = value.to(self.unit)

            # now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False) return cartrep, converted class QuantityAttribute(Attribute): """ A frame attribute that is a quantity with specified units and shape (optionally). Can be `None`, which should be used for special cases in associated frame transformations like "this quantity should be ignored" or similar. Parameters ---------- default : number or `~astropy.units.Quantity` or None, optional Default value for the attribute if the user does not supply one. If a Quantity, it must be consistent with ``unit``, or if a value, ``unit`` cannot be None. secondary_attribute : str, optional Name of a secondary instance attribute which supplies the value if ``default is None`` and no value was supplied during initialization. unit : unit-like or None, optional Name of a unit that the input will be converted into. If None, no unit-checking or conversion is performed shape : tuple or None, optional If given, specifies the shape the attribute must be """ def __init__(self, default=None, secondary_attribute='', unit=None, shape=None): if default is None and unit is None: raise ValueError('Either a default quantity value must be ' 'provided, or a unit must be provided to define a ' 'QuantityAttribute.') if default is not None and unit is None: unit = default.unit self.unit = unit self.shape = shape default = self.convert_input(default)[0] super().__init__(default, secondary_attribute) def convert_input(self, value): """ Checks that the input is a Quantity with the necessary units (or the special value ``0``). Parameters ---------- value : object Input value to be converted. Returns ------- out, converted : correctly-typed object, boolean Tuple consisting of the correctly-typed object and a boolean which indicates if conversion was actually performed. Raises ------ ValueError If the input is not valid for this attribute. """ if value is None: return None, False if (not hasattr(value, 'unit') and self.unit != u.dimensionless_unscaled and np.any(value != 0)): raise TypeError('Tried to set a QuantityAttribute with ' 'something that does not have a unit.') oldvalue = value value = u.Quantity(oldvalue, self.unit, copy=False) if self.shape is not None and value.shape != self.shape: if value.shape == () and oldvalue == 0: # Allow a single 0 to fill whatever shape is needed. value = np.broadcast_to(value, self.shape, subok=True) else: raise ValueError( f'The provided value has shape "{value.shape}", but ' f'should have shape "{self.shape}"') converted = oldvalue is not value return value, converted class EarthLocationAttribute(Attribute): """ A frame attribute that can act as a `~astropy.coordinates.EarthLocation`. It can be created as anything that can be transformed to the `~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation` when accessed after creation. Parameters ---------- default : object Default value for the attribute if not provided secondary_attribute : str Name of a secondary instance attribute which supplies the value if ``default is None`` and no value was supplied during initialization. """ def convert_input(self, value): """ Checks that the input is a Quantity with the necessary units (or the special value ``0``). Parameters ---------- value : object Input value to be converted. Returns ------- out, converted : correctly-typed object, boolean Tuple consisting of the correctly-typed object and a boolean which indicates if conversion was actually performed. Raises ------ ValueError If the input is not valid for this attribute. 
""" if value is None: return None, False elif isinstance(value, EarthLocation): return value, False else: # we have to do the import here because of some tricky circular deps from .builtin_frames import ITRS if not hasattr(value, 'transform_to'): raise ValueError('"{}" was passed into an ' 'EarthLocationAttribute, but it does not have ' '"transform_to" method'.format(value)) itrsobj = value.transform_to(ITRS()) return itrsobj.earth_location, True class CoordinateAttribute(Attribute): """ A frame attribute which is a coordinate object. It can be given as a `~astropy.coordinates.SkyCoord` or a low-level frame instance. If a low-level frame instance is provided, it will always be upgraded to be a `~astropy.coordinates.SkyCoord` to ensure consistent transformation behavior. The coordinate object will always be returned as a low-level frame instance when accessed. Parameters ---------- frame : `~astropy.coordinates.BaseCoordinateFrame` class The type of frame this attribute can be default : object Default value for the attribute if not provided secondary_attribute : str Name of a secondary instance attribute which supplies the value if ``default is None`` and no value was supplied during initialization. """ def __init__(self, frame, default=None, secondary_attribute=''): self._frame = frame super().__init__(default, secondary_attribute) def convert_input(self, value): """ Checks that the input is a SkyCoord with the necessary units (or the special value ``None``). Parameters ---------- value : object Input value to be converted. Returns ------- out, converted : correctly-typed object, boolean Tuple consisting of the correctly-typed object and a boolean which indicates if conversion was actually performed. Raises ------ ValueError If the input is not valid for this attribute. """ from astropy.coordinates import SkyCoord if value is None: return None, False elif isinstance(value, self._frame): return value, False else: value = SkyCoord(value) # always make the value a SkyCoord transformedobj = value.transform_to(self._frame) return transformedobj.frame, True class DifferentialAttribute(Attribute): """A frame attribute which is a differential instance. The optional ``allowed_classes`` argument allows specifying a restricted set of valid differential classes to check the input against. Otherwise, any `~astropy.coordinates.BaseDifferential` subclass instance is valid. Parameters ---------- default : object Default value for the attribute if not provided allowed_classes : tuple, optional A list of allowed differential classes for this attribute to have. secondary_attribute : str Name of a secondary instance attribute which supplies the value if ``default is None`` and no value was supplied during initialization. """ def __init__(self, default=None, allowed_classes=None, secondary_attribute=''): if allowed_classes is not None: self.allowed_classes = tuple(allowed_classes) else: self.allowed_classes = BaseDifferential super().__init__(default, secondary_attribute) def convert_input(self, value): """ Checks that the input is a differential object and is one of the allowed class types. Parameters ---------- value : object Input value. Returns ------- out, converted : correctly-typed object, boolean Tuple consisting of the correctly-typed object and a boolean which indicates if conversion was actually performed. Raises ------ ValueError If the input is not valid for this attribute. 
""" if value is None: return None, False if not isinstance(value, self.allowed_classes): if len(self.allowed_classes) == 1: value = self.allowed_classes[0](value) else: raise TypeError('Tried to set a DifferentialAttribute with ' 'an unsupported Differential type {}. Allowed ' 'classes are: {}' .format(value.__class__, self.allowed_classes)) return value, True # do this here to prevent a series of complicated circular imports from .earth import EarthLocation from .representation import CartesianRepresentation, BaseDifferential
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a helper function to fill erfa.astrom struct and a
ScienceState, which allows speeding up coordinate transformations at the
expense of accuracy.
"""
import warnings

import numpy as np
import erfa

from astropy.time import Time
from astropy.utils.state import ScienceState
import astropy.units as u
from astropy.utils.exceptions import AstropyWarning

from .builtin_frames.utils import (
    get_jd12, get_cip, prepare_earth_position_vel, get_polar_motion, pav2pv
)
from .matrix_utilities import rotation_matrix


__all__ = []


class ErfaAstrom:
    '''
    The default provider for astrometry values.
    A utility class to extract the necessary arguments for
    erfa functions from frame attributes, call the corresponding
    erfa functions and return the astrom object.
    '''

    @staticmethod
    def apco(frame_or_coord):
        '''
        Wrapper for ``erfa.apco``, used in conversions AltAz <-> ICRS and CIRS <-> ICRS

        Parameters
        ----------
        frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
            Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
            For this function, an AltAz or CIRS frame is expected.
        '''
        lon, lat, height = frame_or_coord.location.to_geodetic('WGS84')
        obstime = frame_or_coord.obstime

        jd1_tt, jd2_tt = get_jd12(obstime, 'tt')
        xp, yp = get_polar_motion(obstime)
        sp = erfa.sp00(jd1_tt, jd2_tt)
        x, y, s = get_cip(jd1_tt, jd2_tt)
        era = erfa.era00(*get_jd12(obstime, 'ut1'))
        earth_pv, earth_heliocentric = prepare_earth_position_vel(obstime)

        # refraction constants
        if hasattr(frame_or_coord, 'pressure'):
            # this is an AltAz like frame. Calculate refraction
            refa, refb = erfa.refco(
                frame_or_coord.pressure.to_value(u.hPa),
                frame_or_coord.temperature.to_value(u.deg_C),
                frame_or_coord.relative_humidity.value,
                frame_or_coord.obswl.to_value(u.micron)
            )
        else:
            # This is not an AltAz frame, so don't bother computing refraction
            refa, refb = 0.0, 0.0

        return erfa.apco(
            jd1_tt, jd2_tt, earth_pv, earth_heliocentric, x, y, s, era,
            lon.to_value(u.radian),
            lat.to_value(u.radian),
            height.to_value(u.m),
            xp, yp, sp, refa, refb
        )

    @staticmethod
    def apcs(frame_or_coord):
        '''
        Wrapper for ``erfa.apcs``, used in conversions GCRS <-> ICRS

        Parameters
        ----------
        frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
            Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
            For this function, a GCRS frame is expected.
        '''
        jd1_tt, jd2_tt = get_jd12(frame_or_coord.obstime, 'tt')
        obs_pv = pav2pv(
            frame_or_coord.obsgeoloc.get_xyz(xyz_axis=-1).value,
            frame_or_coord.obsgeovel.get_xyz(xyz_axis=-1).value
        )
        earth_pv, earth_heliocentric = prepare_earth_position_vel(frame_or_coord.obstime)
        return erfa.apcs(jd1_tt, jd2_tt, obs_pv, earth_pv, earth_heliocentric)

    @staticmethod
    def apio(frame_or_coord):
        '''
        Slightly modified equivalent of ``erfa.apio``, used in conversions AltAz <-> CIRS.

        Since we use a topocentric CIRS frame, we have dropped the steps needed
        to calculate diurnal aberration.

        Parameters
        ----------
        frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
            Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
            For this function, an AltAz frame is expected.
        '''
        # Calculate erfa.apio input parameters.
        # TIO locator s'
        sp = erfa.sp00(*get_jd12(frame_or_coord.obstime, 'tt'))

        # Earth rotation angle.
theta = erfa.era00(*get_jd12(frame_or_coord.obstime, 'ut1')) # Longitude and latitude in radians. lon, lat, height = frame_or_coord.location.to_geodetic('WGS84') elong = lon.to_value(u.radian) phi = lat.to_value(u.radian) # Polar motion, rotated onto local meridian xp, yp = get_polar_motion(frame_or_coord.obstime) # we need an empty astrom structure before we fill in the required sections astrom = np.zeros(frame_or_coord.obstime.shape, dtype=erfa.dt_eraASTROM) # Form the rotation matrix, CIRS to apparent [HA,Dec]. r = (rotation_matrix(elong, 'z', unit=u.radian) @ rotation_matrix(-yp, 'x', unit=u.radian) @ rotation_matrix(-xp, 'y', unit=u.radian) @ rotation_matrix(theta+sp, 'z', unit=u.radian)) # Solve for local Earth rotation angle. a = r[..., 0, 0] b = r[..., 0, 1] eral = np.arctan2(b, a) astrom['eral'] = eral # Solve for polar motion [X,Y] with respect to local meridian. c = r[..., 0, 2] astrom['xpl'] = np.arctan2(c, np.sqrt(a*a+b*b)) a = r[..., 1, 2] b = r[..., 2, 2] astrom['ypl'] = -np.arctan2(a, b) # Adjusted longitude. astrom['along'] = erfa.anpm(eral - theta) # Functions of latitude. astrom['sphi'] = np.sin(phi) astrom['cphi'] = np.cos(phi) # Omit two steps that are zero for a geocentric observer: # Observer's geocentric position and velocity (m, m/s, CIRS). # Magnitude of diurnal aberration vector. # Refraction constants. astrom['refa'], astrom['refb'] = erfa.refco( frame_or_coord.pressure.to_value(u.hPa), frame_or_coord.temperature.to_value(u.deg_C), frame_or_coord.relative_humidity.value, frame_or_coord.obswl.to_value(u.micron) ) return astrom class ErfaAstromInterpolator(ErfaAstrom): ''' A provider for astrometry values that does not call erfa for each individual timestamp but interpolates linearly between support points. For the interpolation, float64 MJD values are used, so time precision for the interpolation will be around a microsecond. This can dramatically speed up coordinate transformations, e.g. between CIRS and ICRS, when obstime is an array of many values (factors of 10 to > 100 depending on the selected resolution, number of points and the time range of the values). The precision of the transformation will still be in the order of microseconds for reasonable values of time_resolution, e.g. ``300 * u.s``. Users should benchmark performance and accuracy with the default transformation for their specific use case and then choose a suitable ``time_resolution`` from there. This class is intended be used together with the ``erfa_astrom`` science state, e.g. in a context manager like this Example ------- >>> from astropy.coordinates import SkyCoord, CIRS >>> from astropy.coordinates.erfa_astrom import erfa_astrom, ErfaAstromInterpolator >>> import astropy.units as u >>> from astropy.time import Time >>> import numpy as np >>> obstime = Time('2010-01-01T20:00:00') + np.linspace(0, 4, 1000) * u.hour >>> crab = SkyCoord(ra='05h34m31.94s', dec='22d00m52.2s') >>> with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)): ... 
cirs = crab.transform_to(CIRS(obstime=obstime)) ''' @u.quantity_input(time_resolution=u.day) def __init__(self, time_resolution): if time_resolution.to_value(u.us) < 10: warnings.warn( f'Using {self.__class__.__name__} with `time_resolution`' ' below 10 microseconds might lead to numerical inaccuracies' ' as the MJD-based interpolation is limited by floating point ' ' precision to about a microsecond of precision', AstropyWarning ) self.mjd_resolution = time_resolution.to_value(u.day) def _get_support_points(self, obstime): ''' Calculate support points for the interpolation. We divide the MJD by the time resolution (as single float64 values), and calculate ceil and floor. Then we take the unique and sorted values and scale back to MJD. This will create a sparse support for non-regular input obstimes. ''' mjd_scaled = np.ravel(obstime.mjd / self.mjd_resolution) # unique already does sorting mjd_u = np.unique(np.concatenate([ np.floor(mjd_scaled), np.ceil(mjd_scaled), ])) return Time( mjd_u * self.mjd_resolution, format='mjd', scale=obstime.scale, ) @staticmethod def _prepare_earth_position_vel(support, obstime): """ Calculate Earth's position and velocity. Uses the coarser grid ``support`` to do the calculation, and interpolates onto the finer grid ``obstime``. """ pv_support, heliocentric_support = prepare_earth_position_vel(support) # do interpolation earth_pv = np.empty(obstime.shape, dtype=erfa.dt_pv) earth_heliocentric = np.empty(obstime.shape + (3,)) for dim in range(3): for key in 'pv': earth_pv[key][..., dim] = np.interp( obstime.mjd, support.mjd, pv_support[key][..., dim] ) earth_heliocentric[..., dim] = np.interp( obstime.mjd, support.mjd, heliocentric_support[..., dim] ) return earth_pv, earth_heliocentric @staticmethod def _get_c2i(support, obstime): """ Calculate the Celestial-to-Intermediate rotation matrix. Uses the coarser grid ``support`` to do the calculation, and interpolates onto the finer grid ``obstime``. """ jd1_tt_support, jd2_tt_support = get_jd12(support, 'tt') c2i_support = erfa.c2i06a(jd1_tt_support, jd2_tt_support) c2i = np.empty(obstime.shape + (3, 3)) for dim1 in range(3): for dim2 in range(3): c2i[..., dim1, dim2] = np.interp(obstime.mjd, support.mjd, c2i_support[..., dim1, dim2]) return c2i @staticmethod def _get_cip(support, obstime): """ Find the X, Y coordinates of the CIP and the CIO locator, s. Uses the coarser grid ``support`` to do the calculation, and interpolates onto the finer grid ``obstime``. """ jd1_tt_support, jd2_tt_support = get_jd12(support, 'tt') cip_support = get_cip(jd1_tt_support, jd2_tt_support) return tuple( np.interp(obstime.mjd, support.mjd, cip_component) for cip_component in cip_support ) @staticmethod def _get_polar_motion(support, obstime): """ Find the two polar motion components in radians Uses the coarser grid ``support`` to do the calculation, and interpolates onto the finer grid ``obstime``. """ polar_motion_support = get_polar_motion(support) return tuple( np.interp(obstime.mjd, support.mjd, polar_motion_component) for polar_motion_component in polar_motion_support ) def apco(self, frame_or_coord): ''' Wrapper for ``erfa.apco``, used in conversions AltAz <-> ICRS and CIRS <-> ICRS Parameters ---------- frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord`` Frame or coordinate instance in the corresponding frame for which to calculate the calculate the astrom values. For this function, an AltAz or CIRS frame is expected. 
        '''
        lon, lat, height = frame_or_coord.location.to_geodetic('WGS84')
        obstime = frame_or_coord.obstime
        support = self._get_support_points(obstime)
        jd1_tt, jd2_tt = get_jd12(obstime, 'tt')

        # get the position and velocity arrays for the observatory.  Need to
        # have xyz in last dimension, and pos/vel in one-but-last.
        earth_pv, earth_heliocentric = self._prepare_earth_position_vel(support, obstime)

        xp, yp = self._get_polar_motion(support, obstime)
        sp = erfa.sp00(jd1_tt, jd2_tt)
        x, y, s = self._get_cip(support, obstime)
        era = erfa.era00(*get_jd12(obstime, 'ut1'))

        # refraction constants
        if hasattr(frame_or_coord, 'pressure'):
            # an AltAz like frame. Include refraction
            refa, refb = erfa.refco(
                frame_or_coord.pressure.to_value(u.hPa),
                frame_or_coord.temperature.to_value(u.deg_C),
                frame_or_coord.relative_humidity.value,
                frame_or_coord.obswl.to_value(u.micron)
            )
        else:
            # a CIRS like frame - no refraction
            refa, refb = 0.0, 0.0

        return erfa.apco(
            jd1_tt, jd2_tt, earth_pv, earth_heliocentric, x, y, s, era,
            lon.to_value(u.radian),
            lat.to_value(u.radian),
            height.to_value(u.m),
            xp, yp, sp, refa, refb
        )

    def apcs(self, frame_or_coord):
        '''
        Wrapper for ``erfa.apcs``, used in conversions GCRS <-> ICRS

        Parameters
        ----------
        frame_or_coord : ``astropy.coordinates.BaseCoordinateFrame`` or ``astropy.coordinates.SkyCoord``
            Frame or coordinate instance in the corresponding frame
            for which to calculate the astrom values.
            For this function, a GCRS frame is expected.
        '''
        obstime = frame_or_coord.obstime
        support = self._get_support_points(obstime)

        # get the position and velocity arrays for the observatory.  Need to
        # have xyz in last dimension, and pos/vel in one-but-last.
        earth_pv, earth_heliocentric = self._prepare_earth_position_vel(support, obstime)
        pv = pav2pv(
            frame_or_coord.obsgeoloc.get_xyz(xyz_axis=-1).value,
            frame_or_coord.obsgeovel.get_xyz(xyz_axis=-1).value
        )

        jd1_tt, jd2_tt = get_jd12(obstime, 'tt')
        return erfa.apcs(jd1_tt, jd2_tt, pv, earth_pv, earth_heliocentric)


class erfa_astrom(ScienceState):
    """
    ScienceState to select which astrom provider is used in
    coordinate transformations.
    """

    _value = ErfaAstrom()

    @classmethod
    def validate(cls, value):
        if not isinstance(value, ErfaAstrom):
            raise TypeError(f'Must be an instance of {ErfaAstrom!r}')
        return value
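
# Illustrative usage sketch (not part of the module): besides the context
# manager shown in the ErfaAstromInterpolator docstring, a ScienceState can
# also be set globally for the rest of the session.
#
# >>> import astropy.units as u
# >>> from astropy.coordinates.erfa_astrom import (
# ...     erfa_astrom, ErfaAstromInterpolator)
# >>> erfa_astrom.set(ErfaAstromInterpolator(300 * u.s))  # doctest: +SKIP
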
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# Standard library
import re
import textwrap
import warnings
from datetime import datetime
from urllib.request import urlopen, Request

# Third-party
from astropy import time as atime
from astropy.utils.console import color_print, _color_text
from . import get_sun

__all__ = []


class HumanError(ValueError):
    pass


class CelestialError(ValueError):
    pass


def get_sign(dt):
    """ """
    if ((int(dt.month) == 12 and int(dt.day) >= 22) or
            (int(dt.month) == 1 and int(dt.day) <= 19)):
        zodiac_sign = "capricorn"
    elif ((int(dt.month) == 1 and int(dt.day) >= 20) or
            (int(dt.month) == 2 and int(dt.day) <= 17)):
        zodiac_sign = "aquarius"
    elif ((int(dt.month) == 2 and int(dt.day) >= 18) or
            (int(dt.month) == 3 and int(dt.day) <= 19)):
        zodiac_sign = "pisces"
    elif ((int(dt.month) == 3 and int(dt.day) >= 20) or
            (int(dt.month) == 4 and int(dt.day) <= 19)):
        zodiac_sign = "aries"
    elif ((int(dt.month) == 4 and int(dt.day) >= 20) or
            (int(dt.month) == 5 and int(dt.day) <= 20)):
        zodiac_sign = "taurus"
    elif ((int(dt.month) == 5 and int(dt.day) >= 21) or
            (int(dt.month) == 6 and int(dt.day) <= 20)):
        zodiac_sign = "gemini"
    elif ((int(dt.month) == 6 and int(dt.day) >= 21) or
            (int(dt.month) == 7 and int(dt.day) <= 22)):
        zodiac_sign = "cancer"
    elif ((int(dt.month) == 7 and int(dt.day) >= 23) or
            (int(dt.month) == 8 and int(dt.day) <= 22)):
        zodiac_sign = "leo"
    elif ((int(dt.month) == 8 and int(dt.day) >= 23) or
            (int(dt.month) == 9 and int(dt.day) <= 22)):
        zodiac_sign = "virgo"
    elif ((int(dt.month) == 9 and int(dt.day) >= 23) or
            (int(dt.month) == 10 and int(dt.day) <= 22)):
        zodiac_sign = "libra"
    elif ((int(dt.month) == 10 and int(dt.day) >= 23) or
            (int(dt.month) == 11 and int(dt.day) <= 21)):
        zodiac_sign = "scorpio"
    elif ((int(dt.month) == 11 and int(dt.day) >= 22) or
            (int(dt.month) == 12 and int(dt.day) <= 21)):
        zodiac_sign = "sagittarius"
    return zodiac_sign


_VALID_SIGNS = ["capricorn", "aquarius", "pisces", "aries", "taurus", "gemini",
                "cancer", "leo", "virgo", "libra", "scorpio", "sagittarius"]
# Some of the constellation names map to different astrological "sign names".
# Astrologers really need to talk to the IAU...
_CONST_TO_SIGNS = {'capricornus': 'capricorn', 'scorpius': 'scorpio'}

_ZODIAC = ((1900, "rat"), (1901, "ox"), (1902, "tiger"), (1903, "rabbit"),
           (1904, "dragon"), (1905, "snake"), (1906, "horse"), (1907, "goat"),
           (1908, "monkey"), (1909, "rooster"), (1910, "dog"), (1911, "pig"))


# https://stackoverflow.com/questions/12791871/chinese-zodiac-python-program
def _get_zodiac(yr):
    return _ZODIAC[(yr - _ZODIAC[0][0]) % 12][1]


def horoscope(birthday, corrected=True, chinese=False):
    """
    Enter your birthday as an `astropy.time.Time` object and
    receive a mystical horoscope about things to come.

    Parameters
    ----------
    birthday : `astropy.time.Time` or str
        Your birthday as a `datetime.datetime` or `astropy.time.Time` object
        or "YYYY-MM-DD" string.
    corrected : bool
        Whether to account for the precession of the Earth instead of using
        the ancient Greek dates for the signs.  After all, you do want your
        *real* horoscope, not a cheap inaccurate approximation, right?
    chinese : bool
        Chinese annual zodiac wisdom instead of Western one.

    Returns
    -------
    Infinite wisdom, condensed into astrologically precise prose.

    Notes
    -----
    This function was implemented on April 1.  Take note of that date.
    """
    from bs4 import BeautifulSoup

    today = datetime.now()
    err_msg = "Invalid response from celestial gods (failed to load horoscope)."
headers = {'User-Agent': 'foo/bar'} special_words = { '([sS]tar[s^ ]*)': 'yellow', '([yY]ou[^ ]*)': 'magenta', '([pP]lay[^ ]*)': 'blue', '([hH]eart)': 'red', '([fF]ate)': 'lightgreen', } if isinstance(birthday, str): birthday = datetime.strptime(birthday, '%Y-%m-%d') if chinese: # TODO: Make this more accurate by using the actual date, not just year # Might need third-party tool like https://pypi.org/project/lunardate zodiac_sign = _get_zodiac(birthday.year) url = ('https://www.horoscope.com/us/horoscopes/yearly/' '{}-chinese-horoscope-{}.aspx'.format(today.year, zodiac_sign)) summ_title_sfx = f'in {today.year}' try: res = Request(url, headers=headers) with urlopen(res) as f: try: doc = BeautifulSoup(f, 'html.parser') # TODO: Also include Love, Family & Friends, Work, Money, More? item = doc.find(id='overview') desc = item.getText() except Exception: raise CelestialError(err_msg) except Exception: raise CelestialError(err_msg) else: birthday = atime.Time(birthday) if corrected: with warnings.catch_warnings(): warnings.simplefilter('ignore') # Ignore ErfaWarning zodiac_sign = get_sun(birthday).get_constellation().lower() zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign) if zodiac_sign not in _VALID_SIGNS: raise HumanError('On your birthday the sun was in {}, which is not ' 'a sign of the zodiac. You must not exist. Or ' 'maybe you can settle for ' 'corrected=False.'.format(zodiac_sign.title())) else: zodiac_sign = get_sign(birthday.to_datetime()) url = f"http://www.astrology.com/us/horoscope/daily-overview.aspx?sign={zodiac_sign}" summ_title_sfx = f"on {today.strftime('%Y-%m-%d')}" res = Request(url, headers=headers) with urlopen(res) as f: try: doc = BeautifulSoup(f, 'html.parser') item = doc.find('div', {'id': 'content'}) desc = item.getText() except Exception: raise CelestialError(err_msg) print("*"*79) color_print(f"Horoscope for {zodiac_sign.capitalize()} {summ_title_sfx}:", 'green') print("*"*79) for block in textwrap.wrap(desc, 79): split_block = block.split() for i, word in enumerate(split_block): for re_word in special_words.keys(): match = re.search(re_word, word) if match is None: continue split_block[i] = _color_text(match.groups()[0], special_words[re_word]) print(" ".join(split_block)) def inject_horoscope(): import astropy astropy._yourfuture = horoscope inject_horoscope()
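
# Illustrative usage sketch (not part of the module): the modular arithmetic
# in ``_get_zodiac`` maps any year onto the 12-year cycle anchored at 1900.
#
# >>> _get_zodiac(1997)  # (1997 - 1900) % 12 == 1 -> _ZODIAC[1]
# 'ox'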