author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
377,522 | 01.03.2020 02:04:40 | -10,800 | 6282e21ef63465cdc76ad832708ce9041997cd7e | Try to work around error on installing pandas with Python 2.7 | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/pythonpackage.yml",
"new_path": ".github/workflows/pythonpackage.yml",
"diff": "@@ -19,7 +19,8 @@ jobs:\n- name: Install dependencies\nrun: |\npython -m pip install --upgrade pip\n- pip install lxml numpy sqlalchemy pandas cython\n+ pip install numpy\n+ pip install lxml sqlalchemy pandas cython\npip install pynumpress\n- name: Lint with flake8\nrun: |\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Try to work around error on istalling pandas with Python 2.7 |
377,522 | 01.03.2020 02:07:57 | -10,800 | 599b9679adc0cc49b6249c791b6ba61f6c038e66 | Remove the linting step for now | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/pythonpackage.yml",
"new_path": ".github/workflows/pythonpackage.yml",
"diff": "@@ -22,13 +22,6 @@ jobs:\npip install numpy\npip install lxml sqlalchemy pandas cython\npip install pynumpress\n- - name: Lint with flake8\n- run: |\n- pip install flake8\n- # stop the build if there are Python syntax errors or undefined names\n- flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics\n- # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide\n- flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics\n- name: Run the tests\nrun: |\ncd tests; find . -name 'test_*.py' -print0 | xargs -0 -n1 env PYTHONPATH=.. python\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove the linting step for now |
377,522 | 01.03.2020 18:15:36 | -10,800 | 2f1928a2054e91f59f0fb3f400f6f401cf95cc5c | Create a PyPI publishing workflow | [
{
"change_type": "ADD",
"old_path": null,
"new_path": ".github/workflows/pythonpublish.yml",
"diff": "+name: Upload Python Package\n+\n+on:\n+ release:\n+ types: [created]\n+\n+jobs:\n+ deploy:\n+ runs-on: ubuntu-latest\n+ steps:\n+ - uses: actions/checkout@v2\n+ - name: Set up Python\n+ uses: actions/setup-python@v1\n+ with:\n+ python-version: '3.x'\n+ - name: Install dependencies\n+ run: |\n+ python -m pip install --upgrade pip\n+ pip install setuptools wheel twine\n+ - name: Build and publish\n+ env:\n+ TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}\n+ TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}\n+ run: |\n+ python setup.py sdist bdist_wheel --universal\n+ twine upload dist/*\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Create a PyPI publishing workflow |
377,522 | 01.03.2020 18:27:16 | -10,800 | a764d47d8929f9fc75bb44fd0bf1ddd8be8b4bae | Add a contributor from before move | [
{
"change_type": "MODIFY",
"old_path": "AUTHORS",
"new_path": "AUTHORS",
"diff": "List of contributors (in chronological order)\n---------------------------------------------\n+On Bitbucket:\n+.............\n+\nAnton Goloborodko [golobor]\nLev Levitsky [levitsky]\nMark Ivanov [markmipt]\n@@ -12,3 +15,4 @@ James Johnson [jjohnson]\nTalat Khattatov\nMichael Porter [emptyport]\nSean Peters [speters-cmri]\n+[manor_a]\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add a contributor from before move |
377,522 | 20.04.2020 19:37:38 | -10,800 | d5c4013dcb316736fdc6d53d3eb804939f39e7ec | Fix broken bitbucket links | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -81,7 +81,7 @@ API changes\n4.0.1\n-----\n-Fix issue `#35 <https://bitbucket.org/levitsky/pyteomics/issues/35/ordereddict-may-be-in-reversed-order-on>`_\n+Fix issue `#35 <hhttps://levitsky.github.io/bitbucket_backup/#!/levitsky/pyteomics/issues/35/page/1>`_\n(incorrect order of deserialized offset indexes on older Python versions).\n4.0\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/examples/example_filtering.rst",
"new_path": "doc/source/examples/example_filtering.rst",
"diff": "@@ -13,4 +13,4 @@ The files used in this example can be downloaded from\nThe example, including code, figures, and accompanying text, is contained in the IPython Notebook file.\n`View the rendered notebook online.\n-<http://nbviewer.ipython.org/url/bitbucket.org/levitsky/pyteomics/raw/tip/doc/source/examples/filtering/filtering.ipynb>`_\n\\ No newline at end of file\n+<https://github.com/levitsky/pyteomics/blob/master/doc/source/examples/filtering/filtering.ipynb>`_\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix broken bitbucket links |
377,522 | 27.04.2020 02:11:20 | -10,800 | 511655946a393f4aa214685ab7aa7450a9ab8cca | Fix error when fragment ion mass spectrum is not present in tandem xml file | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.3a1\n\\ No newline at end of file\n+4.3a2\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -198,6 +198,7 @@ class PepXML(xml.MultiProcessingXML, xml.IndexSavingXML):\ninfo['search_hit'].sort(key=lambda x: x['hit_rank'])\nreturn info\n+\ndef read(source, read_schema=False, iterative=True, **kwargs):\n\"\"\"Parse `source` and iterate through peptide-spectrum matches.\n@@ -225,6 +226,7 @@ def read(source, read_schema=False, iterative=True, **kwargs):\nreturn PepXML(source, read_schema=read_schema, iterative=iterative)\n+\ndef iterfind(source, path, **kwargs):\n\"\"\"Parse `source` and yield info on elements with specified local\nname or by specified \"XPath\".\n@@ -271,8 +273,10 @@ def iterfind(source, path, **kwargs):\n\"\"\"\nreturn PepXML(source, **kwargs).iterfind(path, **kwargs)\n+\nversion_info = xml._make_version_info(PepXML)\n+\ndef roc_curve(source):\n\"\"\"Parse source and return a ROC curve for peptideprophet analysis.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -134,13 +134,12 @@ class TandemXML(xml.XML):\nfor d in info['support'].get('supporting data', {}).values():\nfor l in ['Xdata', 'Ydata']:\nd[l]['values'] = d[l]['values'].astype(int)\n+ del d[l]['label']\n+ if 'fragment ion mass spectrum' in info['support']:\nfims = info['support']['fragment ion mass spectrum']\nfims.update(fims.pop('tandem mass spectrum'))\n- for d in it.chain(\n- info['support'].get('supporting data', {}).values(),\n- (info['support']['fragment ion mass spectrum'],)):\nfor l in ['Xdata', 'Ydata']:\n- del d[l]['label']\n+ del info['support']['fragment ion mass spectrum'][l]['label']\nif 'charge' in info:\ninfo['charge'] = int(info['charge'])\nreturn info\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix error when fragment ion mass spectrum is not present in tandem xml file |
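The fix above replaces unconditional access to the `'fragment ion mass spectrum'` key with a guard. A trimmed, standalone sketch of that pattern, using a hypothetical `info` dict in place of the real parser output:

```python
# Trimmed sketch of the guard added in tandem.py: only merge and clean up the
# optional 'fragment ion mass spectrum' entry when the group actually contains one.
# The 'info' dict here is a hypothetical stand-in for the real parser output.
info = {'support': {'supporting data': {}}}   # no fragment ion spectrum in this group

support = info['support']
if 'fragment ion mass spectrum' in support:   # the old code assumed this key existed
    fims = support['fragment ion mass spectrum']
    fims.update(fims.pop('tandem mass spectrum'))
    for axis in ('Xdata', 'Ydata'):
        del fims[axis]['label']
```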
377,522 | 01.05.2020 23:09:21 | -10,800 | 46edfdc9418a9efd6f243ff08acf841fe936bd9a | Parse tandem rt as duration, convert duration to float on failure | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/_schema_defaults.py",
"new_path": "pyteomics/_schema_defaults.py",
"diff": "@@ -288,7 +288,7 @@ _tandem_schema_defaults = {'ints': {\n'a_ions', 'x_ions', 'c_ions', 'z_ions']},\n'floats': {('group', k) for k in [\n- 'fI', 'sumI', 'maxI', 'mh', 'expect', 'rt']} | {\n+ 'fI', 'sumI', 'maxI', 'mh', 'expect']} | {\n('domain', k) for k in [\n'expect', 'hyperscore', 'b_score', 'y_score',\n'a_score', 'x_score', 'c_score', 'z_score',\n@@ -299,7 +299,7 @@ _tandem_schema_defaults = {'ints': {\n'bools': set(),\n'lists': {'group', 'trace', 'attribute', 'protein', 'aa', 'note'},\n'floatlists': {('values', 'values')},\n- 'intlists': set(), 'charlists': set()}\n+ 'intlists': set(), 'charlists': set(), 'duration': {('group', 'rt')}}\n_mzxml_schema_defaults = {'bools': {('dataProcessing', 'centroided'),\n('dataProcessing', 'chargeDeconvoluted'),\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -86,10 +86,10 @@ This module requires :py:mod:`lxml` and :py:mod:`numpy`.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n-import itertools as it\nimport operator\nfrom . import xml, auxiliary as aux, _schema_defaults\n+\nclass TandemXML(xml.XML):\n\"\"\"Parser class for TandemXML files.\"\"\"\nfile_format = \"TandemXML\"\n@@ -109,9 +109,7 @@ class TandemXML(xml.XML):\ndef _get_info_smart(self, element, **kw):\ninfo = self._get_info(element, **kw)\n# handy simplifications below\n- if isinstance(info.get('note'), list\n- ) and len(info['note']) == 1 and set(\n- info['note'][0]) == {'label', 'note'}:\n+ if isinstance(info.get('note'), list) and len(info['note']) == 1 and set(info['note'][0]) == {'label', 'note'}:\ninfo['note'] = info['note'][0]['note']\nif 'protein' in info and 'label' in info:\ndel info['label']\n@@ -142,6 +140,7 @@ class TandemXML(xml.XML):\ndel info['support']['fragment ion mass spectrum'][l]['label']\nif 'charge' in info:\ninfo['charge'] = int(info['charge'])\n+\nreturn info\ndef _get_schema_info(self, read_schema):\n@@ -154,6 +153,7 @@ class TandemXML(xml.XML):\nnext = __next__\n+\ndef read(source, iterative=True, **kwargs):\n\"\"\"Parse `source` and iterate through peptide-spectrum matches.\n@@ -172,8 +172,8 @@ def read(source, iterative=True, **kwargs):\nout : iterator\nAn iterator over dicts with PSM properties.\n\"\"\"\n- return TandemXML(source, read_schema=False,\n- recursive=True, iterative=iterative)\n+ return TandemXML(source, read_schema=False, recursive=True, iterative=iterative)\n+\ndef iterfind(source, path, **kwargs):\n\"\"\"Parse `source` and yield info on elements with specified local\n@@ -237,6 +237,7 @@ def _is_decoy_prefix(psm, prefix='DECOY_'):\n\"\"\"\nreturn all(prot['label'].startswith(prefix) for prot in psm['protein'])\n+\ndef _is_decoy_suffix(psm, suffix='_DECOY'):\n\"\"\"Given a PSM dict, return :py:const:`True` if all protein names for\nthe PSM end with `suffix`, and :py:const:`False` otherwise.\n@@ -254,12 +255,14 @@ def _is_decoy_suffix(psm, suffix='_DECOY'):\n\"\"\"\nreturn all(prot['label'].endswith(suffix) for prot in psm['protein'])\n+\nis_decoy = _is_decoy_prefix\nqvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, operator.itemgetter('expect'))\nfilter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, operator.itemgetter('expect'), qvalues)\nfdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)\nfilter.chain = aux._make_chain(filter, 'filter', True)\n+\ndef DataFrame(*args, **kwargs):\n\"\"\"Read X!Tandem output files into a :py:class:`pandas.DataFrame`.\n@@ -318,6 +321,7 @@ def DataFrame(*args, **kwargs):\ndata.append(info)\nreturn pd.DataFrame(data, **pd_kwargs)\n+\ndef filter_df(*args, **kwargs):\n\"\"\"Read X!Tandem output files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.\nPositional arguments can be X!Tandem output files or DataFrames.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -121,9 +121,12 @@ class XMLValueConverter(object):\n@classmethod\ndef duration_str_to_float(cls, s):\n- # Not a duration, so pass along unchanged\n- if not s.startswith(\"P\"):\n- return unitstr(s, \"duration\")\n+ # Not a duration, so pass along\n+ if not s.startswith('P'):\n+ try:\n+ return unitfloat(s, 'duration')\n+ except ValueError:\n+ return unitstr(s, 'duration')\nmatch = cls._duration_parser.search(s)\nif match:\nmatchdict = match.groupdict()\n@@ -132,9 +135,9 @@ class XMLValueConverter(object):\nseconds = float(matchdict.get('seconds', 0) or 0)\nminutes += hours * 60.\nminutes += (seconds / 60.)\n- return unitfloat(minutes, \"minute\")\n+ return unitfloat(minutes, 'minute')\nelse:\n- return unitstr(s, \"duration\")\n+ return unitstr(s, 'duration')\n@classmethod\ndef str_to_bool(cls, s):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -7,6 +7,7 @@ from copy import deepcopy\nimport sys\nfrom pyteomics.auxiliary import basestring\n+\n# http://stackoverflow.com/q/14246983/1258041\nclass ComparableArray(np.ndarray):\ndef __eq__(self, other):\n@@ -15,11 +16,13 @@ class ComparableArray(np.ndarray):\nother = np.asarray(other, dtype=np.float)\nreturn self.shape == other.shape and np.allclose(self, other)\n+\ndef makeCA(arr):\nif not isinstance(arr, np.ndarray):\narr = np.array(arr)\nreturn ComparableArray(arr.shape, arr.dtype, arr)\n+\npepxml_results = [\n{'spectrum': 'pps_sl20060731_18mix_25ul_r1_1154456409.0100.0100.1',\n'end_scan': 100,\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Parse tandem rt as duration, convert duration to float on failure |
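This commit routes the X!Tandem `rt` attribute through the duration converter, which now falls back to a plain float before giving up and keeping the string. A rough, self-contained sketch of that strategy (the function name, regex, and sample values below are illustrative; the real `XMLValueConverter.duration_str_to_float` returns `unitfloat`/`unitstr` objects):

```python
import re

def duration_to_float(value):
    """Illustrative re-implementation of the fallback strategy: ISO 8601 durations
    become minutes, other values are tried as plain floats, and only unparseable
    values are kept as strings."""
    if value.startswith('P'):
        match = re.fullmatch(r'PT?(?:(\d+)H)?(?:(\d+)M)?(?:(\d+(?:\.\d+)?)S)?', value)
        if match:
            hours, minutes, seconds = (float(g) if g else 0.0 for g in match.groups())
            return hours * 60.0 + minutes + seconds / 60.0
    try:
        return float(value)       # e.g. an X!Tandem rt attribute like '1234.5'
    except ValueError:
        return value              # fall back to the raw string

print(duration_to_float('PT90S'))   # 1.5 (minutes)
print(duration_to_float('1234.5'))  # 1234.5
```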
377,522 | 07.05.2020 20:57:32 | -10,800 | e697117ef449e5e4f5f2dc6cb342ab96df31d391 | Make rt None if empty in tandem | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -3,6 +3,9 @@ dev\n- New module :py:mod:`pyteomics.openms.idxml`.\n+ - Fix `#3 <https://github.com/levitsky/pyteomics/issues/3>`_, `#5 <https://github.com/levitsky/pyteomics/issues/5>`_,\n+ and some issues in :py:mod:`tandem`.\n+\n4.2\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -130,16 +130,18 @@ class TandemXML(xml.XML):\ninfo[a['type']] = float(a['attribute'])\nif 'support' in info:\nfor d in info['support'].get('supporting data', {}).values():\n- for l in ['Xdata', 'Ydata']:\n- d[l]['values'] = d[l]['values'].astype(int)\n- del d[l]['label']\n+ for label in ['Xdata', 'Ydata']:\n+ d[label]['values'] = d[label]['values'].astype(int)\n+ del d[label]['label']\nif 'fragment ion mass spectrum' in info['support']:\nfims = info['support']['fragment ion mass spectrum']\nfims.update(fims.pop('tandem mass spectrum'))\n- for l in ['Xdata', 'Ydata']:\n- del info['support']['fragment ion mass spectrum'][l]['label']\n+ for label in ['Xdata', 'Ydata']:\n+ del info['support']['fragment ion mass spectrum'][label]['label']\nif 'charge' in info:\ninfo['charge'] = int(info['charge'])\n+ if info.get('rt') == '':\n+ info['rt'] = None\nreturn info\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make rt None if empty in tandem |
377,522 | 18.05.2020 21:02:20 | -10,800 | 229d010151173754c45d564f8bc27b6e49b01096 | Doc fixes and version bump | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-dev\n+4.3\n---\n+First release after the move to Github. Issue and PR numbers from now on refer to the\n+`Github repo <https://github.com/levitsky/pyteomics>`_. Archive of the Bibucket issues and PRs is stored\n+`here <https://levitsky.github.io/bitbucket_backup/#!/levitsky/pyteomics>`_.\n+\n+Changes in this release:\n+\n- New module :py:mod:`pyteomics.openms.idxml`.\n- Fix `#3 <https://github.com/levitsky/pyteomics/issues/3>`_, `#5 <https://github.com/levitsky/pyteomics/issues/5>`_,\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.3a2\n\\ No newline at end of file\n+4.3\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/openms/idxml.py",
"new_path": "pyteomics/openms/idxml.py",
"diff": "@@ -12,10 +12,8 @@ It defines a list of peptide identifications.\nThis module provides a minimalistic way to extract information from idXML\nfiles. You can use the old functional interface (:py:func:`read`) or the new\nobject-oriented interface (:py:class:`IDXML`) to iterate over entries in\n-``<SpectrumIdentificationResult>`` elements, i.e. groups of identifications\n-for a certain spectrum. Note that each entry can contain more than one PSM\n-(peptide-spectrum match). They are accessible with \"SpectrumIdentificationItem\"\n-key.\n+``<PeptideIdentification>`` elements. Note that each entry can contain more than one PSM\n+(peptide-spectrum match). They are accessible with ``'PeptideHit'`` key.\n:py:class:`IDXML` objects also support direct indexing by element ID.\nData access\n@@ -125,13 +123,9 @@ class IDXML(xml.IndexedXML):\n# Try not to recursively unpack the root element\n# unless the user really wants to.\nif name == self._root_element:\n- info = self._get_info(element,\n- recursive=(rec if rec is not None else False),\n- **kwargs)\n+ info = self._get_info(element, recursive=(rec if rec is not None else False), **kwargs)\nelse:\n- info = self._get_info(element,\n- recursive=(rec if rec is not None else True),\n- **kwargs)\n+ info = self._get_info(element, recursive=(rec if rec is not None else True), **kwargs)\nfor k in ['start', 'end']:\nv = info.get(k)\nif isinstance(v, list) and len(v) == 2:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Doc fixes and version bump |
377,522 | 18.05.2020 22:03:04 | -10,800 | 5de0e927e01d5a59c5e3456ccf9dab7c664d419b | Update publishing workflow | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/pythonpublish.yml",
"new_path": ".github/workflows/pythonpublish.yml",
"diff": "@@ -2,7 +2,7 @@ name: Upload Python Package\non:\nrelease:\n- types: [created]\n+ types: [published]\njobs:\ndeploy:\n@@ -10,7 +10,7 @@ jobs:\nsteps:\n- uses: actions/checkout@v2\n- name: Set up Python\n- uses: actions/setup-python@v1\n+ uses: actions/setup-python@v2\nwith:\npython-version: '3.x'\n- name: Install dependencies\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update publishing workflow |
377,522 | 26.05.2020 21:53:28 | -10,800 | 4a240d88c7dddbff0aa029c3d3b5d4ce645ae2c5 | Add test status badge | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/pythonpackage.yml",
"new_path": ".github/workflows/pythonpackage.yml",
"diff": "-name: Python package\n+name: tests\non: [push, pull_request]\n"
},
{
"change_type": "MODIFY",
"old_path": "README.rst",
"new_path": "README.rst",
"diff": "+.. image:: https://github.com/levitsky/pyteomics/workflows/tests/badge.svg\n+ :alt: Test status\n+\n.. image:: https://img.shields.io/pypi/v/pyteomics.svg\n:target: https://pypi.org/project/pyteomics/\n:alt: PyPI\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add test status badge |
377,522 | 28.05.2020 01:46:40 | -10,800 | 4425e97b67dde4a6a7ddc9cedfe4632480be4754 | Minor code brush-up, update mass.Unimod tests | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mass/mass.py",
"new_path": "pyteomics/mass/mass.py",
"diff": "@@ -103,6 +103,7 @@ abundant isotopes and a separate entry for undefined isotope with zero\nkey, mass of the most abundant isotope and 1.0 abundance.\n\"\"\"\n+\ndef _make_isotope_string(element_name, isotope_num):\n\"\"\"Form a string label for an isotope.\"\"\"\nif isotope_num == 0:\n@@ -110,6 +111,7 @@ def _make_isotope_string(element_name, isotope_num):\nelse:\nreturn '{}[{}]'.format(element_name, isotope_num)\n+\ndef _parse_isotope_string(label):\n\"\"\"Parse an string with an isotope label and return the element name and\nthe isotope number.\n@@ -123,6 +125,7 @@ def _parse_isotope_string(label):\nisotope_num = int(num) if num else 0\nreturn element_name, isotope_num\n+\n# Initialize std_aa_comp and std_ion_comp before the Composition class\n# description, fill it later.\nstd_aa_comp = {}\n@@ -142,6 +145,7 @@ _isotope_string = r'^([A-Z][a-z+]*)(?:\\[(\\d+)\\])?$'\n_atom = r'([A-Z][a-z+]*)(?:\\[(\\d+)\\])?([+-]?\\d+)?'\n_formula = r'^({})*$'.format(_atom)\n+\nclass Composition(BasicComposition):\n\"\"\"\nA Composition object stores a chemical composition of a\n@@ -169,8 +173,7 @@ class Composition(BasicComposition):\ncomp[elem] += cnt\nexcept (PyteomicsError, KeyError):\n- raise PyteomicsError(\n- 'No information for %s in `aa_comp`' % aa)\n+ raise PyteomicsError('No information for %s in `aa_comp`' % aa)\nself._from_composition(comp)\ndef _from_split_sequence(self, split_sequence, aa_comp):\n@@ -204,10 +207,9 @@ class Composition(BasicComposition):\nif not re.match(_formula, formula):\nraise PyteomicsError('Invalid formula: ' + formula)\nfor elem, isotope, number in re.findall(_atom, formula):\n- if not elem in mass_data:\n+ if elem not in mass_data:\nraise PyteomicsError('Unknown chemical element: ' + elem)\n- self[_make_isotope_string(elem, int(isotope) if isotope else 0)\n- ] += int(number) if number else 1\n+ self[_make_isotope_string(elem, int(isotope) if isotope else 0)] += int(number) if number else 1\ndef _from_composition(self, comp):\nfor isotope_string, num_atoms in comp.items():\n@@ -282,7 +284,6 @@ class Composition(BasicComposition):\naa_comp = kwargs.get('aa_comp', std_aa_comp)\nmass_data = kwargs.get('mass_data', nist_mass)\n-\nkw_given = self._kw_sources.intersection(kwargs)\nif len(kw_given) > 1:\nraise PyteomicsError('Only one of {} can be specified!\\n'\n@@ -311,7 +312,7 @@ class Composition(BasicComposition):\nelse:\ntry:\nself._from_sequence(parser.tostring(args[0], True), aa_comp)\n- except:\n+ except Exception:\nraise PyteomicsError('Could not create a Composition object'\n' from `{}`. 
A Composition object must be '\n'specified by sequence, parsed or split sequence,'\n@@ -327,9 +328,7 @@ class Composition(BasicComposition):\ncharge = self['H+']\nif 'charge' in kwargs:\nif charge:\n- raise PyteomicsError(\n- 'Charge is specified both by the number of protons and '\n- '`charge` in kwargs')\n+ raise PyteomicsError('Charge is specified both by the number of protons and `charge` in kwargs')\ncharge = kwargs['charge']\nself['H+'] = charge\n@@ -385,6 +384,7 @@ class Composition(BasicComposition):\nmass /= charge\nreturn mass\n+\nstd_aa_comp.update({\n'A': Composition({'H': 5, 'C': 3, 'O': 1, 'N': 1}),\n'C': Composition({'H': 5, 'C': 3, 'S': 1, 'O': 1, 'N': 1}),\n@@ -412,6 +412,7 @@ std_aa_comp.update({\n'-OH': Composition({'O': 1, 'H': 1}),\n})\n+\nstd_ion_comp.update({\n'M': Composition(formula=''),\n'M-H2O': Composition(formula='H-2O-1'),\n@@ -501,11 +502,10 @@ def calculate_mass(*args, **kwargs):\nmass : float\n\"\"\"\n# Make a copy of `composition` keyword argument.\n- composition = (Composition(kwargs['composition'])\n- if 'composition' in kwargs\n- else Composition(*args, **kwargs))\n+ composition = (Composition(kwargs['composition']) if 'composition' in kwargs else Composition(*args, **kwargs))\nreturn composition.mass(**kwargs)\n+\ndef most_probable_isotopic_composition(*args, **kwargs):\n\"\"\"Calculate the most probable isotopic composition of a peptide\nmolecule/ion defined by a sequence string, parsed sequence,\n@@ -562,12 +562,9 @@ def most_probable_isotopic_composition(*args, **kwargs):\nisotopic_composition = Composition()\nfor element_name in composition:\n- if (not elements_with_isotopes\n- or (element_name in elements_with_isotopes)):\n+ if not elements_with_isotopes or (element_name in elements_with_isotopes):\n# Take the two most abundant isotopes.\n- first_iso, second_iso = sorted(\n- [(i[0], i[1][1])\n- for i in mass_data[element_name].items() if i[0]],\n+ first_iso, second_iso = sorted([(i[0], i[1][1]) for i in mass_data[element_name].items() if i[0]],\nkey=lambda x: -x[1])[:2]\n# Write the number of isotopes of the most abundant type.\n@@ -577,16 +574,13 @@ def most_probable_isotopic_composition(*args, **kwargs):\n# Write the number of the second isotopes.\nsecond_iso_str = _make_isotope_string(element_name, second_iso[0])\n- isotopic_composition[second_iso_str] = (\n- composition[element_name]\n- - isotopic_composition[first_iso_str])\n+ isotopic_composition[second_iso_str] = composition[element_name] - isotopic_composition[first_iso_str]\nelse:\nisotopic_composition[element_name] = composition[element_name]\nreturn (isotopic_composition,\n- isotopic_composition_abundance(\n- composition=isotopic_composition,\n- mass_data=mass_data))\n+ isotopic_composition_abundance(composition=isotopic_composition, mass_data=mass_data))\n+\ndef isotopic_composition_abundance(*args, **kwargs):\n\"\"\"Calculate the relative abundance of a given isotopic composition\n@@ -622,15 +616,11 @@ def isotopic_composition_abundance(*args, **kwargs):\n# If there is already an entry for this element and either it\n# contains a default isotope or newly added isotope is default\n# then raise an exception.\n- if ((element_name in isotopic_composition)\n- and (isotope_num == 0\n- or 0 in isotopic_composition[element_name])):\n+ if (element_name in isotopic_composition) and (isotope_num == 0 or 0 in isotopic_composition[element_name]):\nraise PyteomicsError(\n- 'Please specify the isotopic states of all atoms of '\n- '%s or do not specify them at all.' 
% element_name)\n+ 'Please specify the isotopic states of all atoms of %s or do not specify them at all.' % element_name)\nelse:\n- isotopic_composition[element_name][isotope_num] = (\n- composition[element])\n+ isotopic_composition[element_name][isotope_num] = composition[element]\n# Calculate relative abundance.\nmass_data = kwargs.get('mass_data', nist_mass)\n@@ -640,11 +630,11 @@ def isotopic_composition_abundance(*args, **kwargs):\nfor isotope_num, isotope_content in isotope_dict.items():\ndenom *= math.factorial(isotope_content)\nif isotope_num:\n- num2 *= (mass_data[element_name][isotope_num][1]\n- ** isotope_content)\n+ num2 *= mass_data[element_name][isotope_num][1] ** isotope_content\nreturn num2 * (num1 / denom)\n+\ndef isotopologues(*args, **kwargs):\n\"\"\"Iterate over possible isotopic states of a molecule.\nThe molecule can be defined by formula, sequence, parsed sequence, or composition.\n@@ -725,6 +715,7 @@ def isotopologues(*args, **kwargs):\nelse:\nyield ic\n+\nstd_aa_mass = {\n'G': 57.02146,\n'A': 71.03711,\n@@ -753,6 +744,7 @@ std_aa_mass = {\namino acid residues, selenocysteine and pyrrolysine.\n\"\"\"\n+\ndef fast_mass(sequence, ion_type=None, charge=None, **kwargs):\n\"\"\"Calculate monoisotopic mass of an ion using the fast\nalgorithm. May be used only if amino acid residues are presented in\n@@ -800,14 +792,14 @@ def fast_mass(sequence, ion_type=None, charge=None, **kwargs):\nexcept KeyError:\nraise PyteomicsError('Unknown ion type: {}'.format(ion_type))\n- mass += sum(mass_data[element][0][0] * num\n- for element, num in icomp.items())\n+ mass += sum(mass_data[element][0][0] * num for element, num in icomp.items())\nif charge:\nmass = (mass + mass_data['H+'][0][0] * charge) / charge\nreturn mass\n+\ndef fast_mass2(sequence, ion_type=None, charge=None, **kwargs):\n\"\"\"Calculate monoisotopic mass of an ion using the fast\nalgorithm. 
*modX* notation is fully supported.\n@@ -861,8 +853,7 @@ def fast_mass2(sequence, ion_type=None, charge=None, **kwargs):\nmod, X = parser._split_label(aa)\nmass += (aa_mass[mod] + aa_mass[X]) * num\nexcept KeyError as e:\n- raise PyteomicsError(\n- 'Unspecified mass for modification: \"{}\"'.format(e.args[0]))\n+ raise PyteomicsError('Unspecified mass for modification: \"{}\"'.format(e.args[0]))\nif ion_type:\ntry:\n@@ -878,6 +869,7 @@ def fast_mass2(sequence, ion_type=None, charge=None, **kwargs):\nreturn mass\n+\nclass Unimod():\n\"\"\"A class for Unimod database of modifications.\nThe list of all modifications can be retrieved via `mods` attribute.\n@@ -901,12 +893,12 @@ class Unimod():\n\"\"\"\nfrom lxml import etree\nfrom ..xml import _local_name\n+\ndef process_mod(mod):\nd = mod.attrib\nnew_d = {}\nfor key in ('date_time_modified', 'date_time_posted'):\n- new_d[key] = datetime.strptime(d.pop(key),\n- '%Y-%m-%d %H:%M:%S')\n+ new_d[key] = datetime.strptime(d.pop(key), '%Y-%m-%d %H:%M:%S')\ncomp = Composition()\nfor delta in self._xpath('delta', mod): # executed 1 time\nfor key in ('avge_mass', 'mono_mass'):\n@@ -915,15 +907,15 @@ class Unimod():\ne_d = elem.attrib\namount = int(e_d.pop('number'))\nlabel = e_d.pop('symbol')\n- isotope, symbol = re.match('^(\\d*)(\\D+)$', label).groups()\n- if not isotope: isotope = 0\n- else: isotope = int(isotope)\n- comp += Composition(\n- formula = _make_isotope_string(symbol, isotope),\n- mass_data = self._massdata) * amount\n+ isotope, symbol = re.match(r'^(\\d*)(\\D+)$', label).groups()\n+ if not isotope:\n+ isotope = 0\n+ else:\n+ isotope = int(isotope)\n+ comp += Composition(formula=_make_isotope_string(symbol, isotope), mass_data=self._massdata) * amount\nnew_d['composition'] = comp\nnew_d['record_id'] = int(d.pop('record_id'))\n- new_d['approved'] = (d.pop('approved') == '1')\n+ new_d['approved'] = d.pop('approved') == '1'\nnew_d.update(d)\nspec = []\nfor sp in self._xpath('specificity', mod):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mass.py",
"new_path": "tests/test_mass.py",
"diff": "@@ -3,6 +3,7 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport unittest\nimport random\n+import pickle\nfrom pyteomics import mass, auxiliary, parser\nimport gzip\n@@ -59,60 +60,47 @@ class MassTest(unittest.TestCase):\nfor pep in self.random_peptides:\nself.assertAlmostEqual(\nmass.fast_mass(pep, aa_mass=self.test_aa_mass),\n- sum(pep.count(aa) * m\n- for aa, m in self.test_aa_mass.items())\n- + self.mass_H * 2.0 + self.mass_O)\n+ sum(pep.count(aa) * m for aa, m in self.test_aa_mass.items()) + self.mass_H * 2.0 + self.mass_O)\ndef test_fast_mass2(self):\nfor pep in self.random_peptides:\nself.assertAlmostEqual(\nmass.fast_mass2(pep, aa_mass=self.test_aa_mass),\n- sum(pep.count(aa) * m\n- for aa, m in self.test_aa_mass.items())\n- + self.mass_H * 2.0 + self.mass_O)\n-\n+ sum(pep.count(aa) * m for aa, m in self.test_aa_mass.items()) + self.mass_H * 2.0 + self.mass_O)\ndef test_Composition_dict(self):\n# Test Composition from a dict.\n- self.assertEqual(\n- mass.Composition(self.d, mass_data=self.mass_data), self.d)\n+ self.assertEqual(mass.Composition(self.d, mass_data=self.mass_data), self.d)\ndef test_Composition_formula(self):\n# Test Composition from a formula.\n- self.assertEqual(self.d,\n- mass.Composition(formula='ABCDE',\n- mass_data={atom: {0: (1.0, 1.0)} for atom in 'ABCDE'}))\n+ self.assertEqual(self.d, mass.Composition(formula='ABCDE', mass_data={atom: {0: (1.0, 1.0)} for atom in 'ABCDE'}))\ndef test_Composition_seq(self):\n# Test Composition from a sequence.\n- self.assertEqual(self.d,\n- mass.Composition(sequence='XYZ', aa_comp=self.aa_comp))\n+ self.assertEqual(self.d, mass.Composition(sequence='XYZ', aa_comp=self.aa_comp))\ndef test_Composition_pseq(self):\n# Test Composition from a parsed sequence.\nself.assertEqual(\n- mass.Composition(parsed_sequence=['X', 'Y', 'Z'],\n- aa_comp=self.aa_comp),\n+ mass.Composition(parsed_sequence=['X', 'Y', 'Z'], aa_comp=self.aa_comp),\n{atom: 1 for atom in 'ABC'})\ndef test_Composition_sseq(self):\n# Test Composition from a split sequence.\nself.assertEqual(\n- mass.Composition(split_sequence=[('X',), ('Y',), ('Z',)],\n- aa_comp=self.aa_comp),\n+ mass.Composition(split_sequence=[('X',), ('Y',), ('Z',)], aa_comp=self.aa_comp),\n{atom: 1 for atom in 'ABC'})\ndef test_Composition_sum(self):\n# Test sum of Composition objects.\nself.assertEqual(\n- mass.Composition(sequence='XXY', aa_comp=self.aa_comp)\n- + mass.Composition(sequence='YZZ', aa_comp=self.aa_comp),\n+ mass.Composition(sequence='XXY', aa_comp=self.aa_comp) + mass.Composition(sequence='YZZ', aa_comp=self.aa_comp),\n{atom: 2 for atom in 'ABCDE'})\ndef test_Composition_sub(self):\n# Test subtraction of Composition objects\n- self.assertEqual({}\n- - mass.Composition(sequence='XYZ', aa_comp=self.aa_comp),\n+ self.assertEqual({} - mass.Composition(sequence='XYZ', aa_comp=self.aa_comp),\n{atom: -1 for atom in 'ABCDE'})\ndef test_Composition_mul(self):\n@@ -128,10 +116,8 @@ class MassTest(unittest.TestCase):\n# Test creation from positional args\nac = self.aa_comp.copy()\nac.update(self.mods)\n- self.assertEqual(mass.Composition('aXbYZ', aa_comp=ac),\n- {'A': 2, 'B': 2, 'C': 1, 'D': 1, 'E': 1})\n- self.assertEqual(mass.Composition('AB2C3', mass_data=self.mass_data),\n- {'A': 1, 'B': 2, 'C': 3})\n+ self.assertEqual(mass.Composition('aXbYZ', aa_comp=ac), {'A': 2, 'B': 2, 'C': 1, 'D': 1, 'E': 1})\n+ self.assertEqual(mass.Composition('AB2C3', mass_data=self.mass_data), {'A': 1, 'B': 2, 'C': 3})\ndef 
test_calculate_mass(self):\n# Calculate mass by a formula.\n@@ -148,110 +134,79 @@ class MassTest(unittest.TestCase):\n# Calculate mass by a parsed sequence.\nself.assertEqual(\n- mass.calculate_mass(parsed_sequence=['H-','X','Y','Z','-OH'],\n- aa_comp=self.aa_comp,\n- mass_data=self.mass_data),\n+ mass.calculate_mass(parsed_sequence=['H-', 'X', 'Y', 'Z', '-OH'], aa_comp=self.aa_comp, mass_data=self.mass_data),\nsum(self.mass_data[atom][0][0] for atom in 'ABCDE'))\n# Calculate average mass by a formula.\nself.assertEqual(\n- mass.calculate_mass(formula='ABCDE',\n- average=True,\n- mass_data=self.mass_data),\n- sum(self.mass_data[atom][isotope][0]\n- * self.mass_data[atom][isotope][1]\n- for atom in 'ABCDE'\n- for isotope in self.mass_data[atom] if isotope != 0))\n+ mass.calculate_mass(formula='ABCDE', average=True, mass_data=self.mass_data),\n+ sum(self.mass_data[atom][isotope][0] * self.mass_data[atom][isotope][1]\n+ for atom in 'ABCDE'for isotope in self.mass_data[atom] if isotope != 0))\n# Calculate m/z of an ion.\nfor charge in [1, 2, 3]:\nself.assertEqual(\n- mass.calculate_mass(formula='ABCDE',\n- ion_type='M',\n- charge=charge,\n- mass_data=self.mass_data),\n- mass.calculate_mass(formula='ABCDE'+'H+%d' % (charge,),\n- mass_data=self.mass_data))\n+ mass.calculate_mass(formula='ABCDE', ion_type='M', charge=charge, mass_data=self.mass_data),\n+ mass.calculate_mass(formula='ABCDE' + 'H+%d' % (charge,), mass_data=self.mass_data))\nself.assertEqual(\n- mass.calculate_mass(formula='ABCDE',\n- ion_type='M',\n- charge=charge,\n- mass_data=self.mass_data),\n- (mass.calculate_mass(formula='ABCDE',\n- mass_data=self.mass_data)\n- + self.mass_data['H+'][0][0] * charge\n- ) / charge)\n-\n- self.assertRaises(\n- auxiliary.PyteomicsError,\n- mass.calculate_mass,\n- **{'formula': 'ABCDEH+%d' % charge,\n- 'ion_type': 'M',\n- 'charge': charge,\n- 'mass_data': self.mass_data})\n+ mass.calculate_mass(formula='ABCDE', ion_type='M', charge=charge, mass_data=self.mass_data),\n+ (mass.calculate_mass(formula='ABCDE', mass_data=self.mass_data) + self.mass_data['H+'][0][0] * charge) / charge)\n+\n+ self.assertRaises(auxiliary.PyteomicsError, mass.calculate_mass, **{'formula': 'ABCDEH+%d' % charge,\n+ 'ion_type': 'M', 'charge': charge, 'mass_data': self.mass_data})\n# Sanity check.\nfor pep in self.random_peptides:\n- self.assertEqual(mass.calculate_mass(\n- sequence=pep, aa_comp=self.aa_comp, mass_data=self.mass_data,\n- ion_comp=self.ion_comp),\n- mass.calculate_mass(\n- parsed_sequence=parser.parse(\n- pep, labels=['X', 'Y', 'Z'], show_unmodified_termini=True),\n- aa_comp=self.aa_comp, mass_data=self.mass_data,\n- ion_comp=self.ion_comp))\n+ self.assertEqual(\n+ mass.calculate_mass(sequence=pep, aa_comp=self.aa_comp, mass_data=self.mass_data, ion_comp=self.ion_comp),\n+ mass.calculate_mass(parsed_sequence=parser.parse(pep, labels=['X', 'Y', 'Z'], show_unmodified_termini=True),\n+ aa_comp=self.aa_comp, mass_data=self.mass_data, ion_comp=self.ion_comp))\ndef test_most_probable_isotopic_composition(self):\nself.assertEqual(\n- mass.most_probable_isotopic_composition(\n- formula='F',\n- mass_data=self.mass_data),\n- (mass.Composition({'F[6]': 1, 'F[7]': 0},\n- mass_data=self.mass_data), 0.7))\n+ mass.most_probable_isotopic_composition(formula='F', mass_data=self.mass_data),\n+ (mass.Composition({'F[6]': 1, 'F[7]': 0}, mass_data=self.mass_data), 0.7))\nself.assertEqual(\n- mass.most_probable_isotopic_composition(\n- formula='F10',\n- mass_data=self.mass_data),\n- (mass.Composition({'F[6]': 7, 'F[7]': 3},\n- 
mass_data=self.mass_data),\n- (0.3)**3 * (0.7)**7 * 120))\n+ mass.most_probable_isotopic_composition(formula='F10', mass_data=self.mass_data),\n+ (mass.Composition({'F[6]': 7, 'F[7]': 3}, mass_data=self.mass_data), (0.3)**3 * (0.7)**7 * 120))\nself.assertEqual(\n- mass.most_probable_isotopic_composition(\n- formula='A20F10',\n- elements_with_isotopes = ['F'],\n- mass_data=self.mass_data),\n- (mass.Composition({'A': 20, 'F[6]': 7, 'F[7]': 3},\n- mass_data=self.mass_data),\n- (0.3)**3 * (0.7)**7 * 120))\n+ mass.most_probable_isotopic_composition(formula='A20F10', elements_with_isotopes=['F'], mass_data=self.mass_data),\n+ (mass.Composition({'A': 20, 'F[6]': 7, 'F[7]': 3}, mass_data=self.mass_data), (0.3)**3 * (0.7)**7 * 120))\ndef test_isotopic_composition_abundance(self):\nfor peplen in range(1, 10):\nself.assertAlmostEqual(\n- mass.isotopic_composition_abundance(formula='F[6]' * peplen,\n- mass_data=self.mass_data),\n+ mass.isotopic_composition_abundance(formula='F[6]' * peplen, mass_data=self.mass_data),\nself.mass_data['F'][6][1] ** peplen)\nself.assertAlmostEqual(\n- mass.isotopic_composition_abundance(formula='AF[6]' * peplen,\n- mass_data=self.mass_data),\n+ mass.isotopic_composition_abundance(formula='AF[6]' * peplen, mass_data=self.mass_data),\nself.mass_data['F'][6][1] ** peplen)\nself.assertAlmostEqual(\n- mass.isotopic_composition_abundance(\n- formula='A[1]F[6]' * peplen,\n- mass_data=self.mass_data),\n- (self.mass_data['A'][1][1]\n- * self.mass_data['F'][6][1] ) ** peplen)\n+ mass.isotopic_composition_abundance(formula='A[1]F[6]' * peplen, mass_data=self.mass_data),\n+ (self.mass_data['A'][1][1] * self.mass_data['F'][6][1]) ** peplen)\n- def test_Unimod(self):\n+ def test_Unimod_mass(self):\ndb = mass.Unimod(gzip.open('unimod.xml.gz'))\nfor x in db.mods:\nself.assertGreater(0.00001,\n- abs(x['mono_mass'] - mass.calculate_mass(x['composition'],\n- mass_data=db.mass_data)))\n+ abs(x['mono_mass'] - mass.calculate_mass(x['composition'], mass_data=db.mass_data)))\n+\n+ def test_Unimod_methods(self):\n+ db = mass.Unimod(gzip.open('unimod.xml.gz'))\n+ rec_id = 1\n+ rec_name = 'Acetylation'\n+ rec_title = 'Acetyl'\n+ record = db.by_id(rec_id)\n+ self.assertEqual(record['title'], rec_title)\n+ self.assertEqual(record['full_name'], rec_name)\n+ self.assertEqual(record, db[rec_id])\n+ self.assertEqual(record, db.by_title(rec_title))\n+ self.assertEqual(record, db.by_name(rec_name))\ndef test_nist_mass(self):\nself.assertTrue(all(abs(g[0][1] - 1) < 1e-6 for g in mass.nist_mass.values()))\n@@ -269,7 +224,6 @@ class MassTest(unittest.TestCase):\nsplit_sequence = mass.Composition(split_sequence=[('X',), ('Y',), ('Z',)],\naa_comp=self.aa_comp)\n- import pickle\nself.assertEqual(dict_, pickle.loads(pickle.dumps(dict_)))\nself.assertEqual(formula, pickle.loads(pickle.dumps(formula)))\nself.assertEqual(sequence, pickle.loads(pickle.dumps(sequence)))\n@@ -304,7 +258,6 @@ class MassTest(unittest.TestCase):\nself.assertAlmostEqual(abundances[i], mass.isotopic_composition_abundance(state,\naa_comp=self.aa_comp, mass_data=self.mass_data))\n-\ndef test_isotopologues_with_abundances(self):\npeptide = 'XYF'\nstates = [{'F[6]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}, {'F[7]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}]\n@@ -319,5 +272,6 @@ class MassTest(unittest.TestCase):\nfor key, value in mass.std_aa_mass.items():\nself.assertAlmostEqual(value, mass.calculate_mass(parsed_sequence=[key]), places=4)\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Minor code brush-up, update mass.Unimod tests |
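The added `test_Unimod_methods` test documents the lookup interface of `mass.Unimod`; the equivalent usage, taken from the assertions in the diff ('unimod.xml.gz' is the local test fixture), looks like this:

```python
import gzip
from pyteomics import mass

# A Unimod record can be retrieved by record id, by title, or by full name.
db = mass.Unimod(gzip.open('unimod.xml.gz'))
acetyl = db.by_id(1)
assert acetyl['title'] == 'Acetyl'
assert acetyl['full_name'] == 'Acetylation'
assert acetyl == db[1] == db.by_title('Acetyl') == db.by_name('Acetylation')
```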
377,522 | 10.06.2020 17:26:12 | -10,800 | c92f9384fa34df14283bec75b9a2de9e60ffa253 | Move gravy code to electrochem with modifications | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/electrochem.py",
"new_path": "pyteomics/electrochem.py",
"diff": "@@ -8,7 +8,7 @@ Summary\nThis module is used to calculate the\nelectrochemical properties of polypeptide molecules.\n-The theory behind this module is based on the Henderson-Hasselbalch\n+The theory behind most of this module is based on the Henderson-Hasselbalch\nequation and was thoroughly described in a number of sources [#Aronson]_,\n[#Moore]_.\n@@ -96,6 +96,7 @@ References\n# See the License for the specific language governing permissions and\n# limitations under the License.\n+from __future__ import division\nfrom . import parser\nfrom .auxiliary import PyteomicsError\nfrom collections import Iterable, Counter\n@@ -416,6 +417,58 @@ different human cell types defined in a pH scale where isoelectric points\ncorrelate with polypeptide compositions. Electrophoresis 1994, 15, 529-539.\n\"\"\"\n+hydropathicity_KD = {\n+ \"A\": 1.800,\n+ \"R\": -4.500,\n+ \"N\": -3.500,\n+ \"D\": -3.500,\n+ \"C\": 2.500,\n+ \"Q\": -3.500,\n+ \"E\": -3.500,\n+ \"G\": -0.400,\n+ \"H\": -3.200,\n+ \"I\": 4.500,\n+ \"L\": 3.800,\n+ \"K\": -3.900,\n+ \"M\": 1.900,\n+ \"F\": 2.800,\n+ \"P\": -1.600,\n+ \"S\": -0.800,\n+ \"T\": -0.700,\n+ \"W\": -0.900,\n+ \"Y\": -1.300,\n+ \"V\": 4.200,\n+}\n+\"\"\"\n+A set of hydropathicity indexes obtained from Kyte J., Doolittle F. J. Mol. Biol. 157:105-132 (1982).\n+\"\"\"\n+\n+def gravy(sequence, hydropathicity=hydropathicity_KD):\n+ \"\"\"\n+ Calculate GRand AVerage of hYdropathicity (GRAVY) index for amino acid sequence.\n+\n+ Parameters\n+ ----------\n+ sequence : str\n+ Polypeptide sequence in one-letter format.\n+ hydropathicity : dict, optional\n+ Hydropathicity indexes of amino acids. Default is :py:data:`hydropathicity_KD`.\n+\n+ Returns\n+ -------\n+ out : float\n+ Rand AVerage of hYdropathicity (GRAVY) index.\n+\n+ Examples\n+ >>> gravy('PEPTIDE')\n+ -1.4375\n+ \"\"\"\n+ try:\n+ return sum(hydropathicity[aa] for aa in sequence) / len(sequence)\n+ except KeyError as e:\n+ raise PyteomicsError(\"Hydropathicity for amino acid {} not provided.\".format(e.args[0]))\n+\n+\nif __name__ == \"__main__\":\nimport doctest\n"
},
{
"change_type": "DELETE",
"old_path": "pyteomics/gravy.py",
"new_path": null,
"diff": "-# -*- coding: utf-8 -*-\n-\"\"\"\n-Calculate GRand AVerage of hYdropathicity (GRAVY) index for amino acid sequence\n-\n-Indices obtained from Kyte J., Doolittle F. J. Mol. Biol. 157:105-132 (1982)\n-\"\"\"\n-import re\n-from collections import Counter\n-\n-\"\"\"\n-Hydropathicity indices\n-\"\"\"\n-AA_hidropaticity = {\n- \"A\": 1.800,\n- \"R\": -4.500,\n- \"N\": -3.500,\n- \"D\": -3.500,\n- \"C\": 2.500,\n- \"Q\": -3.500,\n- \"E\": -3.500,\n- \"G\": -0.400,\n- \"H\": -3.200,\n- \"I\": 4.500,\n- \"L\": 3.800,\n- \"K\": -3.900,\n- \"M\": 1.900,\n- \"F\": 2.800,\n- \"P\": -1.600,\n- \"S\": -0.800,\n- \"T\": -0.700,\n- \"W\": -0.900,\n- \"Y\": -1.300,\n- \"V\": 4.200,\n-}\n-\n-# Regex to select letters that are not in standard FASTA format\n-invalidFasta = re.compile(\"[^{}]\".format(\"\".join(AA_hidropaticity.keys())))\n-\n-\"\"\"\n-Calculate GRAVY index\n-\n-Parameters:\n- sequence\n- string\n- Amino acid sequence\n-\n-Returns:\n- numeric\n- GRAVY index\n-\"\"\"\n-\n-\n-def gravy(sequence):\n- sequence = re.sub(invalidFasta, \"\", sequence)\n- return sum([AA_hidropaticity[aa] for aa in sequence]) / len(sequence)\n-\n-\n-\"\"\"\n-Calculate amino acid content of a sequence\n-\n-Parameters:\n- sequence\n- string\n- Amino acid sequence\n-\n- frequencies\n- bool\n- return amino acid counts (`False`) or amino acid frequencies (`True`)\n- Default: `True`\n-\n-Returns:\n- `dict`\n- Dictionary with amino acids as keys and counts/frequencies as values\n-\"\"\"\n-\n-\n-def AAContent(sequence, frequencies=True):\n- AACont = Counter(sequence)\n-\n- if frequencies:\n- for k, v in AACont.iteritems():\n- AACont[k] = float(v) / len(sequence)\n-\n- return AACont\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Move gravy code to electrochem with modifications |
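After this move, the GRAVY calculation lives in `pyteomics.electrochem`; the expected value below is the doctest added in the diff, and `hydropathicity_KD` is the Kyte-Doolittle index table it uses by default:

```python
from pyteomics import electrochem

print(electrochem.gravy('PEPTIDE'))        # -1.4375 (doctest value from the diff)
print(electrochem.hydropathicity_KD['I'])  # 4.5, the Kyte-Doolittle index for Ile
```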
377,522 | 10.06.2020 20:40:21 | -10,800 | db44a29cc8fad54d4d05bb153f3b5f4c34d4c3de | Add keepstate to xml iterfind | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -505,6 +505,7 @@ class XML(FileReader):\n\"Do not use `retrieve_refs=True`.\").format(\nself.__class__.__name__))\n+ @_keepstate\ndef iterfind(self, path, **kwargs):\n\"\"\"Parse the XML and yield info on elements with specified local\nname or by specified \"XPath\".\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add keepstate to xml iterfind |
377,522 | 11.06.2020 18:03:18 | -10,800 | a328ca30adecb7cf207c493a7b2e9b9c81595ce6 | Update electrochem doc | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/electrochem.py",
"new_path": "pyteomics/electrochem.py",
"diff": "@@ -22,13 +22,20 @@ where the sum is taken over all ionizable groups of the polypeptide, and\n:math:`Q_i` is -1 and +1 for acidic and basic functional groups,\nrespectively.\n-Main functions\n---------------\n+Charge and pI functions\n+-----------------------\n:py:func:`charge` - calculate the charge of a polypeptide\n:py:func:`pI` - calculate the isoelectric point of a polypeptide\n+\n+GRand AVerage of hYdropathicity (GRAVY)\n+---------------------------------------\n+\n+ :py:func:`gravy` - calculate the GRAVY index of a polypeptide\n+\n+\nData\n----\n@@ -47,6 +54,9 @@ Data\n:py:data:`pK_cterm_bjellqvist` - a set of C-terminal pK from [#Bjellqvist]_.\n+ :py:data:`hydropathicity_KD` - a set of hydropathicity indexes from [#Kyte]_.\n+\n+\nReferences\n----------\n@@ -78,6 +88,11 @@ References\ncorrelate with polypeptide compositions. Electrophoresis 1994, 15, 529-539.\n`Link. <http://dx.doi.org/10.1002/elps.1150150171>`_\n+.. [#Kyte] Kyte, J.; Doolittle, R. F..\n+ A simple method for displaying the hydropathic character of a protein.\n+ Journal of molecular biology 1982, 157 (1), 105-32.\n+ `Link. <https://doi.org/10.1016/0022-2836(82)90515-0>`_\n+\n-------------------------------------------------------------------------------\n\"\"\"\n@@ -101,6 +116,7 @@ from . import parser\nfrom .auxiliary import PyteomicsError\nfrom collections import Iterable, Counter\n+\ndef charge(sequence, pH, **kwargs):\n\"\"\"Calculate the charge of a polypeptide in given pH or list of pHs using\na given list of amino acid electrochemical properties.\n@@ -152,6 +168,7 @@ def charge(sequence, pH, **kwargs):\ncharge_list = _charge_for_dict(peptide_dict, pH_list, pK)\nreturn charge_list[0] if not isinstance(pH, Iterable) else charge_list\n+\ndef _prepare_charge_dict(sequence, **kwargs):\nnterm = cterm = n_aa = c_aa = None\npK = kwargs.get('pK', pK_lehninger).copy()\n@@ -227,6 +244,7 @@ def _prepare_charge_dict(sequence, **kwargs):\nreturn peptide_dict, pK\n+\ndef _charge_for_dict(peptide_dict, pH_list, pK):\n# Calculate the charge for each value of pH.\ncharge_list = []\n@@ -240,6 +258,7 @@ def _charge_for_dict(peptide_dict, pH_list, pK):\nreturn charge_list\n+\ndef pI(sequence, pI_range=(0.0, 14.0), precision_pI=0.01, **kwargs):\n\"\"\"Calculate the isoelectric point of a polypeptide using a given set\nof amino acids' electrochemical properties.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update electrochem doc |
377,533 | 06.07.2020 10:22:55 | -10,800 | 703b1652ee42a0c43ce684f035d61e7fe70e75d9 | Fix xpath, improve pepxml.roc_curve()
Fixes xpath bugs in pepxml.roc_curve() that caused a fatal error [XPathEvalError: Invalid predicate]; adds "charge" and "tag" keys to the output to make the function usable in the real world. | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -288,26 +288,27 @@ def roc_curve(source):\nReturns\n-------\nout : list\n- A list of ROC points, sorted by ascending min prob.\n+ A list of ROC points.\n\"\"\"\nparser = etree.XMLParser(remove_comments=True, ns_clean=True)\ntree = etree.parse(source, parser=parser)\nroc_curve = []\n- for roc_element in tree.xpath(\n- \"/*[local-name()='msms_pipeline_analysis']\"\n- \"/*[local-name()='analysis_summary and @analysis='peptideprophet']\"\n- \"/*[local-name()='peptideprophet_summary']\"\n- \"/*[local-name()='roc_data_point']\"):\n-\n- roc_data_point = dict(roc_element.attrib)\n- for key in roc_data_point:\n- roc_data_point[key] = float(roc_data_point[key])\n- roc_curve.append(roc_data_point)\n-\n- return sorted(roc_curve, key=lambda x: x['min_prob'])\n-\n+ for roc_error_data in tree.xpath(\n+ \"/*[local-name()='msms_pipeline_analysis'] \\\n+ //*[local-name()='analysis_summary' and @analysis='peptideprophet'] \\\n+ //*[local-name()='peptideprophet_summary'] \\\n+ //*[local-name()='roc_error_data']\"):\n+ for element in roc_error_data.xpath(\"*[local-name()='roc_data_point' or local-name()='error_point']\"):\n+ data_point = dict(element.attrib)\n+ for key in data_point:\n+ data_point[key] = float(data_point[key])\n+ data_point[\"charge\"] = roc_error_data.attrib[\"charge\"]\n+ data_point[\"tag\"] = etree.QName(element).localname\n+ roc_curve.append(data_point)\n+\n+ return roc_curve\n# chain = aux._make_chain(read, 'read')\nchain = aux.ChainBase._make_chain(read)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix xpath, improve pepxml.roc_curve()
Fixes xpath bugs in pepxml.roc_curve() that caused a fatal error [XPathEvalError: Invalid predicate]; adds "charge" and "tag" keys to the output to make the function usable in real world. |
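A hypothetical call against the revised `pepxml.roc_curve()`; the file name is made up, but per the new code each returned point carries the float-converted element attributes plus the precursor `charge` of its enclosing `<roc_error_data>` block and a `tag` naming the source element (`roc_data_point` or `error_point`):

```python
from pyteomics import pepxml

points = pepxml.roc_curve('interact.pep.xml')   # hypothetical PeptideProphet output
for point in points[:3]:
    print(point['charge'], point['tag'], point.get('min_prob'), point.get('error'))
```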
377,523 | 16.07.2020 13:57:19 | -7,200 | 31cdefe420fe84d3c6264eebdf65e4aaa9100dc2 | Implement parsing of MGF files that contain ion names | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -7,7 +7,7 @@ from . import patch as __patch\nfrom .structures import (\nPyteomicsError, Charge, ChargeList,\n- _parse_charge, BasicComposition,\n+ _parse_charge, _parse_ion, BasicComposition,\nunitfloat, unitint, unitstr, cvstr,\ncvquery)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -244,11 +244,11 @@ class IndexedReaderMixin(NoOpBaseReader):\nindex = self.default_index\nif index is None:\nraise PyteomicsError('Access by ID requires building an offset index.')\n- offsets = index[elem_id]\n+ offsets = index[str(elem_id)]\nreturn self._item_from_offsets(offsets)\ndef get_by_ids(self, ids):\n- return [self.get_by_id(key) for key in ids]\n+ return [self.get_by_id(str(key)) for key in ids]\ndef get_by_index(self, i):\ntry:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/structures.py",
"new_path": "pyteomics/auxiliary/structures.py",
"diff": "@@ -82,6 +82,15 @@ class Charge(int):\ndef __str__(self):\nreturn str(abs(self)) + '+-'[self < 0]\n+class Ion(str):\n+ \"\"\"Represents an Ion, right now just a subclass of String.\n+ \"\"\"\n+ def __init__(self, *args, **kwargs):\n+ if args and isinstance(args[0], basestring):\n+ try:\n+ self.ion_type, self.neutral_loss, self.charge = re.match(r'([abcxyz]\\d+(\\-H2O|\\-NH3)?)([\\+|-]\\d+)', args[0]).groups() #\"y2-H2O+1\"\n+ except Exception as e:\n+ raise PyteomicsError(\"Malformed ion string, must match the regex r'([abcxyz]\\d+(\\-H2O|\\-NH3)?)([\\+|-]\\d+)'\")\nclass ChargeList(list):\n\"\"\"Just a list of :py:class:`Charge`s. When printed, looks like an\n@@ -117,6 +126,11 @@ def _parse_charge(s, list_only=False):\npass\nreturn ChargeList(s)\n+def _parse_ion(ion_text):\n+ try:\n+ return Ion(ion_text)\n+ except Exception as e:\n+ print(e)\nclass BasicComposition(defaultdict, Counter):\n\"\"\"A generic dictionary for compositions.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -84,10 +84,11 @@ class MGFBase(object):\n_array_converters = {\n'm/z array': [_identity, _array, _array],\n'intensity array': [_identity, _array, _array],\n- 'charge array': [_identity, _array, _ma]\n+ 'charge array': [_identity, _array, _ma],\n+ 'ion array': [_identity, _array, _array]\n}\n- _array_keys = ['m/z array', 'intensity array', 'charge array']\n- _array_keys_unicode = [u'm/z array', u'intensity array', u'charge array']\n+ _array_keys = ['m/z array', 'intensity array', 'charge array', 'ion array']\n+ _array_keys_unicode = [u'm/z array', u'intensity array', u'charge array', u'ion array']\nencoding = None\n@@ -107,7 +108,7 @@ class MGFBase(object):\nDefault is :py:const:`True`.\nconvert_arrays : one of {0, 1, 2}, optional, keyword only\n- If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.\n+ If `0`, m/z, intensities and (possibly) charges or (possibly) ions will be returned as regular lists\nIf `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.\nIf `2`, charges will be reported as a masked array (default).\nThe default option is the slowest. `1` and `2` require :py:mod:`numpy`.\n@@ -115,9 +116,13 @@ class MGFBase(object):\nread_charges : bool, optional, keyword only\nIf `True` (default), fragment charges are reported. Disabling it improves performance.\n+ read_ions : bool, optional\n+ If `True` (default: False), fragment ions are reported. Disabling it improves performance.\n+ Note that right now, only one of (read_charges, read_ions) may be True.\n+\ndtype : type or str or dict, optional, keyword only\ndtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\n- Keys should be 'm/z array', 'intensity array' and/or 'charge array'.\n+ Keys should be 'm/z array', 'intensity array', 'charge array' and/or 'ion array'.\nencoding : str, optional, keyword only\nFile encoding.\n@@ -129,6 +134,10 @@ class MGFBase(object):\nif self._convert_arrays and np is None:\nraise aux.PyteomicsError('numpy is required for array conversion')\nself._read_charges = kwargs.pop('read_charges', True)\n+ self._read_ions = kwargs.pop('read_ions', False)\n+ #Make sure no charges are read if ions are read\n+ if self._read_ions:\n+ self._read_charges = False\ndtype = kwargs.pop('dtype', None)\nself._dtype_dict = dtype if isinstance(dtype, dict) else {k: dtype for k in self._array_keys}\nif self._use_header:\n@@ -136,12 +145,16 @@ class MGFBase(object):\nelse:\nself._header = None\n+\ndef parse_precursor_charge(self, charge_text, list_only=False):\nreturn aux._parse_charge(charge_text, list_only=list_only)\ndef parse_peak_charge(self, charge_text, list_only=False):\nreturn aux._parse_charge(charge_text, list_only=False)\n+ def parse_peak_ion(self, ion_text):\n+ return aux._parse_ion(ion_text)\n+\n@property\ndef header(self):\nif self._header is None:\n@@ -173,6 +186,7 @@ class MGFBase(object):\nmasses = []\nintensities = []\ncharges = []\n+ ions = []\nparams = self.header.copy() if self._use_header else {}\n@@ -202,6 +216,8 @@ class MGFBase(object):\ndata = {'m/z array': masses, 'intensity array': intensities}\nif self._read_charges:\ndata['charge array'] = charges\n+ if self._read_ions:\n+ data['ion array'] = ions\nfor key, values in data.items():\nout[key] = self._array_converters[key][self._convert_arrays](values, dtype=self._dtype_dict.get(key))\nif self.encoding and sys.version_info.major == 2:\n@@ -221,6 +237,8 @@ class MGFBase(object):\nintensities.append(float(l[1]))\nif 
self._read_charges:\ncharges.append(self.parse_peak_charge(l[2]) if len(l) > 2 else 0)\n+ if self._read_ions:\n+ ions.append(self.parse_peak_ion(l[2]) if len(l) > 2 else \"\")\nexcept ValueError:\nraise aux.PyteomicsError(\n'Error when parsing %s. Line:\\n%s' % (getattr(self._source, 'name', 'MGF file'), line))\n@@ -238,10 +256,11 @@ class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixi\nIf created using a file object, it needs to be opened in binary mode.\nWhen iterated, :py:class:`IndexedMGF` object yields spectra one by one.\n- Each 'spectrum' is a :py:class:`dict` with four keys: 'm/z array',\n- 'intensity array', 'charge array' and 'params'. 'm/z array' and\n+ Each 'spectrum' is a :py:class:`dict` with five keys: 'm/z array',\n+ 'intensity array', 'charge array', 'ion array' and 'params'. 'm/z array' and\n'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n'charge array' is a masked array (:py:class:`numpy.ma.MaskedArray`) of ints,\n+ 'ion_array' is an array of Ions (str)\nand 'params' stores a :py:class:`dict` of parameters (keys and values are\n:py:class:`str`, keys corresponding to MGF, lowercased).\n@@ -254,20 +273,21 @@ class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixi\ntime : RTLocator\nA property used for accessing spectra by retention time.\n\"\"\"\n-\ndelimiter = 'BEGIN IONS'\n- label = r'TITLE=([^\\n]*\\S)\\s*'\n+\n+\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\n- dtype=None, encoding='utf-8', _skip_index=False, **kwargs):\n+ read_ions=False, dtype=None, encoding='utf-8', index_by_scans=False, _skip_index=False, **kwargs):\n+ self.label = r'SCANS=(\\d+)\\s*' if index_by_scans else r'TITLE=([^\\n]*\\S)\\s*'\nsuper(IndexedMGF, self).__init__(source, parser_func=self._read, pass_file=False, args=(), kwargs={},\nuse_header=use_header, convert_arrays=convert_arrays, read_charges=read_charges,\n- dtype=dtype, encoding=encoding, _skip_index=_skip_index, **kwargs)\n+ read_ions=read_ions, dtype=dtype, encoding=encoding, _skip_index=_skip_index, **kwargs)\ndef __reduce_ex__(self, protocol):\nreturn (self.__class__,\n(self._source_init, False, self._convert_arrays, self._read_charges,\n- self._dtype_dict, self.encoding, True),\n+ self._read_ions, self._dtype_dict, self.encoding, True),\nself.__getstate__())\ndef __getstate__(self):\n@@ -301,7 +321,7 @@ class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixi\nyield spectrum\ndef get_spectrum(self, key):\n- return self.get_by_id(key)\n+ return self.get_by_id(str(key))\ndef _get_time(self, spectrum):\ntry:\n@@ -318,10 +338,11 @@ class MGF(MGFBase, aux.FileReader):\nconstant-time access to spectra.\n:py:class:`MGF` object behaves as an iterator, **yielding** spectra one by one.\n- Each 'spectrum' is a :py:class:`dict` with four keys: 'm/z array',\n- 'intensity array', 'charge array' and 'params'. 'm/z array' and\n+ Each 'spectrum' is a :py:class:`dict` with five keys: 'm/z array',\n+ 'intensity array', 'charge array', 'ion array' and 'params'. 
'm/z array' and\n'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n'charge array' is a masked array (:py:class:`numpy.ma.MaskedArray`) of ints,\n+ 'ion_array' is a masked array of Ions (str)\nand 'params' stores a :py:class:`dict` of parameters (keys and values are\n:py:class:`str`, keys corresponding to MGF, lowercased).\n@@ -334,9 +355,10 @@ class MGF(MGFBase, aux.FileReader):\n\"\"\"\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\n- dtype=None, encoding=None):\n+ read_ions=False, dtype=None, encoding=None):\nsuper(MGF, self).__init__(source, mode='r', parser_func=self._read, pass_file=False, args=(), kwargs={},\n- encoding=encoding, use_header=use_header, convert_arrays=convert_arrays, read_charges=read_charges, dtype=dtype)\n+ encoding=encoding, use_header=use_header, convert_arrays=convert_arrays, read_charges=read_charges,\n+ read_ions=read_ions, dtype=dtype)\n# self.encoding = encoding\n@aux._keepstate_method\n@@ -360,6 +382,16 @@ class MGF(MGFBase, aux.FileReader):\nspectrum['params']['title'] = title\nreturn spectrum\n+#TODO DELETE BEFORE COMMIT\n+ # @aux._keepstate_method\n+ # def get_spectrum_by_scan(self, scan):\n+ # for line in self._source:\n+ # sline = line.strip()\n+ # if sline[:5] == 'SCANS' and sline.split('=', 1)[1].strip() == str(scan):\n+ # spectrum = self._read_spectrum()\n+ # #spectrum['params']['scans'] = scan\n+ # return spectrum\n+\ndef __getitem__(self, key):\nreturn self.get_spectrum(key)\n@@ -391,9 +423,13 @@ def read(*args, **kwargs):\nread_charges : bool, optional\nIf `True` (default), fragment charges are reported. Disabling it improves performance.\n+ read_ions : bool, optional\n+ If `True` (default: False), fragment charges are reported. Disabling it improves performance.\n+ Note that right now, only one of (read_charges, read_ions) may be True.\n+\ndtype : type or str or dict, optional\ndtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\n- Keys should be 'm/z array', 'intensity array' and/or 'charge array'.\n+ Keys should be 'm/z array', 'intensity array', 'charge array' and/or 'ion array'.\nencoding : str, optional\nFile encoding.\n@@ -510,7 +546,7 @@ _default_value_formatters = {'pepmass': _pepmass_repr, 'charge': _charge_repr}\n@aux._file_writer()\ndef write(spectra, output=None, header='', key_order=_default_key_order,\n- fragment_format=None, write_charges=True, use_numpy=None,\n+ fragment_format=None, write_charges=True, write_ions=False, use_numpy=None,\nparam_formatters=_default_value_formatters):\n\"\"\"\nCreate a file in MGF format.\n@@ -547,6 +583,10 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nIf :py:const:`False`, fragment charges from 'charge array' will not be written.\nDefault is :py:const:`True`.\n+ write_ions : bool, optional\n+ If :py:const:`False`, fragment ions from 'ion array' will not be written.\n+ Default is :py:const:`False`.\n+\nfragment_format : str, optional\nFormat string for m/z, intensity and charge of a fragment. 
Useful to set\nthe number of decimal places, e.g.:\n@@ -649,7 +689,7 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\ntry:\nsuccess = True\nif np is not None and use_numpy:\n- if not write_charges or 'charge array' not in spectrum:\n+ if (not write_charges or 'charge array' not in spectrum) and (not write_ions or 'ion array' not in spectrum):\nX = np.empty((len(spectrum['m/z array']), 2))\nX[:, 0] = spectrum['m/z array']\nX[:, 1] = spectrum['intensity array']\n@@ -660,6 +700,12 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nX[:, 1] = spectrum['intensity array']\nX[:, 2] = spectrum['charge array']\nnp.savetxt(output, X, fmt=np_format_3)\n+ elif isinstance(spectrum.get('ion array'), np.ndarray):\n+ X = np.empty((len(spectrum['m/z array']), 3), dtype=object)\n+ X[:, 0] = spectrum['m/z array']\n+ X[:, 1] = spectrum['intensity array']\n+ X[:, 2] = spectrum['ion array']\n+ np.savetxt(output, X, fmt=np_format_3)\nelse:\nsuccess = False\nelse:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Implement parsing of MGF files that contain ion names |
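The diff above introduces a `read_ions` option for the MGF readers, which stores per-peak ion labels under an 'ion array' key instead of charges. A minimal, illustrative usage sketch (the file name is hypothetical; per the diff, only one of `read_charges`/`read_ions` may be enabled at a time):

from pyteomics import mgf

# 'annotated.mgf' is a hypothetical file whose peak lines carry ion labels
# (e.g. "y2+1") in the third column instead of charges.
with mgf.read('annotated.mgf', read_ions=True) as reader:
    for spectrum in reader:
        for mz, intensity, ion in zip(spectrum['m/z array'],
                                      spectrum['intensity array'],
                                      spectrum['ion array']):
            print(mz, intensity, ion)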
377,522 | 22.07.2020 01:35:00 | -10,800 | 75ccf2ffb5dd7b9c68653a2da6a836a0cb7ec7b6 | Update example 2 for Python 3, use static folder | [
{
"change_type": "RENAME",
"old_path": "doc/source/examples/example.mgf",
"new_path": "doc/source/_static/example.mgf",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "doc/source/examples/example.pep.xml",
"new_path": "doc/source/_static/example.pep.xml",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "doc/source/examples/example_msms.png",
"new_path": "doc/source/_static/example_msms.png",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "doc/source/examples/example_msms.py",
"new_path": "doc/source/_static/example_msms.py",
"diff": "-# This is written in Python 2 for simplicity\n-# Can be done forward-compatible easily, though\nfrom pyteomics import mgf, pepxml, mass\nimport os\n-from urllib import urlretrieve\n+from urllib.request import urlretrieve\nimport pylab\n# get the files\nfor fname in ('mgf', 'pep.xml'):\nif not os.path.isfile('example.' + fname):\n- urlretrieve('http://packages.python.org/pyteomics/_downloads/example.'\n+ urlretrieve('http://pyteomics.readthedocs.io/en/latest/_static/example.'\n+ fname, 'example.' + fname)\ndef fragments(peptide, types=('b', 'y'), maxcharge=1):\n@@ -16,9 +14,9 @@ def fragments(peptide, types=('b', 'y'), maxcharge=1):\nThe function generates all possible m/z for fragments of types\n`types` and of charges from 1 to `maxharge`.\n\"\"\"\n- for i in xrange(1, len(peptide)-1):\n+ for i in range(1, len(peptide)-1):\nfor ion_type in types:\n- for charge in xrange(1, maxcharge+1):\n+ for charge in range(1, maxcharge+1):\nif ion_type[0] in 'abc':\nyield mass.fast_mass(\npeptide[:i], ion_type=ion_type, charge=charge)\n@@ -29,6 +27,7 @@ def fragments(peptide, types=('b', 'y'), maxcharge=1):\nwith mgf.read('example.mgf') as spectra, pepxml.read('example.pep.xml') as psms:\nspectrum = next(spectra)\npsm = next(psms)\n+\npylab.figure()\npylab.title('Theoretical and experimental spectra for '\n+ psm['search_hit'][0]['peptide'])\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/conf.py",
"new_path": "doc/source/conf.py",
"diff": "@@ -75,7 +75,7 @@ master_doc = 'index'\n# General information about the project.\nproject = u'Pyteomics documentation'\n-copyright = u'2011-2019, Lev Levitsky, Anton Goloborodko, Mikhail Gorshkov'\n+copyright = u'2011-2020, Lev Levitsky, Anton Goloborodko, Mikhail Gorshkov'\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n@@ -155,7 +155,7 @@ html_title = \"%s v%s\" % (project, release)\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n-html_static_path = [] #['_static']\n+html_static_path = ['_static']\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/examples/example_msms.rst",
"new_path": "doc/source/examples/example_msms.rst",
"diff": "@@ -6,9 +6,9 @@ compare it to identification info we read from a pepXML file. We are going to\ncompare the MS/MS spectrum in the file with the theoretical spectrum of a\npeptide assigned to this spectrum by the search engine.\n-The script source can be downloaded :download:`here <example_msms.py>`. We will\n-also need the :download:`example MGF file <example.mgf>` and the\n-:download:`example pepXML file <example.pep.xml>`, but the script will download\n+The script source can be downloaded :download:`here <../_static/example_msms.py>`. We will\n+also need the :download:`example MGF file <../_static/example.mgf>` and the\n+:download:`example pepXML file <../_static/example.pep.xml>`, but the script will download\nthem for you.\nThe MGF file has a single MS/MS spectrum in it. This spectrum is taken from the\n@@ -19,15 +19,15 @@ converting the results to pepXML with\nLet's start with importing the modules.\n-.. literalinclude:: example_msms.py\n+.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 1-6\n+ :lines: 1-4\nThen we'll download the files, if needed:\n-.. literalinclude:: example_msms.py\n+.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 9-12\n+ :lines: 7-10\nNow it's time to define the function that will give us *m/z* of theoretical\nfragments for a given sequence. We will use\n@@ -35,9 +35,9 @@ fragments for a given sequence. We will use\nAll we need to do is split the sequence at every bond and iterate\nover possible charges and ion types:\n-.. literalinclude:: example_msms.py\n+.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 14-27\n+ :lines: 12-25\nSo, the outer loop is over \"fragmentation sites\", the next one is over ion\ntypes, then over charges, and lastly over two parts of the sequence\n@@ -47,29 +47,31 @@ All right, now it's time to extract the info from the files.\nWe are going to use the `with` statement syntax, which is not required, but\nrecommended.\n-.. literalinclude:: example_msms.py\n+.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 29-31\n+ :lines: 27-29\nNow prepare the figure...\n-.. literalinclude:: example_msms.py\n+.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 32-36\n+ :lines: 31-35\n... plot the real spectrum:\n-.. literalinclude:: example_msms.py\n+.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 37-38\n+ :lines: 36-37\n... calculate and plot the theoretical spectrum, and show everything:\n-.. literalinclude:: example_msms.py\n+.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 39-44\n+ :lines: 38-43\n-You will see something like :download:`this <example_msms.png>`.\n+You will see something like this:\n+\n+.. image:: ../_static/example_msms.png\nThat's it, as you can see, the most intensive peaks in the spectrum are indeed\nmatched by the theoretical spectrum.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update example 2 for Python 3, use static folder |
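The `fragments` generator defined in the updated example script above yields theoretical fragment m/z values; a small illustrative call, relying on the definitions from that script (the peptide sequence is arbitrary):

# Collect singly charged b- and y-ion m/z values for one peptide using the
# fragments() generator from example_msms.py shown in the diff above.
theoretical_mz = sorted(fragments('PEPTIDE', types=('b', 'y'), maxcharge=1))
print(theoretical_mz)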
377,522 | 22.07.2020 16:44:03 | -10,800 | 0b7d714a8bab062c82a98ac1b05a2562a372b70b | Update filtering example script | [
{
"change_type": "MODIFY",
"old_path": "doc/source/_static/filtering.py",
"new_path": "doc/source/_static/filtering.py",
"diff": "+# place this file in the same directory as example files\nimport pylab\nfrom pyteomics import tandem, pepxml, mzid, auxiliary as aux, pylab_aux as pa\nimport pandas as pd\n+import numpy as np\npylab.figure()\nwith tandem.read('example.t.xml') as tf:\n@@ -31,16 +33,14 @@ amanda = pd.read_table('example_output.csv', skiprows=1)\nmorph_filt = aux.filter(morpheus, fdr=0.01, key='Morpheus Score', reverse=True,\nis_decoy='Decoy?')\n-pylab.figure()\nmorph_filt.plot(x='Retention Time (minutes)' , y='Precursor Mass (Da)', kind='scatter')\namanda['isDecoy'] = [all(s.startswith('DECOY') for s in prot.split(';')) for prot in amanda['Protein Accessions']]\namanda_filt = aux.filter(amanda[amanda['Rank'] == 1], key='Weighted Probability', is_decoy='isDecoy', fdr=0.01)\n-amanda_pep = amanda_filt.sort('Weighted Probability').groupby('Sequence').first()\n-morph_pep = morph_filt.sort('Q-Value (%)').groupby('Base Peptide Sequence').first()\n+amanda_pep = amanda_filt.sort_values('Weighted Probability').groupby('Sequence').first()\n+morph_pep = morph_filt.sort_values('Q-Value (%)').groupby('Base Peptide Sequence').first()\ninter = amanda_pep.join(morph_pep, how='inner', lsuffix='[amanda]', rsuffix='[morpheus]')\n-pylab.figure()\ninter.plot('Amanda Score', 'Morpheus Score', kind='hexbin', gridsize=10)\npylab.show()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update filtering example script |
377,522 | 28.07.2020 00:34:23 | -10,800 | 252bb7d68d256e51176065629381773d8ed0a26b | Fix example 1 to work with current fasta parser | [
{
"change_type": "MODIFY",
"old_path": "doc/source/_static/example_fasta.py",
"new_path": "doc/source/_static/example_fasta.py",
"diff": "@@ -15,8 +15,9 @@ if not os.path.isfile('yeast.fasta.gz'):\nprint('Cleaving the proteins with trypsin...')\nunique_peptides = set()\n-for description, sequence in fasta.read(gzip.open('yeast.fasta.gz')):\n- new_peptides = parser.cleave(sequence, parser.expasy_rules['trypsin'])\n+with gzip.open('yeast.fasta.gz', mode='rt') as gzfile:\n+ for description, sequence in fasta.FASTA(gzfile):\n+ new_peptides = parser.cleave(sequence, 'trypsin')\nunique_peptides.update(new_peptides)\nprint('Done, {0} sequences obtained!'.format(len(unique_peptides)))\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/examples/example_fasta.rst",
"new_path": "doc/source/examples/example_fasta.rst",
"diff": "@@ -31,15 +31,25 @@ a gzip-compressed database from Uniprot FTP server:\n:language: python\n:lines: 8-14\n-The :py:func:`pyteomics.fasta.read` function allows to iterate over the protein\n-sequences in a FASTA file in a regular Python loop. In order to obtain\n+The :py:func:`pyteomics.fasta.FASTA` class allows to iterate over the protein\n+sequences in a FASTA file in a regular Python loop. It replaced\n+:py:func:`pyteomics.fasta.read`, although the latter still exists, too.\n+In this example, we create a :py:class:`FASTA` object from a file-like object\n+representing a **gzip** archive. All file parser objects are flexible and support\n+a variety of use cases. Additionally :py:mod:`pyteomics.fasta` supports an even\n+greater variety of FASTA types and flavors.\n+\n+For all FASTA parser classes, check :doc:`../api/fasta`.\n+See also: an explanation of :ref:`indexing`.\n+\n+In order to obtain\nthe peptide sequences, we cleave each protein using the\n:py:func:`pyteomics.parser.cleave` function and combine results into a set object\nthat automatically discards multiple occurrences of the same sequence.\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 16-21\n+ :lines: 16-22\nLater we will calculate different peptide properties. In order\nto store them, we create a list of dicts, where each dict stores the properties\n@@ -47,7 +57,7 @@ of a single peptide, including its sequence.\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 23\n+ :lines: 24\nIt is also more efficient to pre-parse the sequences into individual amino acids\nand supply the parsed structures into the functions that calculate m/z, charge,\n@@ -56,14 +66,14 @@ that they are taken into the account when calculating m/z and charge of a peptid\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 25-31\n+ :lines: 26-32\nFor our purposes, we will limit ourselves to reasonably short peptides with\nthe length less than 100 residues.\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 33\n+ :lines: 34\nWe use :py:func:`pyteomics.electrochem.charge` to calculate the charge at pH=2.0.\nThe neutral mass and m/z of an ion is found with\n@@ -71,7 +81,7 @@ The neutral mass and m/z of an ion is found with\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 35-42\n+ :lines: 36-43\nNext, we calculate the retention time in the reversed- and normal-phase\nchromatography using :py:func:`pyteomics.achrom.calculate_RT` for two different\n@@ -83,7 +93,7 @@ correspondingly.\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 44-52\n+ :lines: 45-53\nNow, as we have all the numbers we can estimate the complexity of a sample\nby plotting the distributions of parameters measurable in a typical proteomic\n@@ -92,14 +102,14 @@ plotting function from matplotlib.\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 54-59\n+ :lines: 55-60\nThe same set of commands allows us to plot the distribution of charge states\nin the sample:\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 61-66\n+ :lines: 62-67\nNext, we want to visualize the statistical correlation\nbetween m/z and retention time in reversed-phase chromatography.\n@@ -119,14 +129,14 @@ m/z and retention time.\n.. 
literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 68-72\n+ :lines: 69-73\nThe obtained heatmap is plotted with :py:func:`matplotlib.pyplot.imshow()` function\nthat visualizes matrices.\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 74-78\n+ :lines: 75-79\nThe same code can also be applied to compare the retention times obtained on\ndifferent chromatographic phases.\n@@ -135,7 +145,7 @@ different chromatographic phases seem to be uncorrelated.\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 80-94\n+ :lines: 81-95\nFinally, let us check whether the retention times remain uncorrelated when\nwe narrow down the sample of peptides. We select the peptides with m/z lying in\n@@ -144,7 +154,7 @@ the sample allows us to use a scatter plot.\n.. literalinclude:: ../_static/example_fasta.py\n:language: python\n- :lines: 96-108\n+ :lines: 97-109\nAs you can see, the retention times of peptides lying in a narrow mass window\nturn out to be substantially correlated.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix example 1 to work with current fasta parser |
377,522 | 28.07.2020 00:34:46 | -10,800 | f33b41688957ecfab400a1fb176129e1d461fdae | Fix for current Sphinx | [
{
"change_type": "MODIFY",
"old_path": "doc/source/conf.py",
"new_path": "doc/source/conf.py",
"diff": "@@ -40,6 +40,7 @@ sys.path.insert(0, pyteomics_path)\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n+ 'numpydoc',\n'sphinx.ext.autodoc',\n'sphinx.ext.autosummary',\n'sphinx.ext.viewcode',\n@@ -50,7 +51,6 @@ extensions = [\n'matplotlib.sphinxext.plot_directive',\n# 'matplotlib.sphinxext.only_directives',\n'matplotlib.sphinxext.mathmpl',\n- 'numpydoc',\n'sphinxcontrib.googleanalytics',\n'sphinx_sitemap',\n]\n@@ -284,7 +284,7 @@ def skip(app, what, name, obj, skip, options):\nreturn False\nreturn skip\n-autodoc_default_flags = ['members', 'inherited-members', 'show-inheritance']\n+autodoc_default_options = {'members': True, 'inherited-members': True, 'show-inheritance': True}\ndef setup(app):\napp.connect('autodoc-skip-member', skip)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix for current Sphinx |
377,522 | 28.07.2020 00:36:26 | -10,800 | 1d5d70cecca64a09e0695e90e217973e04355cb2 | Add more warnings and tests for _check_use_index | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -877,21 +877,40 @@ def _make_chain(reader, readername, full_output=False):\ndef _check_use_index(source, use_index, default):\n+ try:\n+ if isinstance(source, basestring):\n+ return default\nif use_index is not None:\nuse_index = bool(use_index)\n- if 'b' not in getattr(source, 'mode', 'b'):\n- if use_index is True:\n- warnings.warn('use_index is True, but the file mode is not binary. '\n- 'Setting use_index to False')\n+ seekable = True\n+ if hasattr(source, 'seekable'):\n+ if not source.seekable():\n+ use_index = False\n+ seekable = False\n+ if hasattr(source, 'mode'):\n+ ui = 'b' in source.mode\n+ if use_index is not None and ui != use_index:\n+ warnings.warn('use_index is {}, but the file mode is {}. '\n+ 'Setting use_index to {}'.format(use_index, source.mode, ui))\n+ use_index = ui\n+\n+ if use_index and not seekable:\n+ warnings.warn('Cannot use indexing as {} is not seekable. Setting `use_index` to False.'.format(source))\nuse_index = False\n- elif 'b' in getattr(source, 'mode', ''):\n- if use_index is False:\n- warnings.warn('use_index is False, but the file mode is binary. '\n- 'Setting use_index to True')\n- use_index = True\n- if use_index is None:\n- use_index = default\n+\n+ if use_index is not None:\n+ return use_index\n+\n+ warnings.warn('Could not check mode on {}. '\n+ 'Specify `use_index` explicitly to avoid errors.'.format(source))\n+ return default\n+\n+ except Exception as e:\n+ warnings.warn('Could not check mode on {}. Reason: {!r}. '\n+ 'Specify `use_index` explicitly to avoid errors.'.format(source, e))\n+ if use_index is not None:\nreturn use_index\n+ return default\nclass FileReadingProcess(mp.Process):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -4,6 +4,8 @@ from itertools import count\nimport operator as op\nimport numpy as np\nimport pandas as pd\n+import tempfile\n+\nfrom os import path\nimport pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\n@@ -13,6 +15,7 @@ from pyteomics import tandem\npsms = list(zip(count(), string.ascii_uppercase + string.ascii_lowercase,\nnp.arange(0.01, 0.062, 0.001)))\n+\nclass QvalueTest(unittest.TestCase):\nkey = staticmethod(op.itemgetter(0))\nis_decoy = staticmethod(lambda x: x[1].islower())\n@@ -186,6 +189,7 @@ class QvalueTest(unittest.TestCase):\nq1 = aux.qvalues(psms, key=op.itemgetter('expect'), is_decoy=tandem.is_decoy)\nself.assertTrue(np.allclose(q0['q'], q1['q']))\n+\nclass FilterTest(unittest.TestCase):\nkey = staticmethod(op.itemgetter(0))\nis_decoy = staticmethod(lambda x: x[1].islower())\n@@ -738,6 +742,7 @@ class FilterTest(unittest.TestCase):\nf1 = list(f)\nself.assertEqual(len(f1), 21)\n+\nclass FDRTest(unittest.TestCase):\nis_decoy = staticmethod(lambda x: x[1].islower())\n@@ -788,7 +793,8 @@ class FDRTest(unittest.TestCase):\ndef test_sigma_fdr(self):\nself.assertAlmostEqual(aux.sigma_fdr(psms, is_decoy=self.is_decoy), 0.28263343)\n-class RegressionTests(unittest.TestCase):\n+\n+class RegressionTest(unittest.TestCase):\nx = [1, 2, 3]\ny = [3, 5, 7]\na = 2\n@@ -853,7 +859,8 @@ class RegressionTests(unittest.TestCase):\nwith self.assertRaises(aux.PyteomicsError):\naux.linear_regression_perpendicular(self.x)\n-class OffsetIndexTests(unittest.TestCase):\n+\n+class OffsetIndexTest(unittest.TestCase):\ndef setUp(self):\nself.sequence = [(str(i), i) for i in range(10)]\nself.index = aux.OffsetIndex(self.sequence)\n@@ -880,6 +887,39 @@ class OffsetIndexTests(unittest.TestCase):\nself.assertEqual(self.index.between('8', None), ['8', '9'])\n+class UseIndexTest(unittest.TestCase):\n+ def _check_file_object(self, fo, value):\n+ self.assertEqual(aux._check_use_index(fo, None, None), value)\n+\n+ def test_textfile(self):\n+ with open('test.fasta') as f:\n+ self._check_file_object(f, False)\n+\n+ def test_binfile(self):\n+ with open('test.mgf', 'rb') as f:\n+ self._check_file_object(f, True)\n+\n+ def test_tmpfile_text(self):\n+ with tempfile.TemporaryFile(mode='r') as f:\n+ self._check_file_object(f, False)\n+\n+ def test_tmpfile_bin(self):\n+ with tempfile.TemporaryFile(mode='wb') as f:\n+ self._check_file_object(f, True)\n+\n+ def test_stringio(self):\n+ try:\n+ from io import StringIO\n+ except ImportError:\n+ from StringIO import StringIO\n+ with StringIO(u'test') as f:\n+ with warnings.catch_warnings(record=True) as w:\n+ warnings.simplefilter('always')\n+ aux._check_use_index(f, None, None)\n+ self.assertEqual(len(w), 1)\n+ self.assertIs(w[0].category, UserWarning)\n+\n+\nimport warnings\nif __name__ == '__main__':\nwith warnings.catch_warnings():\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add more warnings and tests for _check_use_index |
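The stricter checks added above make the reading mode of a file object drive (and, with a warning, override) the `use_index` guess. One illustrative way to avoid the guessing and the warnings is to open the file in a matching mode and pass `use_index` explicitly (file name hypothetical):

from pyteomics import mgf

# Binary mode supports byte offsets, so the indexed reader can be used.
with open('spectra.mgf', 'rb') as source:
    reader = mgf.read(source, use_index=True)

# Text mode cannot be indexed reliably; ask for the sequential reader instead.
with open('spectra.mgf') as source:
    reader = mgf.read(source, use_index=False)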
377,522 | 28.07.2020 01:16:04 | -10,800 | 9cd485cda16f9c6939d2d0b17833b782244002c2 | Support older Sphinx | [
{
"change_type": "MODIFY",
"old_path": "doc/source/conf.py",
"new_path": "doc/source/conf.py",
"diff": "@@ -284,7 +284,7 @@ def skip(app, what, name, obj, skip, options):\nreturn False\nreturn skip\n-autodoc_default_options = {'members': True, 'inherited-members': True, 'show-inheritance': True}\n+autodoc_default_options = {'members': None, 'inherited-members': None, 'show-inheritance': None}\ndef setup(app):\napp.connect('autodoc-skip-member', skip)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Support older Sphinx |
377,522 | 28.07.2020 01:42:37 | -10,800 | 338612fc8afd2948b5e2922f6b32dfa7894febb2 | Use napoleon instead of numpydoc | [
{
"change_type": "MODIFY",
"old_path": "doc/build-requirements.txt",
"new_path": "doc/build-requirements.txt",
"diff": "lxml\nsqlalchemy\n-numpydoc\nmatplotlib\ngit+https://github.com/KarrLab/sphinxcontrib-googleanalytics.git\nsphinx-sitemap\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/conf.py",
"new_path": "doc/source/conf.py",
"diff": "@@ -40,7 +40,7 @@ sys.path.insert(0, pyteomics_path)\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n- 'numpydoc',\n+ 'sphinx.ext.napoleon',\n'sphinx.ext.autodoc',\n'sphinx.ext.autosummary',\n'sphinx.ext.viewcode',\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Use napoleon instead of numpydoc |
377,522 | 28.07.2020 02:09:14 | -10,800 | 2cc1c91f2bda7de0824b34bfdc34692286ed4d50 | Doc fixes on args and kwargs | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -441,8 +441,10 @@ def get_spectrum(source, title, *args, **kwargs):\nFile to read from.\ntitle : str\nSpectrum title.\n-\n- The rest of the arguments are the same as for :py:func:`read`.\n+ *args\n+ Given to :py:func:`read`.\n+ **kwargs\n+ Given to :py:func:`read`.\nReturns\n-------\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -330,9 +330,12 @@ def DataFrame(*args, **kwargs):\nParameters\n----------\n- *args, **kwargs : passed to :py:func:`chain`\n+ *args\n+ Passed to :py:func:`chain`.\n+ **kwargs\n+ Passed to :py:func:`chain`.\n- sep : str or None, optional\n+ sep : str or None, keyword only, optional\nSome values related to PSMs (such as protein information) are variable-length\nlists. If `sep` is a :py:class:`str`, they will be packed into single string using\nthis delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is\n@@ -401,11 +404,14 @@ def filter_df(*args, **kwargs):\nParameters\n----------\n- key : str / iterable / callable, optional\n+ key : str / iterable / callable, keyword only, optional\nDefault is 'mascot:expectation value'.\n- is_decoy : str / iterable / callable, optional\n+ is_decoy : str / iterable / callable, keyword only, optional\nDefault is 'isDecoy'.\n- *args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ *args\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ **kwargs\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\nReturns\n-------\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/openms/idxml.py",
"new_path": "pyteomics/openms/idxml.py",
"diff": "@@ -325,9 +325,13 @@ def DataFrame(*args, **kwargs):\nParameters\n----------\n- *args, **kwargs : passed to :py:func:`chain`\n+ *args\n+ Passed to :py:func:`chain`\n- sep : str or None, optional\n+ **kwargs\n+ Passed to :py:func:`chain`\n+\n+ sep : str or None, keyword only, optional\nSome values related to PSMs (such as protein information) are variable-length\nlists. If `sep` is a :py:class:`str`, they will be packed into single string using\nthis delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is\n@@ -395,11 +399,14 @@ def filter_df(*args, **kwargs):\nParameters\n----------\n- key : str / iterable / callable, optional\n- Default is 'score'. You will probably need to change it.\n- is_decoy : str / iterable / callable, optional\n+ key : str / iterable / callable, keyword only, optional\n+ Peptide identification score. Default is 'score'. You will probably need to change it.\n+ is_decoy : str / iterable / callable, keyword only, optional\nDefault is 'is decoy'.\n- *args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ *args\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ **kwargs\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\nReturns\n-------\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -355,9 +355,13 @@ def DataFrame(*args, **kwargs):\nParameters\n----------\n- *args, **kwargs : passed to :py:func:`chain`\n+ *args\n+ Passed to :py:func:`chain`.\n- sep : str or None, optional\n+ **kwargs\n+ Passed to :py:func:`chain`.\n+\n+ sep : str or None, keyword only, optional\nSome values related to PSMs (such as protein information) are variable-length\nlists. If `sep` is a :py:class:`str`, they will be packed into single string using\nthis delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is\n@@ -431,12 +435,14 @@ def filter_df(*args, **kwargs):\nParameters\n----------\n- key : str / iterable / callable, optional\n- Default is 'expect'.\n- is_decoy : str / iterable / callable, optional\n+ key : str / iterable / callable, keyword only, optional\n+ PSM score. Default is 'expect'.\n+ is_decoy : str / iterable / callable, keyword only, optional\nDefault is to check if all strings in the \"protein\" column start with `'DECOY_'`\n-\n- *args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ *args\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ **kwargs\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\nReturns\n-------\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/protxml.py",
"new_path": "pyteomics/protxml.py",
"diff": "@@ -218,7 +218,7 @@ def DataFrame(*args, **kwargs):\nParameters\n----------\n- sep : str or None, optional\n+ sep : str or None, keyword only, optional\nSome values related to protein groups are variable-length lists.\nIf `sep` is a :py:class:`str`, they will be packed into single string using\nthis delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is\n@@ -227,7 +227,11 @@ def DataFrame(*args, **kwargs):\npd_kwargs : dict, optional\nKeyword arguments passed to the :py:class:`pandas.DataFrame` constructor.\n- *args, **kwargs : passed to :py:func:`chain`.\n+ *args\n+ Passed to :py:func:`chain`.\n+\n+ **kwargs\n+ Passed to :py:func:`chain`.\nReturns\n-------\n@@ -259,6 +263,7 @@ def DataFrame(*args, **kwargs):\nyield out\nreturn pd.DataFrame(gen_items(), **pd_kwargs)\n+\ndef filter_df(*args, **kwargs):\n\"\"\"Read protXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.\nPositional arguments can be protXML files or DataFrames.\n@@ -269,15 +274,17 @@ def filter_df(*args, **kwargs):\nParameters\n----------\n- key : str / iterable / callable, optional\n+ key : str / iterable / callable, keyword only, optional\nDefault is 'probability'.\n- is_decoy : str / iterable / callable, optional\n+ is_decoy : str / iterable / callable, keyword only, optional\nDefault is to check that \"protein_name\" starts with `'DECOY_'`.\n- reverse : bool, optional\n+ reverse : bool, keyword only, optional\nShould be :py:const:`True` if higher score is better.\nDefault is :py:const:`True` (because the default key is 'probability').\n-\n- *args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ *args\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ **kwargs\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\nReturns\n-------\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -282,7 +282,11 @@ def DataFrame(*args, **kwargs):\npd_kwargs : dict, optional\nKeyword arguments passed to the :py:class:`pandas.DataFrame` constructor.\n- *args, **kwargs : passed to :py:func:`chain`.\n+ *args\n+ Passed to :py:func:`chain`.\n+\n+ **kwargs\n+ Passed to :py:func:`chain`.\nReturns\n-------\n@@ -336,7 +340,10 @@ def filter_df(*args, **kwargs):\nDefault is 'expect'.\nis_decoy : str / iterable / callable, optional\nDefault is to check if all strings in the \"protein\" column start with `'DECOY_'`\n- *args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ *args\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+ **kwargs\n+ Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\nReturns\n-------\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Doc fixes on args and kwargs |
377,522 | 28.07.2020 02:22:09 | -10,800 | 0f57e88c419fc12f226ca9baeefd01da963aeecb | Doc fixes in pylab_aux | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -65,18 +65,23 @@ import numpy as np\nfrom .auxiliary import linear_regression, PyteomicsError\nfrom . import parser, mass\n+\ndef plot_line(a, b, xlim=None, *args, **kwargs):\n\"\"\"Plot a line y = a * x + b.\nParameters\n----------\n- a, b : float\n- The slope and intercept of the line.\n+ a : float\n+ The slope of the line.\n+ b : float\n+ The intercept of the line.\nxlim : tuple, optional\nMinimal and maximal values of `x`. If not given, :py:func:`pylab.xlim`\nwill be called.\n-\n- *args, **kwargs : passed to :py:func:`pylab.plot` after `x` and `y` values.\n+ *args\n+ Passed to :py:func:`pylab.plot` after `x` and `y` values.\n+ **kwargs\n+ Passed to :py:func:`pylab.plot`.\nReturns\n-------\n@@ -88,13 +93,16 @@ def plot_line(a, b, xlim=None, *args, **kwargs):\n[a * xlim[0] + b, a * xlim[1] + b],\n*args, **kwargs)\n+\ndef scatter_trend(x, y=None, **kwargs):\n\"\"\"Make a scatter plot with a linear regression.\nParameters\n----------\n- x, y : array_like of float\n- 1-D arrays of floats. If `y` is omitted, `x` must be a 2-D array of shape (N, 2).\n+ x : array_like of float\n+ 1-D array of floats. If `y` is omitted, `x` must be a 2-D array of shape (N, 2).\n+ y : array_like of float, optional\n+ 1-D arrays of floats. If `y` is omitted or :py:const:`None`, `x` must be a 2-D array of shape (N, 2).\nplot_trend : bool, optional\nIf :py:const:`True` then plot a trendline (default).\nplot_sigmas : bool, optional\n@@ -179,6 +187,7 @@ def scatter_trend(x, y=None, **kwargs):\nlegend = None\nreturn sc, line, s_lines, legend\n+\ndef plot_function_3d(x, y, function, **kwargs):\n\"\"\"Plot values of a function of two variables in 3D.\n@@ -188,21 +197,28 @@ def plot_function_3d(x, y, function, **kwargs):\nParameters\n----------\n- x, y : array_like of float\n- The plotting range.\n+ x : array_like of float\n+ The plotting range on X axis.\n+ y : array_like of float\n+ The plotting range on Y axis.\nfunction : function\nThe function to plot.\n- plot_type : {'surface', 'wireframe', 'scatter', 'contour', 'contourf'}\n+ plot_type : {'surface', 'wireframe', 'scatter', 'contour', 'contourf'}, keyword only, optional\nThe type of a plot, see\n`scipy cookbook <http://www.scipy.org/Cookbook/Matplotlib/mplot3D>`_\nfor examples. The default value is 'surface'.\nnum_contours : int\nThe number of contours to plot, 50 by default.\n- xlabel, ylabel, zlabel : str, optional\n- The axes labels. Empty by default.\n- title : str, optional\n+ xlabel : str, keyword only, optional\n+ The X axis label. Empty by default.\n+ ylabel : str, keyword only, optional\n+ The Y axis label. Empty by default.\n+ zlabel : str, keyword only, optional\n+ The Z axis label. Empty by default.\n+ title : str, keyword only, optional\nThe title. Empty by default.\n- **kwargs : passed to the respective plotting function.\n+ **kwargs\n+ Passed to the respective plotting function.\n\"\"\"\nimport mpl_toolkits.mplot3d.axes3d as pylab3d\nax = pylab3d.Axes3D(pylab.gcf())\n@@ -240,6 +256,7 @@ def plot_function_3d(x, y, function, **kwargs):\nelse:\nraise PyteomicsError('Unknown plot type: {}'.format(plot_type))\n+\ndef plot_function_contour(x, y, function, **kwargs):\n\"\"\"Make a contour plot of a function of two variables.\n@@ -257,7 +274,8 @@ def plot_function_contour(x, y, function, **kwargs):\nThe axes labels. Empty by default.\ntitle : str, optional\nThe title. 
Empty by default.\n- **kwargs : passed to :py:func:`pylab.contour` or :py:func:`pylab.contourf`.\n+ **kwargs\n+ Passed to :py:func:`pylab.contour` or :py:func:`pylab.contourf`.\n\"\"\"\npylab.xlabel(kwargs.pop('xlabel', ''))\npylab.ylabel(kwargs.pop('ylabel', ''))\n@@ -277,6 +295,7 @@ def plot_function_contour(x, y, function, **kwargs):\npylab.contour(X, Y, Z, num_contours,\ncmap=kwargs.pop('cmap', pylab.cm.jet), **kwargs)\n+\ndef plot_qvalue_curve(qvalues, *args, **kwargs):\n\"\"\"\nPlot a curve with q-values on the X axis and corresponding PSM number\n@@ -286,13 +305,16 @@ def plot_qvalue_curve(qvalues, *args, **kwargs):\n----------\nqvalues : array-like\nAn array of q-values for sorted PSMs.\n- xlabel : str, optional\n+ xlabel : str, keyword only, optional\nLabel for the X axis. Default is \"q-value\".\n- ylabel : str, optional\n+ ylabel : str, keyword only, optional\nLabel for the Y axis. Default is \"# of PSMs\".\n- title : str, optional\n+ title : str, keyword only, optional\nThe title. Empty by default.\n- *args, **kwargs : will be given to :py:func:`pylab.plot` after `x` and `y`.\n+ *args\n+ Given to :py:func:`pylab.plot` after `x` and `y`.\n+ **kwargs\n+ Given to :py:func:`pylab.plot`.\nReturns\n-------\n@@ -303,6 +325,7 @@ def plot_qvalue_curve(qvalues, *args, **kwargs):\npylab.title(kwargs.pop('title', ''))\nreturn pylab.plot(qvalues, 1+np.arange(qvalues.size), *args, **kwargs)\n+\ndef plot_spectrum(spectrum, centroided=True, *args, **kwargs):\n\"\"\"\nPlot a spectrum, assuming it is a dictionary containing \"m/z array\" and \"intensity array\".\n@@ -315,13 +338,16 @@ def plot_spectrum(spectrum, centroided=True, *args, **kwargs):\ncentroided : bool, optional\nIf :py:const:`True` (default), peaks of the spectrum are plotted using :py:func:`pylab.bar`.\nIf :py:const:`False`, the arrays are simply plotted using :py:func:`pylab.plot`.\n- xlabel : str, optional\n+ xlabel : str, keyword only, optional\nLabel for the X axis. Default is \"m/z\".\n- ylabel : str, optional\n+ ylabel : str, keyword only, optional\nLabel for the Y axis. Default is \"intensity\".\n- title : str, optional\n+ title : str, keyword only, optional\nThe title. Empty by default.\n- *args, **kwargs : will be given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).\n+ *args\n+ Given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).\n+ **kwargs\n+ Given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).\n\"\"\"\npylab.xlabel(kwargs.pop('xlabel', 'm/z'))\npylab.ylabel(kwargs.pop('ylabel', 'intensity'))\n@@ -346,29 +372,32 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nA modX sequence.\ncentroided : bool, optional\nPassed to :py:func:`plot_spectrum`.\n- types : Container, optional\n+ types : Container, keyword only, optional\nIon types to be considered for annotation. Default is `('b', 'y')`.\n- maxcharge : int, optional\n+ maxcharge : int, keyword only, optional\nMaximum charge state for fragment ions to be considered. Default is `1`.\n- colors : dict, optional\n+ colors : dict, keyword only, optional\nKeys are ion types, values are colors to plot the annotated peaks with. Defaults to a red-blue scheme.\n- ftol : float, optional\n+ ftol : float, keyword only, optional\nA fixed m/z tolerance value for peak matching. Alternative to `rtol`.\n- rtol : float, optional\n+ rtol : float, keyword only, optional\nA relative m/z error for peak matching. 
Default is 10 ppm.\n- adjust_text : bool, optional\n+ adjust_text : bool, keyword only, optional\nAdjust the overlapping text annotations using :py:mod:`adjustText`.\n- text_kw : dict, optional\n+ text_kw : dict, keyword only, optional\nKeyword arguments for :py:func:`pylab.text`.\n- adjust_kw : dict, optional\n+ adjust_kw : dict, keyword only, optional\nKeyword argyuments for `:py:func:`adjust_text`.\n- ion_comp : dict, optional\n+ ion_comp : dict, keyword only, optional\nA dictionary defining definitions of ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.\n- mass_data : dict, optional\n+ mass_data : dict, keyword only, optional\nA dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.\n- aa_mass : dict, optional\n+ aa_mass : dict, keyword only, optional\nA dictionary of amino acid residue masses.\n- *args, **kwargs : passed to :py:func:`plot_spectrum`.\n+ *args\n+ Passed to :py:func:`plot_spectrum`.\n+ **kwargs\n+ Passed to :py:func:`plot_spectrum`.\n\"\"\"\ntypes = kwargs.pop('types', ('b', 'y'))\nmaxcharge = kwargs.pop('maxcharge', 1)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Doc fixes in pylab_aux |
377,522 | 29.09.2020 15:55:59 | -10,800 | 26d2bb7432ded8e006de3716747dbdfaf6cf2244 | Fix download from Python in example 2 | [
{
"change_type": "MODIFY",
"old_path": "doc/source/_static/example_msms.py",
"new_path": "doc/source/_static/example_msms.py",
"diff": "from pyteomics import mgf, pepxml, mass\nimport os\n-from urllib.request import urlretrieve\n+from urllib.request import urlopen, Request\nimport pylab\n# get the files\nfor fname in ('mgf', 'pep.xml'):\nif not os.path.isfile('example.' + fname):\n- urlretrieve('http://pyteomics.readthedocs.io/en/latest/_static/example.'\n- + fname, 'example.' + fname)\n+ headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'}\n+ url = 'http://pyteomics.readthedocs.io/en/latest/_static/example.' + fname\n+ request = Request(url, None, headers)\n+ target_name = 'example.' + fname\n+ with urlopen(request) as response, open(target_name, 'wb') as fout:\n+ print('Downloading ' + target_name + '...')\n+ fout.write(response.read())\ndef fragments(peptide, types=('b', 'y'), maxcharge=1):\n\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/examples/example_msms.rst",
"new_path": "doc/source/examples/example_msms.rst",
"diff": "@@ -27,7 +27,7 @@ Then we'll download the files, if needed:\n.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 7-10\n+ :lines: 7-15\nNow it's time to define the function that will give us *m/z* of theoretical\nfragments for a given sequence. We will use\n@@ -37,7 +37,7 @@ over possible charges and ion types:\n.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 12-25\n+ :lines: 17-30\nSo, the outer loop is over \"fragmentation sites\", the next one is over ion\ntypes, then over charges, and lastly over two parts of the sequence\n@@ -49,25 +49,25 @@ recommended.\n.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 27-29\n+ :lines: 32-34\nNow prepare the figure...\n.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 31-35\n+ :lines: 36-40\n... plot the real spectrum:\n.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 36-37\n+ :lines: 41-42\n... calculate and plot the theoretical spectrum, and show everything:\n.. literalinclude:: ../_static/example_msms.py\n:language: python\n- :lines: 38-43\n+ :lines: 43-48\nYou will see something like this:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix download from Python in example 2 |
377,522 | 29.09.2020 18:54:21 | -10,800 | 29f41361aa75d28046ecde1924b637f89267f3c5 | Update docs, changelog, authors, bump version to 4.3.3 | [
{
"change_type": "MODIFY",
"old_path": "AUTHORS",
"new_path": "AUTHORS",
"diff": "@@ -5,6 +5,7 @@ On Github\n.........\nVladimir Gorshkov [caetera]\n+Andrey Rozenberg [alephreish]\nOn Bitbucket (before March 1, 2020)\n"
},
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-dev\n----\n+4.3.3\n+-----\n+\n+ - Add :py:func:`pyteomics.electrochem.gravy` (`#9 <https://github.com/levitsky/pyteomics/pull/9>`_).\n+\n+ - Fixes and improvements in :py:func:`pyteomics.pepxml.roc_curve` (`#10 <https://github.com/levitsky/pyteomics/pull/10>`_).\n+\n+ - Changes in guessing behavior of :py:func:`read` functions.\n+\n+ In modules that implement :ref:`indexing parsers <indexing>` for non-XML formats (MGF, FASTA, PEFF, ms1/ms2),\n+ when a parser is instantiated using :py:func:`read`, the parser class to instantiate is guessed\n+ based on the mode of the file object passed to :py:func:`read` (text or binary).\n-Add :py:func:`pyteomics.electrochem.gravy` (`#9 <https://github.com/levitsky/pyteomics/pull/9>`_).\n+ With some file-like objects, mode cannot be easily deduced without consuming some of the data.\n+ You will now see more warnings in case `use_index` is not explicitly passed to :py:func:`read` and reading mode is not obvious.\n+ There will also be warnings if `use_index` is specified but the file is opened in the wrong mode.\n+ To avoid all of this, youy are encouraged to instantiate parser classes directly,\n+ or explicitly specify `use_index` to :py:func:`read` in all corner cases.\n4.3.2\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.3.3dev1\n+4.3.3\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/data.rst",
"new_path": "doc/source/data.rst",
"diff": "@@ -6,16 +6,16 @@ The following section is dedicated to data manipulation. **Pyteomics** aims to\nsupport the most common formats of (LC-)MS/MS data, peptide identification\nresults and protein databases.\n-.. include :: data/notes.rst\n+.. include :: data/notes.rst.inc\n.. contents:: Document contents\n:backlinks: top\n:depth: 3\n-.. include :: data/text.rst\n+.. include :: data/text.rst.inc\n-.. include :: data/xml.rst\n+.. include :: data/xml.rst.inc\n-.. include :: data/indexing.rst\n+.. include :: data/indexing.rst.inc\n-.. include :: data/tda.rst\n+.. include :: data/tda.rst.inc\n"
},
{
"change_type": "RENAME",
"old_path": "doc/source/data/indexing.rst",
"new_path": "doc/source/data/indexing.rst.inc",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "doc/source/data/notes.rst",
"new_path": "doc/source/data/notes.rst.inc",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "doc/source/data/tda.rst",
"new_path": "doc/source/data/tda.rst.inc",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "doc/source/data/text.rst",
"new_path": "doc/source/data/text.rst.inc",
"diff": "@@ -71,7 +71,7 @@ Class-based interface\nSince version 3.4.3, MGF parsing functionality is encapsulated in a class:\n:py:class:`pyteomics.mgf.MGF`. This class can be used for:\n- - sequential parsing of the file (the same as :py:func:`read`)::\n+ - sequential parsing of the file (the same as :py:func:`read`):\n.. code-block:: python\n@@ -79,7 +79,7 @@ Since version 3.4.3, MGF parsing functionality is encapsulated in a class:\n..: for spectrum in reader:\n..: ...\n- - accessing the file header (the same as :py:func:`read_header`)::\n+ - accessing the file header (the same as :py:func:`read_header`):\n.. code-block:: python\n@@ -95,7 +95,7 @@ Since version 3.4.3, MGF parsing functionality is encapsulated in a class:\n'useremail': 'leu@altered-state.edu',\n'username': 'Lou Scene'}\n- - direct access to spectra by title (the same as :py:func:`get_spectrum`)::\n+ - direct access to spectra by title (the same as :py:func:`get_spectrum`):\n.. code-block:: python\n"
},
{
"change_type": "RENAME",
"old_path": "doc/source/data/xml.rst",
"new_path": "doc/source/data/xml.rst.inc",
"diff": "@@ -340,7 +340,7 @@ idXML\n**idXML** is an OpenMS format for peptide identifications. It is supported in :py:mod:`pyteomics.openms.idxml`.\nIt partially supports indexing (protein information can be indexed and extracted with `retrieve_refs`).\n-The regular iterative parsing is done through :py:func:`read` or :py:class:`IDXML`, and :py:class:`pandas.DataFrame`s\n+The regular iterative parsing is done through :py:func:`read` or :py:class:`IDXML`, and :py:class:`pandas.DataFrame`\ncan be created as well.\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/intro.rst",
"new_path": "doc/source/intro.rst",
"diff": "@@ -12,10 +12,12 @@ Python shell:\nIPython users can use the following shortcut:\n-.. code-block:: python\n+.. code-block::\n+\n+ In [1]: from pyteomics.mass import calculate_mass\n+\n+ In [2]: calculate_mass?\n- >>> from pyteomics.mass import calculate_mass\n- >>> calculate_mass?\nWe expect the reader to be familiar with the basic Python syntax as well as\nproteomics concepts.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update docs, changelog, authors, bump version to 4.3.3 |
377,522 | 08.10.2020 17:18:34 | -10,800 | 91cbbc227b07c09b09566b347c6c886aa5235afc | Fix URL formatting in USI doc | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/usi.py",
"new_path": "pyteomics/usi.py",
"diff": "@@ -4,12 +4,12 @@ usi - Universal Spectrum Identifier (USI) parser and minimal PROXI client\nSummary\n-------\n-`USI <http://www.psidev.info/usi>` is a standardized method of referencing a specific\n+`USI <http://www.psidev.info/usi>`_ is a standardized method of referencing a specific\nspectrum in a dataset, possibly attached to an interpretation. This module includes a\n:class:`USI` type which can represent these constructs, :meth:`~USI.parse` them and\nreconstruct them.\n-One use-case for USI is to request spectrum information from a `PROXI <http://www.psidev.info/proxi>`\n+One use-case for USI is to request spectrum information from a `PROXI <http://www.psidev.info/proxi>`_\nservice host. PROXI services are available from several of the major national proteomics data hosts,\nincluding MassIVE, PeptideAtlas, PRIDE, and jPOST.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix URL formatting in USI doc |
377,522 | 09.10.2020 19:27:06 | -10,800 | d3538dd47fe44952633179dac5d9cad879a3d2a0 | Fix mgf.write with write_ions, add a read-write mgf test with ions | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/structures.py",
"new_path": "pyteomics/auxiliary/structures.py",
"diff": "import re\nfrom collections import defaultdict, Counter\n+import warnings\ntry:\nbasestring\n@@ -82,15 +83,19 @@ class Charge(int):\ndef __str__(self):\nreturn str(abs(self)) + '+-'[self < 0]\n+\nclass Ion(str):\n\"\"\"Represents an Ion, right now just a subclass of String.\n\"\"\"\n+ _pattern = r'([abcxyz]\\d+(\\-H2O|\\-NH3)?)([\\+|-]\\d+)' # \"y2-H2O+1\"\n+\ndef __init__(self, *args, **kwargs):\nif args and isinstance(args[0], basestring):\ntry:\n- self.ion_type, self.neutral_loss, self.charge = re.match(r'([abcxyz]\\d+(\\-H2O|\\-NH3)?)([\\+|-]\\d+)', args[0]).groups() #\"y2-H2O+1\"\n- except Exception as e:\n- raise PyteomicsError(\"Malformed ion string, must match the regex r'([abcxyz]\\d+(\\-H2O|\\-NH3)?)([\\+|-]\\d+)'\")\n+ self.ion_type, self.neutral_loss, self.charge = re.match(self._pattern, args[0]).groups()\n+ except Exception:\n+ raise PyteomicsError(\"Malformed ion string, must match the regex {!r}\".format(self._pattern))\n+\nclass ChargeList(list):\n\"\"\"Just a list of :py:class:`Charge`s. When printed, looks like an\n@@ -126,11 +131,13 @@ def _parse_charge(s, list_only=False):\npass\nreturn ChargeList(s)\n+\ndef _parse_ion(ion_text):\ntry:\nreturn Ion(ion_text)\nexcept Exception as e:\n- print(e)\n+ warnings.warn('Could not parse ion string: {} ({})'.format(ion_text, e.args[0]))\n+\nclass BasicComposition(defaultdict, Counter):\n\"\"\"A generic dictionary for compositions.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -75,6 +75,7 @@ import itertools as it\nimport sys\nfrom . import auxiliary as aux\n+\nclass MGFBase(object):\n\"\"\"Abstract mixin class representing an MGF file. Subclasses implement different approaches to parsing.\"\"\"\n_comments = set('#;!/')\n@@ -91,7 +92,6 @@ class MGFBase(object):\n_array_keys_unicode = [u'm/z array', u'intensity array', u'charge array', u'ion array']\nencoding = None\n-\ndef __init__(self, source=None, **kwargs):\n\"\"\"Create an MGF file object, set MGF-specific parameters.\n@@ -145,7 +145,6 @@ class MGFBase(object):\nelse:\nself._header = None\n-\ndef parse_precursor_charge(self, charge_text, list_only=False):\nreturn aux._parse_charge(charge_text, list_only=list_only)\n@@ -275,8 +274,6 @@ class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixi\n\"\"\"\ndelimiter = 'BEGIN IONS'\n-\n-\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\nread_ions=False, dtype=None, encoding='utf-8', index_by_scans=False, _skip_index=False, **kwargs):\nself.label = r'SCANS=(\\d+)\\s*' if index_by_scans else r'TITLE=([^\\n]*\\S)\\s*'\n@@ -516,30 +513,33 @@ def read_header(source):\n_default_key_order = ['title', 'pepmass', 'rtinseconds', 'charge']\n+\ndef _pepmass_repr(k, pepmass):\noutstr = k.upper() + '='\nif not isinstance(pepmass, (str, int, float)): # assume iterable\ntry:\noutstr += ' '.join(str(x) for x in pepmass if x is not None)\nexcept TypeError:\n- raise aux.PyteomicsError(\n- 'Cannot handle parameter: PEPMASS = {}'.format(pepmass))\n+ raise aux.PyteomicsError('Cannot handle parameter: PEPMASS = {}'.format(pepmass))\nelse:\noutstr += str(pepmass)\nreturn outstr\n+\ndef _charge_repr(k, charge):\nreturn '{}={}'.format(k.upper(), aux._parse_charge(str(charge)))\n+\ndef _default_repr(key, val):\nreturn '{}={}'.format(key.upper(), val)\n+\n_default_value_formatters = {'pepmass': _pepmass_repr, 'charge': _charge_repr}\n+\n@aux._file_writer()\n-def write(spectra, output=None, header='', key_order=_default_key_order,\n- fragment_format=None, write_charges=True, write_ions=False, use_numpy=None,\n- param_formatters=_default_value_formatters):\n+def write(spectra, output=None, header='', key_order=_default_key_order, fragment_format=None,\n+ write_charges=True, write_ions=False, use_numpy=None, param_formatters=_default_value_formatters):\n\"\"\"\nCreate a file in MGF format.\n@@ -577,10 +577,11 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nwrite_ions : bool, optional\nIf :py:const:`False`, fragment ions from 'ion array' will not be written.\n+ If :py:const:`True`, then `write_charges` is set to :py:const:`False`.\nDefault is :py:const:`False`.\nfragment_format : str, optional\n- Format string for m/z, intensity and charge of a fragment. Useful to set\n+ Format string for m/z, intensity and charge (or ion annotation) of a fragment. Useful to set\nthe number of decimal places, e.g.:\n``fragment_format='%.4f %.0f'``. 
Default is ``'{} {} {}'``.\n@@ -639,10 +640,13 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nfragment_format = '{} {} {}'\nnp_format_2 = '%.5f %.1f'\nnp_format_3 = '%.5f %.1f %d'\n+ np_format_i = '%.5f %.1f %s'\nelse:\n- np_format_2 = np_format_3 = fragment_format\n+ np_format_2 = np_format_3 = np_format_i = fragment_format\nformat_str = fragment_format + '\\n'\n+ if write_ions:\n+ write_charges = False\nif use_numpy is None:\nuse_numpy = not write_charges\n@@ -659,8 +663,7 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nhead_str = '\\n'.join(header)\nhead_dict = {}\nfor line in head_lines:\n- if not line.strip() or any(\n- line.startswith(c) for c in MGF._comments):\n+ if not line.strip() or any(line.startswith(c) for c in MGF._comments):\ncontinue\nl = line.split('=')\nif len(l) == 2:\n@@ -697,7 +700,7 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nX[:, 0] = spectrum['m/z array']\nX[:, 1] = spectrum['intensity array']\nX[:, 2] = spectrum['ion array']\n- np.savetxt(output, X, fmt=np_format_3)\n+ np.savetxt(output, X, fmt=np_format_i)\nelse:\nsuccess = False\nelse:\n@@ -706,7 +709,9 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nif not success:\nfor m, i, c in zip(spectrum['m/z array'],\nspectrum['intensity array'],\n- spectrum.get('charge array', it.cycle((None,)))):\n+ spectrum.get('charge array', it.cycle((None,))) if write_charges else\n+ spectrum.get('ion array', it.cycle((None,))) if write_ions else\n+ it.cycle((None,))):\noutput.write(format_str.format(\nm, i,\n(c if c not in nones else '')))\n@@ -715,4 +720,5 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\noutput.write('END IONS\\n\\n')\nreturn output\n+\nchain = aux._make_chain(read, 'read')\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -21,7 +21,8 @@ class MGFTest(unittest.TestCase):\ndef setUp(self):\nself.path = 'test.mgf'\nself.header = mgf.read_header(self.path)\n- self.spectra = list(mgf.read(self.path))\n+ with mgf.read(self.path) as f:\n+ self.spectra = list(f)\nself.tmpfile = tempfile.TemporaryFile(mode='r+')\nmgf.write(header=self.header, spectra=self.spectra, output=self.tmpfile)\nself.tmpfile.seek(0)\n@@ -33,6 +34,8 @@ class MGFTest(unittest.TestCase):\nself.tmpfile.close()\nself.path_annotated = 'test_annotated.mgf'\nself.header_annotated = mgf.read_header(self.path_annotated)\n+ with mgf.read(self.path_annotated, read_ions=True) as f:\n+ self.spectra_annotated = list(f)\ndef test_read(self):\nfor func in [mgf.read, mgf.MGF, mgf.IndexedMGF]:\n@@ -67,8 +70,24 @@ class MGFTest(unittest.TestCase):\nself.assertEqual(data.mgf_spectra_short_no_charges, list(reader))\ndef test_read_with_ions(self):\n- with mgf.read(self.path_annotated, read_ions=True) as reader:\n- for spec_data, spec_read in zip(data.mgf_spectra_annotated_long, list(reader)):\n+ for spec_data, spec_read in zip(data.mgf_spectra_annotated_long, list(self.spectra_annotated)):\n+ # Check that the spectra have the same dict keys\n+ self.assertEqual(spec_data.keys(), spec_read.keys())\n+ for key in spec_data.keys():\n+ if type(spec_data[key]) == dict:\n+ self.assertDictEqual(spec_data[key], spec_read[key])\n+ else:\n+ np.testing.assert_array_equal(spec_data[key], spec_read[key])\n+\n+ def test_read_write_with_ions(self):\n+ formats = ['{:.6f} {:.6f} {}', '%.6f %.6f %s']\n+ for use_numpy in range(2):\n+ with tempfile.TemporaryFile(mode='r+') as f:\n+ mgf.write(self.spectra_annotated, f, write_ions=True, use_numpy=use_numpy,\n+ fragment_format=formats[use_numpy])\n+ f.seek(0)\n+ spectra = list(mgf.read(f, read_ions=True))\n+ for spec_data, spec_read in zip(data.mgf_spectra_annotated_long, spectra):\n# Check that the spectra have the same dict keys\nself.assertEqual(spec_data.keys(), spec_read.keys())\nfor key in spec_data.keys():\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix mgf.write with write_ions, add a read-write mgf test with ions |
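A minimal usage sketch of the ion-annotation round trip added in this commit; 'annotated.mgf' and 'annotated_copy.mgf' are hypothetical file names, and the keyword arguments follow the diff above:

    from pyteomics import mgf

    # Read an MGF file whose peak lines carry a third, ion-label column.
    spectra = list(mgf.read('annotated.mgf', read_ions=True))
    print(spectra[0]['ion array'][:5])

    # Write the annotations back out; write_ions=True emits the ion column and
    # disables the charge column (the two outputs are mutually exclusive).
    mgf.write(spectra, output='annotated_copy.mgf', write_ions=True)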
377,523 | 12.10.2020 18:30:25 | -7,200 | 87897584bb85f631b5cbef306b114c2eb316dc26 | Reset changes in auxiliary/file_helpers.py | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -244,11 +244,11 @@ class IndexedReaderMixin(NoOpBaseReader):\nindex = self.default_index\nif index is None:\nraise PyteomicsError('Access by ID requires building an offset index.')\n- offsets = index[str(elem_id)]\n+ offsets = index[elem_id]\nreturn self._item_from_offsets(offsets)\ndef get_by_ids(self, ids):\n- return [self.get_by_id(str(key)) for key in ids]\n+ return [self.get_by_id(key) for key in ids]\ndef get_by_index(self, i):\ntry:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Reset changes in auxiliary/file_helpers.py |
377,523 | 12.10.2020 18:51:16 | -7,200 | 07020e5583289ec4281293d3b46156ba7c70a8d0 | Moved read_ions parameter to end of function in mgf.py | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -275,28 +275,32 @@ class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixi\ndelimiter = 'BEGIN IONS'\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\n- read_ions=False, dtype=None, encoding='utf-8', index_by_scans=False, _skip_index=False, **kwargs):\n+ dtype=None, encoding='utf-8', index_by_scans=False, read_ions=False, _skip_index=False, **kwargs):\nself.label = r'SCANS=(\\d+)\\s*' if index_by_scans else r'TITLE=([^\\n]*\\S)\\s*'\nsuper(IndexedMGF, self).__init__(source, parser_func=self._read, pass_file=False, args=(), kwargs={},\n- use_header=use_header, convert_arrays=convert_arrays, read_charges=read_charges,\n- read_ions=read_ions, dtype=dtype, encoding=encoding, _skip_index=_skip_index, **kwargs)\n+ use_header=use_header, convert_arrays=convert_arrays,\n+ read_charges=read_charges,\n+ dtype=dtype, encoding=encoding, read_ions=read_ions, _skip_index=_skip_index,\n+ **kwargs)\ndef __reduce_ex__(self, protocol):\nreturn (self.__class__,\n(self._source_init, False, self._convert_arrays, self._read_charges,\n- self._read_ions, self._dtype_dict, self.encoding, True),\n+ self._dtype_dict, self.encoding, True),\nself.__getstate__())\ndef __getstate__(self):\nstate = super(IndexedMGF, self).__getstate__()\nstate['use_header'] = self._use_header\nstate['header'] = self._header\n+ state['read_ions'] = self._read_ions\nreturn state\ndef __setstate__(self, state):\nsuper(IndexedMGF, self).__setstate__(state)\nself._header = state['header']\nself._use_header = state['use_header']\n+ self._read_ions = state['read_ions']\n@aux._keepstate_method\ndef _read_header(self):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Moved read_ions parameter to end of function in mgf.py |
377,522 | 13.10.2020 22:30:31 | -10,800 | b55601f86cb9ee515b88491d1eeeac20783fba06 | Undo str conversion of MGF ID | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -322,7 +322,7 @@ class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixi\nyield spectrum\ndef get_spectrum(self, key):\n- return self.get_by_id(str(key))\n+ return self.get_by_id(key)\ndef _get_time(self, spectrum):\ntry:\n"
},
{
"change_type": "UNKNOWN",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": ""
},
{
"change_type": "UNKNOWN",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": ""
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Undo str conversion of MGF ID |
377,522 | 13.10.2020 23:50:35 | -10,800 | 4898a25135ad0bb6314d693bc1b41ff57d70789e | Add an extra test for index access and ions | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -160,6 +160,10 @@ class MGFTest(unittest.TestCase):\nself.assertEqual(data.mgf_spectra_long[1], f.get_spectrum(key))\nself.assertEqual(data.mgf_spectra_long[1], mgf.get_spectrum(self.path, key))\n+ def test_key_access_ions(self):\n+ with mgf.IndexedMGF(self.path_annotated, read_ions=True) as f:\n+ np.testing.assert_array_equal(f['RAEYWENYPPAH||3']['ion array'], self.spectra_annotated[1]['ion array'])\n+\ndef test_read_list(self):\nkey = ['Spectrum 2', 'Spectrum 1']\nwith mgf.IndexedMGF(self.path) as f:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add an extra test for index access and ions |
377,522 | 14.10.2020 15:24:35 | -10,800 | 185c58d977c27b9a6eaf8b23a5271326bb22a1a7 | Add pyteomics/version, remove VERSION file | [
{
"change_type": "MODIFY",
"old_path": "MANIFEST.in",
"new_path": "MANIFEST.in",
"diff": "-include VERSION INSTALL README.rst\n+include INSTALL README.rst\n"
},
{
"change_type": "DELETE",
"old_path": "VERSION",
"new_path": null,
"diff": "-4.4.0dev1\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/conf.py",
"new_path": "doc/source/conf.py",
"diff": "@@ -82,7 +82,8 @@ copyright = u'2011-2020, Lev Levitsky, Anton Goloborodko, Mikhail Gorshkov'\n# built documents.\n#\n# The short X.Y version.\n-version = open('../../VERSION').readline().strip(' \\n\\t')\n+from pyteomics import version as ver\n+version = ver.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pyteomics/version.py",
"diff": "+__version__ = '4.4.0dev1'\n+\n+from collections import namedtuple\n+import re\n+\n+\n+class _VersionInfo(namedtuple('_VersionInfo', ('major', 'minor', 'micro', 'releaselevel', 'serial'))):\n+ def __new__(cls, version_str):\n+ groups = re.match(r'(\\d+)\\.(\\d+)(?:\\.)?(\\d+)?([a-zA-Z]+)?(\\d+)?', version_str).groups()\n+ inst = super(_VersionInfo, cls).__new__(cls, *groups)\n+ inst._version_str = version_str\n+ return inst\n+\n+ def __str__(self):\n+ return 'Pyteomics version {}'.format(self._version_str)\n+\n+\n+def version():\n+ return _VersionInfo(__version__)\n"
},
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -6,17 +6,35 @@ setup.py file for pyteomics\nfrom setuptools import setup\nimport re\n+import os\n+\n+\n+# from https://packaging.python.org/guides/single-sourcing-package-version/\n+\n+def read(rel_path):\n+ here = os.path.abspath(os.path.dirname(__file__))\n+ with open(os.path.join(here, rel_path), 'r') as fp:\n+ return fp.read()\n+\n+\n+def get_version(rel_path):\n+ for line in read(rel_path).splitlines():\n+ if line.startswith('__version__'):\n+ delim = '\"' if '\"' in line else \"'\"\n+ return line.split(delim)[1]\n+ else:\n+ raise RuntimeError(\"Unable to find version string.\")\n+\n-with open('VERSION') as v:\n- version = next(v).strip()\nwith open('README.rst') as r, open('INSTALL') as i:\nlong_description = re.sub(r':py:\\w+:`([^`]+)`',\nlambda m: '**{}**'.format(m.group(1)),\n''.join(r) + '\\n' + ''.join(i))\n+\nsetup(\nname = 'pyteomics',\n- version = version,\n+ version = get_version('pyteomics/version.py'),\ndescription = 'A framework for proteomics data analysis.',\nlong_description = long_description,\nauthor = 'Anton Goloborodko & Lev Levitsky',\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add pyteomics/version, remove VERSION file |
377,522 | 14.10.2020 15:35:40 | -10,800 | bb4c6f9fa6d3e950026f271b720af248b66aa396 | Add tests for version parsing | [
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -11,6 +11,7 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nfrom pyteomics import auxiliary as aux\nfrom pyteomics import tandem\n+from pyteomics import version\npsms = list(zip(count(), string.ascii_uppercase + string.ascii_lowercase,\nnp.arange(0.01, 0.062, 0.001)))\n@@ -920,6 +921,26 @@ class UseIndexTest(unittest.TestCase):\nself.assertIs(w[0].category, UserWarning)\n+class VersionTest(unittest.TestCase):\n+ def test_version_works(self):\n+ try:\n+ version.version()\n+ except Exception as e:\n+ self.fail('version() raised an exception: {!r}'.format(e))\n+\n+ def test_short_version(self):\n+ self.assertEqual(version._VersionInfo('1.2'), ('1', '2', None, None, None))\n+\n+ def test_longer_version(self):\n+ self.assertEqual(version._VersionInfo('1.2.3'), ('1', '2', '3', None, None))\n+\n+ def test_short_dev_version(self):\n+ self.assertEqual(version._VersionInfo('1.2dev3'), ('1', '2', None, 'dev', '3'))\n+\n+ def test_longer_dev_version(self):\n+ self.assertEqual(version._VersionInfo('1.2.3dev4'), ('1', '2', '3', 'dev', '4'))\n+\n+\nimport warnings\nif __name__ == '__main__':\nwith warnings.catch_warnings():\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add tests for version parsing |
377,522 | 14.10.2020 22:17:45 | -10,800 | 4fb36028b7f559f2b11103b719b8fad452e9502d | Pre-compute version_info | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -15,5 +15,5 @@ class _VersionInfo(namedtuple('_VersionInfo', ('major', 'minor', 'micro', 'relea\nreturn 'Pyteomics version {}'.format(self._version_str)\n-def version():\n- return _VersionInfo(__version__)\n+version_info = _VersionInfo(__version__)\n+version = __version__\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Pre-compute version_info |
377,522 | 15.10.2020 14:08:41 | -10,800 | 8354896f8836e1f71071367c117d83466b1cb24c | Update changelog, add minimal version doc | [
{
"change_type": "MODIFY",
"old_path": "doc/source/api.rst",
"new_path": "doc/source/api.rst",
"diff": "@@ -34,3 +34,4 @@ Contents:\napi/pylab_aux\napi/xml\napi/auxiliary\n+ api/version\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "doc/source/api/version.rst",
"diff": "+.. automodule :: pyteomics.version\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "-__version__ = '4.4.0dev1'\n+\"\"\"\n+version - Pyteomics version information\n+=======================================\n+\n+This module is provided for convenience and captures information about the current version number of Pyteomics.\n+\n+Constants\n+---------\n+\n+ :py:const:`version` - a string with the current version.\n+\n+ :py:const:`version_info` - a tuple with structured information about the current version.\n+\n+\"\"\"\n+\n+__version__ = '4.4.0dev2'\nfrom collections import namedtuple\nimport re\nclass _VersionInfo(namedtuple('_VersionInfo', ('major', 'minor', 'micro', 'releaselevel', 'serial'))):\n+ \"\"\"Tuple mimicking :py:const:`sys.version_info`\"\"\"\ndef __new__(cls, version_str):\ngroups = re.match(r'(\\d+)\\.(\\d+)(?:\\.)?(\\d+)?([a-zA-Z]+)?(\\d+)?', version_str).groups()\ninst = super(_VersionInfo, cls).__new__(cls, *groups)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update changelog, add minimal version doc |
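A short sketch of the two constants documented in this commit (the printed values depend on the installed version):

    from pyteomics import version

    print(version.version)       # the plain version string
    print(version.version_info)  # namedtuple (major, minor, micro, releaselevel, serial), all fields are strings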
377,522 | 22.10.2020 21:35:06 | -10,800 | 8e3a55877ea159fcb3299cab97e753ce12cf2d6d | Doc fix in pylab_aux.annotate_spectrum | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -387,9 +387,9 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\ntext_kw : dict, keyword only, optional\nKeyword arguments for :py:func:`pylab.text`.\nadjust_kw : dict, keyword only, optional\n- Keyword argyuments for `:py:func:`adjust_text`.\n+ Keyword argyuments for :py:func:`adjust_text`.\nion_comp : dict, keyword only, optional\n- A dictionary defining definitions of ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.\n+ A dictionary defining ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.\nmass_data : dict, keyword only, optional\nA dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.\naa_mass : dict, keyword only, optional\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Doc fix in pylab_aux.annotate_spectrum |
377,522 | 25.10.2020 01:10:03 | -10,800 | 2ed706293af3083f889827c339a7118461945f8d | Fix linter warnings in parser and test_parser | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/parser.py",
"new_path": "pyteomics/parser.py",
"diff": "@@ -119,6 +119,7 @@ std_labels = std_amino_acids + [std_nterm, std_cterm]\n_nterm_mod = r'[^-]+-$'\n_cterm_mod = r'-[^-]+$'\n+\ndef is_term_mod(label):\n\"\"\"Check if `label` corresponds to a terminal modification.\n@@ -145,6 +146,7 @@ def is_term_mod(label):\n\"\"\"\nreturn (re.match(_nterm_mod, label) or re.match(_cterm_mod, label)) is not None\n+\ndef match_modX(label):\n\"\"\"Check if `label` is a valid 'modX' label.\n@@ -158,6 +160,7 @@ def match_modX(label):\n\"\"\"\nreturn re.match(_modX_single, label)\n+\ndef is_modX(label):\n\"\"\"Check if `label` is a valid 'modX' label.\n@@ -182,6 +185,7 @@ def is_modX(label):\n\"\"\"\nreturn bool(match_modX(label))\n+\ndef length(sequence, **kwargs):\n\"\"\"Calculate the number of amino acid residues in a polypeptide\nwritten in modX notation.\n@@ -205,7 +209,8 @@ def length(sequence, **kwargs):\n>>> length('H-PEPTIDE-OH')\n7\n\"\"\"\n- if not sequence: return 0\n+ if not sequence:\n+ return 0\nif isinstance(sequence, str) or isinstance(sequence, list):\nif isinstance(sequence, str):\n@@ -219,11 +224,11 @@ def length(sequence, **kwargs):\nnum_term_groups += 1\nreturn len(parsed_sequence) - num_term_groups\nelif isinstance(sequence, dict):\n- return sum(amount for aa, amount in sequence.items()\n- if not is_term_mod(aa))\n+ return sum(amount for aa, amount in sequence.items() if not is_term_mod(aa))\nraise PyteomicsError('Unsupported type of sequence.')\n+\ndef _split_label(label):\ntry:\nmod, X = match_modX(label).groups()\n@@ -234,11 +239,13 @@ def _split_label(label):\nelse:\nreturn mod, X\n+\n_modX_sequence = re.compile(r'^([^-]+-)?((?:[^A-Z-]*[A-Z])+)(-[^-]+)?$')\n_modX_group = re.compile(r'[^A-Z-]*[A-Z]')\n_modX_split = re.compile(r'([^A-Z-]*)([A-Z])')\n_modX_single = re.compile(r'^([^A-Z-]*)([A-Z])$')\n+\ndef parse(sequence, show_unmodified_termini=False, split=False, allow_unknown_modifications=False, **kwargs):\n\"\"\"Parse a sequence string written in modX notation into a list of\nlabels or (if `split` argument is :py:const:`True`) into a list of\n@@ -333,8 +340,7 @@ def parse(sequence, show_unmodified_termini=False, split=False, allow_unknown_mo\nlabels = set(labels)\nfor term, std_term in zip([n, c], [std_nterm, std_cterm]):\nif term and term not in labels and not allow_unknown_modifications:\n- raise PyteomicsError(\n- 'Unknown label: {}'.format(term))\n+ raise PyteomicsError('Unknown label: {}'.format(term))\nfor group in parsed_sequence:\nif split:\nmod, X = group if len(group) == 2 else ('', group[0])\n@@ -343,8 +349,7 @@ def parse(sequence, show_unmodified_termini=False, split=False, allow_unknown_mo\nif ((not mod) and X not in labels) or not ((mod + X in labels) or (\nX in labels and (\nmod in labels or allow_unknown_modifications))):\n- raise PyteomicsError(\n- 'Unknown label: {}'.format(group))\n+ raise PyteomicsError('Unknown label: {}'.format(group))\n# Append terminal labels\nif show_unmodified_termini or nterm != std_nterm:\n@@ -358,9 +363,9 @@ def parse(sequence, show_unmodified_termini=False, split=False, allow_unknown_mo\nelse:\nparsed_sequence.append(cterm)\n-\nreturn parsed_sequence\n+\ndef valid(*args, **kwargs):\n\"\"\"Try to parse sequence and catch the exceptions.\nAll parameters are passed to :py:func:`parse`.\n@@ -377,6 +382,7 @@ def valid(*args, **kwargs):\nreturn False\nreturn True\n+\ndef fast_valid(sequence, labels=set(std_labels)):\n\"\"\"Iterate over `sequence` and check if all items are in `labels`.\nWith strings, this only works as expected on sequences without\n@@ -396,6 +402,7 @@ def 
fast_valid(sequence, labels=set(std_labels)):\n\"\"\"\nreturn set(sequence).issubset(labels)\n+\ndef tostring(parsed_sequence, show_unmodified_termini=True):\n\"\"\"Create a string from a parsed sequence.\n@@ -446,6 +453,7 @@ def tostring(parsed_sequence, show_unmodified_termini=True):\nlabels.append(''.join(cterm[:-1]))\nreturn ''.join(labels)\n+\ndef amino_acid_composition(sequence, show_unmodified_termini=False, term_aa=False, allow_unknown_modifications=False, **kwargs):\n\"\"\"Calculate amino acid composition of a polypeptide.\n@@ -520,6 +528,7 @@ def amino_acid_composition(sequence, show_unmodified_termini=False, term_aa=Fals\nreturn aa_dict\n+\n@memoize()\ndef cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exception=None):\n\"\"\"Cleaves a polypeptide sequence using a given rule.\n@@ -581,6 +590,7 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exce\n\"\"\"\nreturn set(_cleave(sequence, rule, missed_cleavages, min_length, semi, exception))\n+\ndef _cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exception=None):\n\"\"\"Like :py:func:`cleave`, but the result is a list. Refer to\n:py:func:`cleave` for explanation of parameters.\n@@ -614,6 +624,7 @@ def _cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exc\npeptides.append(seq[k:])\nreturn peptides\n+\ndef num_sites(sequence, rule, **kwargs):\n\"\"\"Count the number of sites where `sequence` can be cleaved using\nthe given `rule` (e.g. number of miscleavages for a peptide).\n@@ -642,6 +653,7 @@ def num_sites(sequence, rule, **kwargs):\n\"\"\"\nreturn len(_cleave(sequence, rule, **kwargs)) - 1\n+\nexpasy_rules = {\n'arg-c': r'R',\n'asp-n': r'\\w(?=D)',\n@@ -701,6 +713,7 @@ exception=parser.expasy_rules['trypsin_exception'])\n{'PEPTIDKDE'}\n\"\"\"\n+\ndef isoforms(sequence, **kwargs):\n\"\"\"\nApply variable and fixed modifications to the polypeptide and yield\n@@ -783,11 +796,9 @@ def isoforms(sequence, **kwargs):\nm = main(group)[0]\nif m == 0 and not is_term_mod(mod):\ngroup.insert(0, mod)\n- elif mod[0] == '-' and (group[-1] == std_cterm or (\n- group[-1][0] == '-' and override)):\n+ elif mod[0] == '-' and (group[-1] == std_cterm or (group[-1][0] == '-' and override)):\ngroup[-1] = mod\n- elif mod[-1] == '-' and (group[0] == std_nterm or (\n- group[0][-1] == '-' and override)):\n+ elif mod[-1] == '-' and (group[0] == std_nterm or (group[0][-1] == '-' and override)):\ngroup[0] = mod\nelif not is_term_mod(mod):\nif m and not group[m - 1][-1] == '-':\n@@ -811,42 +822,38 @@ def isoforms(sequence, **kwargs):\n# Apply fixed modifications\nfor cmod in fixed_mods:\nfor i, group in enumerate(parsed):\n- if fixed_mods[cmod] == True or main(group)[1] in fixed_mods[cmod]:\n+ if fixed_mods[cmod] is True or main(group)[1] in fixed_mods[cmod]:\nparsed[i] = apply_mod(group, cmod)\n# Create a list of possible states for each group\n# Start with N-terminal mods and regular mods on the N-terminal residue\nsecond = set(apply_mod(parsed[0], m) for m, r in variable_mods.items()\n- if (r == True or\n- main(parsed[0])[1] in r or\n- 'nterm' + main(parsed[0])[1] in r or\n- (len(parsed) == 1 and 'cterm' + main(parsed[0])[1] in r))\n- and not is_term_mod(m)\n+ if (r is True or main(parsed[0])[1] in r or 'nterm' + main(parsed[0])[1] in r or (\n+ len(parsed) == 1 and 'cterm' + main(parsed[0])[1] in r)) and not is_term_mod(m)\n).union([parsed[0]])\nfirst = it.chain((apply_mod(group, mod) for group in second\nfor mod, res in variable_mods.items()\nif 
(mod.endswith('-') or (mod.startswith('-') and len(parsed) == 1))\n- and (res == True or main(group)[1] in res)), second)\n+ and (res is True or main(group)[1] in res)), second)\nstates = [[parsed[0]] + list(set(first).difference({parsed[0]}))]\n+\n# Continue with regular mods\n- states.extend([group] + list(set(apply_mod(group, mod)\n- for mod in variable_mods if (\n- variable_mods[mod] == True or\n- group[-1] in variable_mods[mod]) and not is_term_mod(mod)\n+ states.extend([group] + list(set(\n+ apply_mod(group, mod) for mod in variable_mods if (\n+ variable_mods[mod] is True or group[-1] in variable_mods[mod]) and not is_term_mod(mod)\n).difference({group}))\nfor group in parsed[1:-1])\n+\n# Finally add C-terminal mods and regular mods on the C-terminal residue\nif len(parsed) > 1:\nsecond = set(apply_mod(parsed[-1], m) for m, r in variable_mods.items()\n- if (r == True or\n- main(parsed[-1])[1] in r or\n- 'cterm' + main(parsed[-1])[1] in r)\n+ if (r is True or main(parsed[-1])[1] in r or 'cterm' + main(parsed[-1])[1] in r)\nand not is_term_mod(m)\n).union((parsed[-1],))\nfirst = it.chain((apply_mod(group, mod) for group in second\nfor mod, res in variable_mods.items()\nif mod.startswith('-') and (\n- res == True or main(group)[1] in res)), second)\n+ res is True or main(group)[1] in res)), second)\nstates.append([parsed[-1]] + list(set(first).difference({parsed[-1]})))\nsites = [s for s in enumerate(states) if len(s[1]) > 1]\n@@ -860,8 +867,7 @@ def isoforms(sequence, **kwargs):\nfor i, e in comb:\nskel[i] = e[1:]\nyield skel\n- possible_states = it.chain.from_iterable(\n- it.product(*skel) for skel in state_lists())\n+ possible_states = it.chain.from_iterable(it.product(*skel) for skel in state_lists())\nif format_ == 'split':\ndef strip_std_terms():\n@@ -880,6 +886,7 @@ def isoforms(sequence, **kwargs):\nelse:\nraise PyteomicsError('Unsupported value of \"format\": {}'.format(format_))\n+\ndef coverage(protein, peptides):\n\"\"\"Calculate how much of `protein` is covered by `peptides`.\nPeptides can overlap. If a peptide is found multiple times in `protein`,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_parser.py",
"new_path": "tests/test_parser.py",
"diff": "@@ -6,6 +6,7 @@ from pyteomics import parser\nfrom string import ascii_uppercase as uppercase\nimport random\n+\nclass ParserTest(unittest.TestCase):\ndef setUp(self):\nself.simple_sequences = [''.join(random.choice(uppercase) for i in range(\n@@ -33,10 +34,8 @@ class ParserTest(unittest.TestCase):\nself.assertEqual(['T', 'E', 'pS', 'T', 'oxM'],\nparser.parse('TEpSToxM', labels=parser.std_labels + ['pS', 'oxM']))\nself.assertEqual(\n- [('H-', 'z', 'P'), ('E',), ('P',), ('z', 'T'), ('I',), ('D',),\n- ('z', 'E', '-OH')],\n- parser.parse(\n- 'zPEPzTIDzE', True, True, labels=parser.std_labels+['z']))\n+ [('H-', 'z', 'P'), ('E',), ('P',), ('z', 'T'), ('I',), ('D',), ('z', 'E', '-OH')],\n+ parser.parse('zPEPzTIDzE', True, True, labels=parser.std_labels + ['z']))\ndef test_tostring(self):\nfor seq in self.simple_sequences:\n@@ -57,8 +56,7 @@ class ParserTest(unittest.TestCase):\nself.assertEqual(1, comp['nterm' + seq[0]])\nif len(seq) > 1:\nself.assertEqual(1, comp['cterm' + seq[-1]])\n- self.assertEqual(sum(comp_default.values()),\n- sum(comp.values()))\n+ self.assertEqual(sum(comp_default.values()), sum(comp.values()))\ndef test_cleave(self):\nself.assertEqual(parser._cleave('PEPTIDEKS', parser.expasy_rules['trypsin']), ['PEPTIDEK', 'S'])\n@@ -84,43 +82,29 @@ class ParserTest(unittest.TestCase):\nself.assertTrue(len(elem) >= ml)\ndef test_num_sites(self):\n- self.assertEqual(\n- parser.num_sites('RKCDE', 'K'), 1)\n- self.assertEqual(\n- parser.num_sites('RKCDE', 'E'), 0)\n- self.assertEqual(\n- parser.num_sites('RKCDE', 'R'), 1)\n- self.assertEqual(\n- parser.num_sites('RKCDE', 'Z'), 0)\n+ self.assertEqual(parser.num_sites('RKCDE', 'K'), 1)\n+ self.assertEqual(parser.num_sites('RKCDE', 'E'), 0)\n+ self.assertEqual(parser.num_sites('RKCDE', 'R'), 1)\n+ self.assertEqual(parser.num_sites('RKCDE', 'Z'), 0)\ndef test_isoforms_simple(self):\nself.assertEqual(\n- set(parser.isoforms('PEPTIDE',\n- variable_mods={'xx': ['A', 'B', 'P', 'E']})),\n- {'PEPTIDE', 'PEPTIDxxE', 'PExxPTIDE', 'PExxPTIDxxE', 'PxxEPTIDE',\n+ set(parser.isoforms('PEPTIDE', variable_mods={'xx': ['A', 'B', 'P', 'E']})),\n+ {\n+ 'PEPTIDE', 'PEPTIDxxE', 'PExxPTIDE', 'PExxPTIDxxE', 'PxxEPTIDE',\n'PxxEPTIDxxE', 'PxxExxPTIDE', 'PxxExxPTIDxxE', 'xxPEPTIDE',\n'xxPEPTIDxxE', 'xxPExxPTIDE', 'xxPExxPTIDxxE', 'xxPxxEPTIDE',\n- 'xxPxxEPTIDxxE', 'xxPxxExxPTIDE', 'xxPxxExxPTIDxxE'})\n+ 'xxPxxEPTIDxxE', 'xxPxxExxPTIDE', 'xxPxxExxPTIDxxE'\n+ })\ndef test_isoforms_universal(self):\n- self.assertEqual(\n- set(parser.isoforms('PEPTIDE',\n- variable_mods={'xx-': True})),\n- {'PEPTIDE', 'xx-PEPTIDE'})\n- self.assertEqual(\n- set(parser.isoforms('PEPTIDE',\n- variable_mods={'-xx': True})),\n- {'PEPTIDE', 'PEPTIDE-xx'})\n+ self.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'xx-': True})), {'PEPTIDE', 'xx-PEPTIDE'})\n+ self.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'-xx': True})), {'PEPTIDE', 'PEPTIDE-xx'})\nfor seq in self.simple_sequences:\n- self.assertEqual(\n- sum(1 for _ in parser.isoforms(seq,\n- variable_mods={'x': True})),\n- 2**len(seq))\n+ self.assertEqual(sum(1 for _ in parser.isoforms(seq, variable_mods={'x': True})), 2**len(seq))\ndef test_isoforms_terminal(self):\n- self.assertEqual(\n- set(parser.isoforms('PEPTIDE',\n- variable_mods={'xx': ['ntermP'], 'yy-': 'P'})),\n+ self.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'xx': ['ntermP'], 'yy-': 'P'})),\n{'PEPTIDE', 'xxPEPTIDE', 'yy-PEPTIDE', 'yy-xxPEPTIDE'})\ndef test_isoforms_len(self):\n@@ -132,13 +116,13 @@ class 
ParserTest(unittest.TestCase):\nforms = sum(1 for x in modseqs)\npp = parser.parse(peptide, labels=self.extlabels)\nN = 0\n- if pp[0] =='N': N += 1\n- if pp[-1] == 'C': N += 1\n+ if pp[0] == 'N':\n+ N += 1\n+ if pp[-1] == 'C':\n+ N += 1\nfor p in modseqs:\n- self.assertEqual(len(pp),\n- parser.length(p, labels=self.extlabels))\n- self.assertEqual(forms, (3**pp.count('A')) *\n- (2**(pp.count('X')+pp.count('C'))) * 2**N)\n+ self.assertEqual(len(pp), parser.length(p, labels=self.extlabels))\n+ self.assertEqual(forms, (3**pp.count('A')) * (2 ** (pp.count('X') + pp.count('C'))) * 2 ** N)\ndef test_isoforms_maxmods(self):\nfor j in range(50):\n@@ -146,8 +130,7 @@ class ParserTest(unittest.TestCase):\nM = random.randint(1, 10)\npeptide = ''.join([random.choice(self.labels) for _ in range(L)])\nmodseqs = parser.isoforms(peptide, variable_mods=self.potential,\n- labels=self.labels,\n- max_mods=M, format='split')\n+ labels=self.labels, max_mods=M, format='split')\npp = parser.parse(peptide, labels=self.extlabels, split=True)\nfor ms in modseqs:\nself.assertEqual(len(pp), len(ms))\n@@ -165,7 +148,6 @@ class ParserTest(unittest.TestCase):\nself.assertFalse(parser.fast_valid(bad, labels=self.labels))\nself.assertFalse(parser.valid(bad, labels=self.labels))\n-\ndef test_valid(self):\nfor j in range(50):\nL = random.randint(1, 10)\n@@ -180,6 +162,7 @@ class ParserTest(unittest.TestCase):\nself.assertFalse(parser.fast_valid(bad, labels=self.labels))\nself.assertFalse(parser.valid(bad, labels=self.labels))\n+\nif __name__ == '__main__':\nimport doctest\ndoctest.testmod(parser)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix linter warnings in parser and test_parser |
377,522 | 25.10.2020 03:17:30 | -10,800 | afd33e96705674389105be42e7c5ab6b2a223931 | Stabilize output ordering in parser.isoforms (issue | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/parser.py",
"new_path": "pyteomics/parser.py",
"diff": "@@ -789,11 +789,12 @@ def isoforms(sequence, **kwargs):\nreturn len(group) + i, group[i]\ndef apply_mod(label, mod):\n- # `label` is assumed to be a tuple (see split option of parse)\n+ # `label` is assumed to be a tuple (see split option of `parse`)\n# unmodified termini are assumed shown\n- # if the modification is not applicable, `label` is returned\n+ # if the modification is not applicable, `None` is returned\ngroup = list(label)\nm = main(group)[0]\n+ c = True # whether the change is applied in the end\nif m == 0 and not is_term_mod(mod):\ngroup.insert(0, mod)\nelif mod[0] == '-' and (group[-1] == std_cterm or (group[-1][0] == '-' and override)):\n@@ -801,14 +802,25 @@ def isoforms(sequence, **kwargs):\nelif mod[-1] == '-' and (group[0] == std_nterm or (group[0][-1] == '-' and override)):\ngroup[0] = mod\nelif not is_term_mod(mod):\n- if m and not group[m - 1][-1] == '-':\n+ if m and group[m - 1][-1] != '-':\nif override:\ngroup[m - 1] = mod\n+ else:\n+ c = False\nelse:\ngroup.insert(m, mod)\n+ else:\n+ c = False\n+ if c:\nreturn tuple(group)\nvariable_mods = kwargs.get('variable_mods', {})\n+ varmods_term, varmods_non_term = [], []\n+ for m, r in sorted(variable_mods.items()):\n+ if is_term_mod(m):\n+ varmods_term.append((m, r))\n+ else:\n+ varmods_non_term.append((m, r))\nfixed_mods = kwargs.get('fixed_mods', {})\nparse_kw = {}\nif 'labels' in kwargs:\n@@ -827,34 +839,51 @@ def isoforms(sequence, **kwargs):\n# Create a list of possible states for each group\n# Start with N-terminal mods and regular mods on the N-terminal residue\n- second = set(apply_mod(parsed[0], m) for m, r in variable_mods.items()\n- if (r is True or main(parsed[0])[1] in r or 'nterm' + main(parsed[0])[1] in r or (\n- len(parsed) == 1 and 'cterm' + main(parsed[0])[1] in r)) and not is_term_mod(m)\n- ).union([parsed[0]])\n- first = it.chain((apply_mod(group, mod) for group in second\n- for mod, res in variable_mods.items()\n- if (mod.endswith('-') or (mod.startswith('-') and len(parsed) == 1))\n- and (res is True or main(group)[1] in res)), second)\n- states = [[parsed[0]] + list(set(first).difference({parsed[0]}))]\n+ states = [[parsed[0]]]\n+ m0 = main(parsed[0])[1]\n+ for m, r in varmods_non_term:\n+ if r is True or m0 in r or 'nterm' + m0 in r or len(parsed) == 1 and 'cterm' + m0 in r:\n+ applied = apply_mod(parsed[0], m)\n+ if applied is not None:\n+ states[0].append(applied)\n+ more_states = []\n+ for m, r in varmods_term:\n+ if r is True or m0 in r:\n+ if m[-1] == '-' or len(parsed) == 1:\n+ for group in states[0]:\n+ applied = apply_mod(group, m)\n+ if applied is not None:\n+ more_states.append(applied)\n+ states[0].extend(more_states)\n# Continue with regular mods\n- states.extend([group] + list(set(\n- apply_mod(group, mod) for mod in variable_mods if (\n- variable_mods[mod] is True or group[-1] in variable_mods[mod]) and not is_term_mod(mod)\n- ).difference({group}))\n- for group in parsed[1:-1])\n+ for group in parsed[1:-1]:\n+ gstates = [group]\n+ for m, r in varmods_non_term:\n+ if r is True or group[-1] in r:\n+ applied = apply_mod(group, m)\n+ if applied is not None:\n+ gstates.append(applied)\n+ states.append(gstates)\n# Finally add C-terminal mods and regular mods on the C-terminal residue\nif len(parsed) > 1:\n- second = set(apply_mod(parsed[-1], m) for m, r in variable_mods.items()\n- if (r is True or main(parsed[-1])[1] in r or 'cterm' + main(parsed[-1])[1] in r)\n- and not is_term_mod(m)\n- ).union((parsed[-1],))\n- first = it.chain((apply_mod(group, mod) for group in second\n- for 
mod, res in variable_mods.items()\n- if mod.startswith('-') and (\n- res is True or main(group)[1] in res)), second)\n- states.append([parsed[-1]] + list(set(first).difference({parsed[-1]})))\n+ states.append([parsed[-1]])\n+ m1 = main(parsed[-1])[1]\n+ for m, r in varmods_non_term:\n+ if r is True or m1 in r or 'cterm' + m1 in r or len(parsed) == 1 and 'nterm' + m1 in r:\n+ applied = apply_mod(parsed[-1], m)\n+ if applied is not None:\n+ states[-1].append(applied)\n+ more_states = []\n+ for m, r in varmods_term:\n+ if r is True or m1 in r:\n+ if m[0] == '-' or len(parsed) == 1:\n+ for group in states[-1]:\n+ applied = apply_mod(group, m)\n+ if applied is not None:\n+ more_states.append(applied)\n+ states[-1].extend(more_states)\nsites = [s for s in enumerate(states) if len(s[1]) > 1]\nif max_mods is None or max_mods > len(sites):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_parser.py",
"new_path": "tests/test_parser.py",
"diff": "@@ -97,6 +97,10 @@ class ParserTest(unittest.TestCase):\n'xxPxxEPTIDxxE', 'xxPxxExxPTIDE', 'xxPxxExxPTIDxxE'\n})\n+ def test_isoforms_simple_2(self):\n+ self.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'x': 'T', 'y': 'T'})),\n+ {'PEPTIDE', 'PEPxTIDE', 'PEPyTIDE'})\n+\ndef test_isoforms_universal(self):\nself.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'xx-': True})), {'PEPTIDE', 'xx-PEPTIDE'})\nself.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'-xx': True})), {'PEPTIDE', 'PEPTIDE-xx'})\n@@ -110,19 +114,14 @@ class ParserTest(unittest.TestCase):\ndef test_isoforms_len(self):\nfor j in range(50):\nL = random.randint(1, 10)\n- peptide = ''.join([random.choice(self.labels) for _ in range(L)])\n- modseqs = parser.isoforms(peptide, variable_mods=self.potential,\n- fixed_mods=self.constant, labels=self.labels)\n- forms = sum(1 for x in modseqs)\n+ peptide = ''.join(random.choice(self.labels) for _ in range(L))\n+ modseqs = list(parser.isoforms(peptide, variable_mods=self.potential,\n+ fixed_mods=self.constant, labels=self.labels))\npp = parser.parse(peptide, labels=self.extlabels)\n- N = 0\n- if pp[0] == 'N':\n- N += 1\n- if pp[-1] == 'C':\n- N += 1\n+ N = (pp[0] == 'N') + (pp[-1] == 'C')\nfor p in modseqs:\nself.assertEqual(len(pp), parser.length(p, labels=self.extlabels))\n- self.assertEqual(forms, (3**pp.count('A')) * (2 ** (pp.count('X') + pp.count('C'))) * 2 ** N)\n+ self.assertEqual(len(modseqs), (3 ** pp.count('A')) * (2 ** (pp.count('X') + pp.count('C') + N)))\ndef test_isoforms_maxmods(self):\nfor j in range(50):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Stabilize output ordering in parser.isoforms (issue #15) |
377,522 | 25.10.2020 03:29:29 | -10,800 | 46305658a4a49c0c521af45cafc379d20c311bc5 | Use lists in tests to ensure order | [
{
"change_type": "MODIFY",
"old_path": "tests/test_parser.py",
"new_path": "tests/test_parser.py",
"diff": "@@ -89,17 +89,14 @@ class ParserTest(unittest.TestCase):\ndef test_isoforms_simple(self):\nself.assertEqual(\n- set(parser.isoforms('PEPTIDE', variable_mods={'xx': ['A', 'B', 'P', 'E']})),\n- {\n- 'PEPTIDE', 'PEPTIDxxE', 'PExxPTIDE', 'PExxPTIDxxE', 'PxxEPTIDE',\n- 'PxxEPTIDxxE', 'PxxExxPTIDE', 'PxxExxPTIDxxE', 'xxPEPTIDE',\n- 'xxPEPTIDxxE', 'xxPExxPTIDE', 'xxPExxPTIDxxE', 'xxPxxEPTIDE',\n- 'xxPxxEPTIDxxE', 'xxPxxExxPTIDE', 'xxPxxExxPTIDxxE'\n- })\n+ list(parser.isoforms('PEPTIDE', variable_mods={'xx': ['A', 'B', 'P', 'E']})),\n+ ['PEPTIDE', 'PEPTIDxxE', 'PExxPTIDE', 'PExxPTIDxxE', 'PxxEPTIDE', 'PxxEPTIDxxE', 'PxxExxPTIDE',\n+ 'PxxExxPTIDxxE', 'xxPEPTIDE', 'xxPEPTIDxxE', 'xxPExxPTIDE', 'xxPExxPTIDxxE', 'xxPxxEPTIDE',\n+ 'xxPxxEPTIDxxE', 'xxPxxExxPTIDE', 'xxPxxExxPTIDxxE'])\ndef test_isoforms_simple_2(self):\n- self.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'x': 'T', 'y': 'T'})),\n- {'PEPTIDE', 'PEPxTIDE', 'PEPyTIDE'})\n+ self.assertEqual(list(parser.isoforms('PEPTIDE', variable_mods={'x': 'T', 'y': 'T'})),\n+ ['PEPTIDE', 'PEPxTIDE', 'PEPyTIDE'])\ndef test_isoforms_universal(self):\nself.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'xx-': True})), {'PEPTIDE', 'xx-PEPTIDE'})\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Use lists in tests to ensure order |
377,522 | 01.11.2020 23:21:56 | -10,800 | 0c40ead9615ca1404b7a2d52baf600638f3c7674 | Fix for fixed mods (no pun intended) | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/parser.py",
"new_path": "pyteomics/parser.py",
"diff": "@@ -832,10 +832,10 @@ def isoforms(sequence, **kwargs):\nformat_ = kwargs.get('format', 'str')\n# Apply fixed modifications\n- for cmod in fixed_mods:\n+ for cmod, res in fixed_mods.items():\nfor i, group in enumerate(parsed):\n- if fixed_mods[cmod] is True or main(group)[1] in fixed_mods[cmod]:\n- parsed[i] = apply_mod(group, cmod)\n+ if res is True or main(group)[1] in res:\n+ parsed[i] = apply_mod(group, cmod) or parsed[i]\n# Create a list of possible states for each group\n# Start with N-terminal mods and regular mods on the N-terminal residue\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.4.0dev2'\n+__version__ = '4.4.0dev3'\nfrom collections import namedtuple\nimport re\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_parser.py",
"new_path": "tests/test_parser.py",
"diff": "@@ -94,6 +94,11 @@ class ParserTest(unittest.TestCase):\n'PxxExxPTIDxxE', 'xxPEPTIDE', 'xxPEPTIDxxE', 'xxPExxPTIDE', 'xxPExxPTIDxxE', 'xxPxxEPTIDE',\n'xxPxxEPTIDxxE', 'xxPxxExxPTIDE', 'xxPxxExxPTIDxxE'])\n+ def test_isoforms_fixed_simple(self):\n+ self.assertEqual(\n+ list(parser.isoforms('PEPTIDE', fixed_mods={'n-': True, '-c': True, 'x': ['P', 'T']})),\n+ ['n-xPExPxTIDE-c'])\n+\ndef test_isoforms_simple_2(self):\nself.assertEqual(list(parser.isoforms('PEPTIDE', variable_mods={'x': 'T', 'y': 'T'})),\n['PEPTIDE', 'PEPxTIDE', 'PEPyTIDE'])\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix for fixed mods (no pun intended) |
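A sketch of the resulting behaviour, reusing the exact inputs and expected outputs from the tests in this and the preceding isoforms commits:

    from pyteomics import parser

    print(list(parser.isoforms('PEPTIDE', variable_mods={'x': 'T', 'y': 'T'})))
    # ['PEPTIDE', 'PEPxTIDE', 'PEPyTIDE'] - variable mods are tried in sorted order

    print(list(parser.isoforms('PEPTIDE', fixed_mods={'n-': True, '-c': True, 'x': ['P', 'T']})))
    # ['n-xPExPxTIDE-c'] - fixed mods are applied to every matching position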
377,522 | 02.11.2020 19:48:06 | -10,800 | 6b32664b113b098766f401488d50e2bfec75eded | Use provided value of use_index with files passed by name. Fixes | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -49,6 +49,7 @@ except ImportError:\nfrom . import PyteomicsError\n+\ndef _keepstate(func):\n\"\"\"Decorator to help keep the position in open files passed as\npositional arguments to functions\"\"\"\n@@ -129,12 +130,13 @@ class _file_obj(object):\ndef __iter__(self):\nreturn iter(self.file)\n+\nclass NoOpBaseReader(object):\ndef __init__(self, *args, **kwargs):\npass\n-class IteratorContextManager(NoOpBaseReader):\n+class IteratorContextManager(NoOpBaseReader):\ndef __init__(self, *args, **kwargs):\nself._func = kwargs.pop('parser_func')\nself._args = args\n@@ -217,6 +219,7 @@ class FileReader(IteratorContextManager):\nraise AttributeError\nreturn getattr(self._source, attr)\n+\ndef remove_bom(bstr):\nreturn bstr.replace(codecs.BOM_LE, b'').lstrip(b\"\\x00\")\n@@ -618,6 +621,7 @@ class WritableIndex(object):\nclass OffsetIndex(OrderedDict, WritableIndex):\n'''An augmented OrderedDict that formally wraps getting items by index\n'''\n+\ndef __init__(self, *args, **kwargs):\nsuper(OffsetIndex, self).__init__(*args, **kwargs)\nself._index_sequence = None\n@@ -878,10 +882,10 @@ def _make_chain(reader, readername, full_output=False):\ndef _check_use_index(source, use_index, default):\ntry:\n- if isinstance(source, basestring):\n- return default\nif use_index is not None:\nuse_index = bool(use_index)\n+ if isinstance(source, basestring):\n+ return use_index if use_index is not None else default\nseekable = True\nif hasattr(source, 'seekable'):\nif not source.seekable():\n@@ -920,6 +924,7 @@ class FileReadingProcess(mp.Process):\nThe reader class must support the :py:meth:`__getitem__` dict-like lookup.\n\"\"\"\n+\ndef __init__(self, reader_spec, target_spec, qin, qout, args_spec, kwargs_spec):\nsuper(FileReadingProcess, self).__init__(name='pyteomics-map-worker')\nself.reader_spec = reader_spec\n@@ -949,6 +954,7 @@ class FileReadingProcess(mp.Process):\ndef is_done(self):\nreturn self._done_flag.is_set()\n+\ntry:\n_NPROC = mp.cpu_count()\nexcept NotImplementedError:\n@@ -956,6 +962,7 @@ except NotImplementedError:\n_QUEUE_TIMEOUT = 4\n_QUEUE_SIZE = int(1e7)\n+\nclass TaskMappingMixin(NoOpBaseReader):\ndef __init__(self, *args, **kwargs):\n'''\n@@ -1061,7 +1068,6 @@ class TaskMappingMixin(NoOpBaseReader):\nserialized = self._build_worker_spec(target, args, kwargs)\n-\nin_queue = mp.Queue(self._queue_size)\nout_queue = mp.Queue(self._queue_size)\n@@ -1098,7 +1104,6 @@ class TaskMappingMixin(NoOpBaseReader):\nreturn iter(self._offset_index.keys())\n-\nclass ChainBase(object):\n\"\"\"Chain :meth:`sequence_maker` for several sources into a\nsingle iterable. Positional arguments should be sources like\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.4.0dev3'\n+__version__ = '4.4.0dev4'\nfrom collections import namedtuple\nimport re\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -892,6 +892,11 @@ class UseIndexTest(unittest.TestCase):\ndef _check_file_object(self, fo, value):\nself.assertEqual(aux._check_use_index(fo, None, None), value)\n+ def test_str_name(self):\n+ for ui in [False, True]:\n+ for default in [False, True]:\n+ self.assertEqual(aux._check_use_index('test.mgf', ui, default), ui)\n+\ndef test_textfile(self):\nwith open('test.fasta') as f:\nself._check_file_object(f, False)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Use provided value of use_index with files passed by name. Fixes #16 |
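A sketch of the corrected behaviour when a file name is passed; 'spectra.mgf' is a hypothetical file, and the dispatch between the indexed and sequential readers is assumed to follow mgf.read:

    from pyteomics import mgf

    indexed_reader = mgf.read('spectra.mgf', use_index=True)   # IndexedMGF, random access enabled
    plain_reader = mgf.read('spectra.mgf', use_index=False)    # plain sequential MGF reader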
377,522 | 07.11.2020 19:08:21 | -10,800 | 2e34d2cbc1fee3d9d8055026f88542ffbcfe1f8b | Rename types to ion_types in annotate_spectrum | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- Make the order of isoforms reproducible in :py:func:`pyteomics.parser.isoforms`\n(`#15 <https://github.com/levitsky/pyteomics/issues/15>`_).\n+ - Rename `types` keyword argument to `ion_types` in :py:func:`pyteomics.pylab_aux.annotate_spectrum`.\n+\n- Fix `#16 <https://github.com/levitsky/pyteomics/issues/16>`_, a bug introduced in 4.3.3.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -372,7 +372,7 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nA modX sequence.\ncentroided : bool, optional\nPassed to :py:func:`plot_spectrum`.\n- types : Container, keyword only, optional\n+ ion_types : Container, keyword only, optional\nIon types to be considered for annotation. Default is `('b', 'y')`.\nmaxcharge : int, keyword only, optional\nMaximum charge state for fragment ions to be considered. Default is `1`.\n@@ -399,7 +399,7 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\n**kwargs\nPassed to :py:func:`plot_spectrum`.\n\"\"\"\n- types = kwargs.pop('types', ('b', 'y'))\n+ types = kwargs.pop('ion_types', ('b', 'y'))\nmaxcharge = kwargs.pop('maxcharge', 1)\naa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)\nmass_data = kwargs.pop('mass_data', mass.nist_mass)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Rename types to ion_types in annotate_spectrum |
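With the keyword renamed, a typical call might look as follows; 'spectra.mgf' and the sequence 'PEPTIDE' are placeholders:

    from pyteomics import mgf, pylab_aux
    import pylab

    with mgf.read('spectra.mgf') as reader:
        spectrum = next(reader)

    pylab.figure()
    pylab_aux.annotate_spectrum(spectrum, 'PEPTIDE', ion_types=('b', 'y'), maxcharge=2)
    pylab.show()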
377,522 | 08.12.2020 04:44:40 | -10,800 | aa0791e64038f650e554fde439e9035b02cf4279 | Add Python 3.9 to test runner | [
{
"change_type": "MODIFY",
"old_path": "tests/runtests.sh",
"new_path": "tests/runtests.sh",
"diff": "#!/bin/bash\nexport PYTHONPATH=\"..\"\nif [ $# -eq 0 ]; then\n- find . -name 'test_*.py' -exec bash -c 'declare -a versions=(2.7 3.3 3.4 3.5 3.6 3.7 3.8); for v in \"${versions[@]}\"; do command -v \"python${v}\" > /dev/null 2>&1 && { echo \"Executing python${v}\" \"$0\"; eval \"python${v}\" \"$0\"; }; done' {} \\;\n+ find . -name 'test_*.py' -exec bash -c 'declare -a versions=(2.7 3.3 3.4 3.5 3.6 3.7 3.8 3.9); for v in \"${versions[@]}\"; do command -v \"python${v}\" > /dev/null 2>&1 && { echo \"Executing python${v}\" \"$0\"; eval \"python${v}\" \"$0\"; }; done' {} \\;\nelse\nfor f; do\n- for v in 2.7 3.3 3.4 3.5 3.6 3.7 3.8; do\n+ for v in 2.7 3.3 3.4 3.5 3.6 3.7 3.8 3.9; do\ncommand -v \"python${v}\" >/dev/null 2>&1 && { echo \"Executing python${v}\" \"$f\"; eval \"python${v}\" \"$f\"; }\ndone\ndone\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add Python 3.9 to test runner |
377,522 | 08.12.2020 05:54:42 | -10,800 | af098336f5489313d256121a0dd7410871ed1697 | Add Python 3.9 in Github Actions | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/pythonpackage.yml",
"new_path": ".github/workflows/pythonpackage.yml",
"diff": "@@ -8,7 +8,7 @@ jobs:\nruns-on: ubuntu-latest\nstrategy:\nmatrix:\n- python-version: [2.7, 3.5, 3.6, 3.7, 3.8]\n+ python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9]\nsteps:\n- uses: actions/checkout@v2\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add Python 3.9 in Github Actions |
377,522 | 08.12.2020 05:56:13 | -10,800 | e7724962febfbae60dc7e8049bb56a8339230cf4 | Rewrite _check_use_index and expand its tests | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "+dev\n+---\n+\n+Further tweaked behavior of :py:func:`pyteomics.auxiliary.file_helpers._check_use_index`, which is responsible for\n+handling of `use_index` in :py:func:`read` functions in parser modules.\n+\n4.4\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -55,8 +55,7 @@ def _keepstate(func):\npositional arguments to functions\"\"\"\n@wraps(func)\ndef wrapped(*args, **kwargs):\n- positions = [getattr(arg, 'seek', None) and\n- getattr(arg, 'tell', type(None))() for arg in args]\n+ positions = [getattr(arg, 'seek', None) and getattr(arg, 'tell', type(None))() for arg in args]\nfor arg, pos in zip(args, positions):\nif pos is not None:\narg.seek(0)\n@@ -884,34 +883,46 @@ def _check_use_index(source, use_index, default):\ntry:\nif use_index is not None:\nuse_index = bool(use_index)\n+\n+ # if a file name is given, do not override anything; short-circuit\nif isinstance(source, basestring):\nreturn use_index if use_index is not None else default\n- seekable = True\n+\n+ # collect information on source\nif hasattr(source, 'seekable'):\n- if not source.seekable():\n- use_index = False\n- seekable = False\n+ seekable = source.seekable()\n+ else:\n+ seekable = None\n+\nif hasattr(source, 'mode'):\n- ui = 'b' in source.mode\n- if use_index is not None and ui != use_index:\n- warnings.warn('use_index is {}, but the file mode is {}. '\n- 'Setting use_index to {}'.format(use_index, source.mode, ui))\n- use_index = ui\n+ binary = 'b' in source.mode\n+ else:\n+ binary = None\n- if use_index and not seekable:\n+ # now check for conflicts\n+ if seekable is False:\n+ if binary:\n+ raise PyteomicsError('Cannot work with non-seekable file in binary mode: {}.'.format(source))\n+ if use_index:\nwarnings.warn('Cannot use indexing as {} is not seekable. Setting `use_index` to False.'.format(source))\nuse_index = False\n+ elif binary is not None:\n+ if use_index is not None and binary != use_index:\n+ warnings.warn('use_index is {}, but the file mode is {}. '\n+ 'Setting `use_index` to {}'.format(use_index, source.mode, binary))\n+ use_index = binary\n+ else:\n+ warnings.warn('Could not check mode on {}. Specify `use_index` explicitly to avoid errors.'.format(source))\nif use_index is not None:\nreturn use_index\n- warnings.warn('Could not check mode on {}. '\n- 'Specify `use_index` explicitly to avoid errors.'.format(source))\nreturn default\n+ except PyteomicsError:\n+ raise\nexcept Exception as e:\n- warnings.warn('Could not check mode on {}. Reason: {!r}. '\n- 'Specify `use_index` explicitly to avoid errors.'.format(source, e))\n+ warnings.warn('Could not check mode on {}. Reason: {!r}. Specify `use_index` explicitly to avoid errors.'.format(source, e))\nif use_index is not None:\nreturn use_index\nreturn default\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -310,6 +310,7 @@ def roc_curve(source):\nreturn roc_curve\n+\n# chain = aux._make_chain(read, 'read')\nchain = aux.ChainBase._make_chain(read)\n@@ -334,15 +335,15 @@ def _is_decoy_prefix(psm, prefix='DECOY_'):\nreturn all(protein['protein'].startswith(prefix)\nfor protein in psm['search_hit'][0]['proteins'])\n+\ndef _is_decoy_suffix(psm, suffix='_DECOY'):\nreturn all(protein['protein'].endswith(suffix)\nfor protein in psm['search_hit'][0]['proteins'])\n-is_decoy = _is_decoy_prefix\n+is_decoy = _is_decoy_prefix\nfdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)\n-_key = lambda x: min(\n- sh['search_score']['expect'] for sh in x['search_hit'])\n+_key = lambda x: min(sh['search_score']['expect'] for sh in x['search_hit'])\nqvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, _key)\nfilter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, _key, qvalues)\nfilter.chain = aux._make_chain(filter, 'filter', True)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.4.0'\n+__version__ = '4.4.1a1'\nfrom collections import namedtuple\nimport re\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -889,8 +889,22 @@ class OffsetIndexTest(unittest.TestCase):\nclass UseIndexTest(unittest.TestCase):\n+ class MockFile:\n+ def __init__(self, seekable, mode):\n+ if mode is not None:\n+ self.mode = mode\n+ if seekable is not None:\n+ self._seekable = seekable\n+\n+ def seekable(self):\n+ return getattr(self, '_seekable', None)\n+\ndef _check_file_object(self, fo, value):\n- self.assertEqual(aux._check_use_index(fo, None, None), value)\n+ with warnings.catch_warnings(record=True) as w:\n+ warnings.simplefilter('always')\n+ result = aux._check_use_index(fo, None, None)\n+ self.assertEqual(len(w), 0)\n+ self.assertEqual(result, value)\ndef test_str_name(self):\nfor ui in [False, True]:\n@@ -925,6 +939,38 @@ class UseIndexTest(unittest.TestCase):\nself.assertEqual(len(w), 1)\nself.assertIs(w[0].category, UserWarning)\n+ def test_error_not_seekable(self):\n+ source = UseIndexTest.MockFile(False, 'rb')\n+ self.assertRaises(aux.PyteomicsError, aux._check_use_index, source, None, None)\n+\n+ def test_warning_not_seekable(self):\n+ source = UseIndexTest.MockFile(False, 'r')\n+ with warnings.catch_warnings(record=True) as w:\n+ warnings.simplefilter('always')\n+ aux._check_use_index(source, True, None)\n+ self.assertEqual(len(w), 1)\n+ self.assertIs(w[0].category, UserWarning)\n+ self.assertIn('is not seekable', str(w[0].message))\n+\n+ def test_warning_wrong_mode(self):\n+ for m in ['rb', 'r']:\n+ source = UseIndexTest.MockFile(True, m)\n+ with warnings.catch_warnings(record=True) as w:\n+ warnings.simplefilter('always')\n+ aux._check_use_index(source, 'b' not in m, None)\n+ self.assertEqual(len(w), 1)\n+ self.assertIs(w[0].category, UserWarning)\n+ self.assertIn('Setting `use_index` to {}'.format('b' in m), str(w[0].message))\n+\n+ def test_warning_no_mode(self):\n+ source = UseIndexTest.MockFile(None, None)\n+ with warnings.catch_warnings(record=True) as w:\n+ warnings.simplefilter('always')\n+ aux._check_use_index(source, True, None)\n+ self.assertEqual(len(w), 1)\n+ self.assertIs(w[0].category, UserWarning)\n+ self.assertIn('Could not check mode', str(w[0].message))\n+\nclass VersionTest(unittest.TestCase):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Rewrite _check_use_index and expand its tests |
377,522 | 11.12.2020 20:37:10 | -10,800 | 7fce8e55d6fab889498c435bfe07e040f3f82445 | Advertise Github Discussions | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "dev\n---\n-Further tweaked behavior of :py:func:`pyteomics.auxiliary.file_helpers._check_use_index`, which is responsible for\n+ - Further tweaked behavior of :py:func:`pyteomics.auxiliary.file_helpers._check_use_index`, which is responsible for\nhandling of `use_index` in :py:func:`read` functions in parser modules.\n+ - Also, check out the `Pyteomics Discussions page <https://github.com/levitsky/pyteomics/discussions>`_!\n+ You can use it to share your thoughts, ask questions, discuss coding practices, etc.\n+\n4.4\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "INFO",
"new_path": "INFO",
"diff": "@@ -26,7 +26,8 @@ Additional, third-party packages extending the Pyteomics functionality can be in\nFeedback & Support\n------------------\n-Please email to pyteomics@googlegroups.com with any questions about Pyteomics.\n-You are welcome to use the Github\n-`issue tracker <https://github.com/levitsky/pyteomics/issues>`_\n-to report bugs, request features, etc.\n+Your questions and suggestions are welcome at:\n+\n+ - pyteomics@googlegroups.com mailing list;\n+ - *(new!)* `Pyteomics Discussions page <https://github.com/levitsky/pyteomics/discussions>`_ on Github;\n+ - Github `issue tracker <https://github.com/levitsky/pyteomics/issues>`_ (for bugs, feature requests, etc.)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Advertise Github Discussions |
377,522 | 14.12.2020 18:45:43 | -10,800 | 319cf52e7b1e967eac375d11d1ead2f17d12c2d0 | Use quotes in index keys in pepxml test data | [
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -214,7 +214,7 @@ pepxml_results = [\n{'protein': 'BL_ORD_ID:154629',\n'protein_descr': 'sp|Q99676|ZN184_HUMAN Zinc finger protein 184 OS=Homo sapiens GN=ZNF184 PE=1 SV=4:reversed'}],\n'tot_num_ions': 44}],\n- 'spectrum': ' Cmpd 24, +MSn(1085.6886), 1.2 min.23.23.3',\n+ 'spectrum': '\"Cmpd 24, +MSn(1085.6886), 1.2 min.23.23.3\"',\n'start_scan': 23}]\nmzid_spectra = {(False, False): [{'id': 'SEQ_spec1',\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test.pep.xml",
"new_path": "tests/test.pep.xml",
"diff": "</search_hit>\n</search_result>\n</spectrum_query>\n- <spectrum_query spectrum=\" Cmpd 24, +MSn(1085.6886), 1.2 min.23.23.3\" start_scan=\"23\" end_scan=\"23\" precursor_neutral_mass=\"3254.044921875\" assumed_charge=\"3\" index=\"3\">\n+ <spectrum_query spectrum=\""Cmpd 24, +MSn(1085.6886), 1.2 min.23.23.3"\" start_scan=\"23\" end_scan=\"23\" precursor_neutral_mass=\"3254.044921875\" assumed_charge=\"3\" index=\"3\">\n<search_result>\n<search_hit hit_rank=\"1\" peptide=\"DQQFDSSSSMALEDCGEETNCQSDFK\" peptide_prev_aa=\"R\" peptide_next_aa=\"I\" protein=\"BL_ORD_ID:125453\" num_tot_proteins=\"1\" num_matched_ions=\"3\" tot_num_ions=\"50\" calc_neutral_pep_mass=\"3254.04711914062\" massdiff=\"-0.002197265625\" is_rejected=\"0\" protein_descr=\"sp|O43149|ZZEF1_HUMAN Zinc finger ZZ-type and EF-hand domain-containing protein 1 OS=Homo sapiens GN=ZZEF1 PE=1 SV=6:reversed\">\n<modification_info modified_peptide=\"DQQFDS[166]S[166]SS[166]MALEDCGEETNCQSDFK\">\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_pepxml.py",
"new_path": "tests/test_pepxml.py",
"diff": "@@ -6,6 +6,7 @@ import unittest\nfrom pyteomics.pepxml import PepXML, read, chain, filter\nfrom data import pepxml_results\n+\nclass PepxmlTest(unittest.TestCase):\nmaxDiff = None\npath = 'test.pep.xml'\n@@ -33,5 +34,6 @@ class PepxmlTest(unittest.TestCase):\nself.assertEqual(list(reader.index['spectrum_query']), specs)\nself.assertEqual(reader[specs[-1]], pepxml_results[-1])\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Use quotes in index keys in pepxml test data |
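The quoting matters for random access, since the spectrum attribute (quotes included) serves as the index key; a minimal sketch mirroring the test above:

    from pyteomics import pepxml

    with pepxml.PepXML('test.pep.xml') as reader:
        keys = list(reader.index['spectrum_query'])  # spectrum titles exactly as they appear in the file
        psm = reader[keys[-1]]                       # random access by spectrum title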
377,522 | 18.12.2020 04:50:40 | -10,800 | bff108cabd758a6a36511a2ba3e09bc127a2b19c | Refactor annotate_spectrum for possibility of external backends | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -40,7 +40,7 @@ See also\nDependencies\n------------\n-This module requires :py:mod:`matplotlib`.\n+This module requires :py:mod:`matplotlib`. Optional dependencies: :py:mod:`adjustText`.\n-------------------------------------------------------------------------------\n@@ -76,8 +76,7 @@ def plot_line(a, b, xlim=None, *args, **kwargs):\nb : float\nThe intercept of the line.\nxlim : tuple, optional\n- Minimal and maximal values of `x`. If not given, :py:func:`pylab.xlim`\n- will be called.\n+ Minimal and maximal values of `x`. If not given, :py:func:`pylab.xlim` will be called.\n*args\nPassed to :py:func:`pylab.plot` after `x` and `y` values.\n**kwargs\n@@ -88,10 +87,9 @@ def plot_line(a, b, xlim=None, *args, **kwargs):\nout : matplotlib.lines.Line2D\nThe line object.\n\"\"\"\n- if xlim is None: xlim = pylab.xlim()\n- return pylab.plot([xlim[0], xlim[1]],\n- [a * xlim[0] + b, a * xlim[1] + b],\n- *args, **kwargs)\n+ if xlim is None:\n+ xlim = pylab.xlim()\n+ return pylab.plot([xlim[0], xlim[1]], [a * xlim[0] + b, a * xlim[1] + b], *args, **kwargs)\ndef scatter_trend(x, y=None, **kwargs):\n@@ -361,44 +359,7 @@ def plot_spectrum(spectrum, centroided=True, *args, **kwargs):\nreturn pylab.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n-def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\n- \"\"\"Plot a spectrum and annotate matching fragment peaks.\n-\n- Parameters\n- ----------\n- spectrum : dict\n- A spectrum as returned by Pyteomics parsers. Needs to have 'm/z array' and 'intensity array' keys.\n- peptide : str\n- A modX sequence.\n- centroided : bool, optional\n- Passed to :py:func:`plot_spectrum`.\n- ion_types : Container, keyword only, optional\n- Ion types to be considered for annotation. Default is `('b', 'y')`.\n- maxcharge : int, keyword only, optional\n- Maximum charge state for fragment ions to be considered. Default is `1`.\n- colors : dict, keyword only, optional\n- Keys are ion types, values are colors to plot the annotated peaks with. Defaults to a red-blue scheme.\n- ftol : float, keyword only, optional\n- A fixed m/z tolerance value for peak matching. Alternative to `rtol`.\n- rtol : float, keyword only, optional\n- A relative m/z error for peak matching. 
Default is 10 ppm.\n- adjust_text : bool, keyword only, optional\n- Adjust the overlapping text annotations using :py:mod:`adjustText`.\n- text_kw : dict, keyword only, optional\n- Keyword arguments for :py:func:`pylab.text`.\n- adjust_kw : dict, keyword only, optional\n- Keyword argyuments for :py:func:`adjust_text`.\n- ion_comp : dict, keyword only, optional\n- A dictionary defining ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.\n- mass_data : dict, keyword only, optional\n- A dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.\n- aa_mass : dict, keyword only, optional\n- A dictionary of amino acid residue masses.\n- *args\n- Passed to :py:func:`plot_spectrum`.\n- **kwargs\n- Passed to :py:func:`plot_spectrum`.\n- \"\"\"\n+def _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\ntypes = kwargs.pop('ion_types', ('b', 'y'))\nmaxcharge = kwargs.pop('maxcharge', 1)\naa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)\n@@ -408,6 +369,7 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nstd_colors.update({i: 'blue' for i in 'abc'})\ncolors = kwargs.pop('colors', std_colors)\nftol = kwargs.pop('ftol', None)\n+ centroided = kwargs.pop('centroided', True)\nif ftol is None:\nrtol = kwargs.pop('rtol', 1e-5)\ntext_kw = kwargs.pop('text_kw', dict(ha='center', clip_on=True, backgroundcolor='#ffffff99'))\n@@ -458,4 +420,57 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nif adjust:\nadjust_text(texts, **adjust_kw)\nkwargs.setdefault('zorder', -1)\n- plot_spectrum(spectrum, centroided, *args, **kwargs)\n+ return plot_spectrum(spectrum, centroided, *args, **kwargs)\n+\n+\n+_annotation_backends = {\n+ 'default': _default_annotate_spectrum,\n+}\n+\n+\n+def annotate_spectrum(spectrum, peptide, *args, **kwargs):\n+ \"\"\"Plot a spectrum and annotate matching fragment peaks.\n+\n+ Parameters\n+ ----------\n+ spectrum : dict\n+ A spectrum as returned by Pyteomics parsers. Needs to have 'm/z array' and 'intensity array' keys.\n+ peptide : str\n+ A modX sequence.\n+ backend : str, keyword only, optional\n+ One of `{'default',}`.\n+ ion_types : Container, keyword only, optional\n+ Ion types to be considered for annotation. Default is `('b', 'y')`.\n+ maxcharge : int, keyword only, optional\n+ Maximum charge state for fragment ions to be considered. Default is `1`.\n+ colors : dict, keyword only, optional\n+ Keys are ion types, values are colors to plot the annotated peaks with. Defaults to a red-blue scheme.\n+ ftol : float, keyword only, optional\n+ A fixed m/z tolerance value for peak matching. Alternative to `rtol`.\n+ rtol : float, keyword only, optional\n+ A relative m/z error for peak matching. Default is 10 ppm.\n+ text_kw : dict, keyword only, optional\n+ Keyword arguments for :py:func:`pylab.text`.\n+ ion_comp : dict, keyword only, optional\n+ A dictionary defining ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.\n+ mass_data : dict, keyword only, optional\n+ A dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.\n+ aa_mass : dict, keyword only, optional\n+ A dictionary of amino acid residue masses.\n+ *args\n+ Passed to the plotting backend.\n+ **kwargs\n+ Passed to the plotting backend.\n+ centroided : bool, keyword only, optional\n+ Passed to :py:func:`plot_spectrum`. Only works with `default` backend.\n+ adjust_text : bool, keyword only, optional\n+ Adjust the overlapping text annotations using :py:mod:`adjustText`. 
Only works with `default` backend.\n+ adjust_kw : dict, keyword only, optional\n+ Keyword arguments for :py:func:`adjust_text`. Only works with `default` backend.\n+ \"\"\"\n+ bname = kwargs.pop('backend', 'default')\n+ backend = _annotation_backends.get(bname)\n+ if backend is None:\n+ raise PyteomicsError('Unknown backend name: {}. Should be one of: {}.'.format(\n+ bname, '; '.join(_annotation_backends)))\n+ return backend(spectrum, peptide, *args, **kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.4.1b1'\n+__version__ = '4.4.1b2'\nfrom collections import namedtuple\nimport re\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Refactor annotate_spectrum to allow external backends |
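With the dispatch table introduced above, the annotation backend is chosen by name at call time. A small usage sketch, assuming matplotlib is installed; the peak lists are invented for illustration and need not match any real fragment:

```python
import numpy as np
from pyteomics import pylab_aux

spectrum = {                                          # hypothetical spectrum dict
    'm/z array': np.array([147.113, 263.088, 376.172]),
    'intensity array': np.array([900.0, 1500.0, 450.0]),
}
# 'default' is the only registered backend at this point; unknown names raise PyteomicsError.
pylab_aux.annotate_spectrum(spectrum, 'PEPTIDEK', backend='default',
                            ion_types=('b', 'y'), maxcharge=1, rtol=1e-5)
```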
377,522 | 18.12.2020 05:20:55 | -10,800 | e519afed0f9eacb44016db9b8bceee0df39146ad | Make _get_time a static method | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -377,7 +377,8 @@ class TimeOrderedIndexedReaderMixin(IndexedReaderMixin):\nsuper(TimeOrderedIndexedReaderMixin, self).__init__(*args, **kwargs)\nself._time = RTLocator(self)\n- def _get_time(self, scan):\n+ @staticmethod\n+ def _get_time(scan):\nraise NotImplementedError\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -324,7 +324,8 @@ class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixi\ndef get_spectrum(self, key):\nreturn self.get_by_id(key)\n- def _get_time(self, spectrum):\n+ @staticmethod\n+ def _get_time(spectrum):\ntry:\nreturn spectrum['params']['rtinseconds']\nexcept KeyError:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -302,7 +302,8 @@ class MzML(xml.ArrayConversionMixin, aux.TimeOrderedIndexedReaderMixin, xml.Mult\ndel info[k]\ninfo.pop('id', None)\n- def _get_time(self, scan):\n+ @staticmethod\n+ def _get_time(scan):\nreturn scan['scanList']['scan'][0]['scan start time']\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -220,5 +220,6 @@ class MzmlTest(unittest.TestCase):\nrecord = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress positive integer compression followed by zlib compression', data.dtype)\nself.assertTrue(np.allclose(data, record.decode(), atol=0.6))\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make _get_time a static method |
377,522 | 18.12.2020 05:31:18 | -10,800 | 63143c62bad21fe26c11f3139cc5a45fbc4db407 | Make auxiliary parsing methods on MGFBase static; move _get_time to MGFBase | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -145,13 +145,16 @@ class MGFBase(object):\nelse:\nself._header = None\n- def parse_precursor_charge(self, charge_text, list_only=False):\n+ @staticmethod\n+ def parse_precursor_charge(charge_text, list_only=False):\nreturn aux._parse_charge(charge_text, list_only=list_only)\n- def parse_peak_charge(self, charge_text, list_only=False):\n+ @staticmethod\n+ def parse_peak_charge(charge_text, list_only=False):\nreturn aux._parse_charge(charge_text, list_only=False)\n- def parse_peak_ion(self, ion_text):\n+ @staticmethod\n+ def parse_peak_ion(ion_text):\nreturn aux._parse_ion(ion_text)\n@property\n@@ -247,6 +250,13 @@ class MGFBase(object):\ndef get_spectrum(self, title):\nraise NotImplementedError()\n+ @staticmethod\n+ def _get_time(spectrum):\n+ try:\n+ return spectrum['params']['rtinseconds']\n+ except KeyError:\n+ raise aux.PyteomicsError('RT information not found.')\n+\nclass IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexSavingTextReader):\n\"\"\"\n@@ -324,13 +334,6 @@ class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixi\ndef get_spectrum(self, key):\nreturn self.get_by_id(key)\n- @staticmethod\n- def _get_time(spectrum):\n- try:\n- return spectrum['params']['rtinseconds']\n- except KeyError:\n- raise aux.PyteomicsError('RT information not found.')\n-\nclass MGF(MGFBase, aux.FileReader):\n\"\"\"\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make auxiliary parsing methods on MGFBase static; move _get_time to MGFBase |
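Because these helpers are now static (and `_get_time` lives on `MGFBase`), they can be called without instantiating a reader. A short sketch; the spectrum dict is a made-up example shaped like MGF parser output:

```python
from pyteomics import mgf

# Parse a precursor charge string without creating a reader instance.
print(mgf.MGFBase.parse_precursor_charge('2+', list_only=True))   # -> [2]

# Pull the retention time out of a spectrum-like dict (hypothetical values).
spectrum = {'params': {'rtinseconds': 93.7, 'title': 'demo spectrum'}}
print(mgf.MGFBase._get_time(spectrum))                            # -> 93.7
```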
377,526 | 11.01.2021 16:54:23 | -3,600 | ef75235bebf4a9ef533c84a239d284af57d4e661 | Parsing mztab 2.0 sections in `MzTab`. | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mztab.py",
"new_path": "pyteomics/mztab.py",
"diff": "@@ -276,6 +276,10 @@ class MzTab(_MzTabParserBase):\nreturn self.protein_table\nif key in ('sml', ):\nreturn self.small_molecule_table\n+ if key in ('smf', ):\n+ return self.small_molecule_feature_table\n+ if key in ('sme', ):\n+ return self.small_molecule_evidence_table\nelse:\nraise KeyError(key)\n@@ -284,12 +288,16 @@ class MzTab(_MzTabParserBase):\nyield 'PEP', self.peptide_table\nyield 'PSM', self.spectrum_match_table\nyield 'SML', self.small_molecule_table\n+ yield 'SMF', self.small_molecule_feature_table\n+ yield 'SME', self.small_molecule_evidence_table\ndef _init_tables(self):\nself.protein_table = _MzTabTable(\"protein\")\nself.peptide_table = _MzTabTable(\"peptide\")\nself.spectrum_match_table = _MzTabTable('psm')\nself.small_molecule_table = _MzTabTable('small molecule')\n+ self.small_molecule_feature_table = _MzTabTable('small molecule feature')\n+ self.small_molecule_evidence_table = _MzTabTable('small molecule evidence')\ndef _transform_tables(self):\nif self._table_format == DATA_FRAME_FORMAT:\n@@ -297,16 +305,22 @@ class MzTab(_MzTabParserBase):\nself.peptide_table = self.peptide_table.as_df()\nself.spectrum_match_table = self.spectrum_match_table.as_df('PSM_ID')\nself.small_molecule_table = self.small_molecule_table.as_df()\n+ self.small_molecule_feature_table = self.small_molecule_feature_table.as_df()\n+ self.small_molecule_evidence_table = self.small_molecule_evidence_table.as_df()\nelif self._table_format in (DICT_FORMAT, dict):\nself.protein_table = self.protein_table.as_dict()\nself.peptide_table = self.peptide_table.as_dict()\nself.spectrum_match_table = self.spectrum_match_table.as_dict()\nself.small_molecule_table = self.small_molecule_table.as_dict()\n+ self.small_molecule_feature_table = self.small_molecule_feature_table.as_dict()\n+ self.small_molecule_evidence_table = self.small_molecule_evidence_table.as_dict()\nelif callable(self._table_format):\nself.protein_table = self._table_format(self.protein_table)\nself.peptide_table = self._table_format(self.peptide_table)\nself.spectrum_match_table = self._table_format(self.spectrum_match_table)\nself.small_molecule_table = self._table_format(self.small_molecule_table)\n+ self.small_molecule_feature_table = self._table_format(self.small_molecule_feature_table)\n+ self.small_molecule_evidence_table = self._table_format(self.small_molecule_evidence_table)\ndef _parse(self):\nfor i, line in enumerate(self.file):\n@@ -329,6 +343,10 @@ class MzTab(_MzTabParserBase):\nself.spectrum_match_table.header = tokens[1:]\nelif tokens[0] == \"SMH\":\nself.small_molecule_table.header = tokens[1:]\n+ elif tokens[0] == \"SFH\":\n+ self.small_molecule_feature_table.header = tokens[1:]\n+ elif tokens[0] == \"SEH\":\n+ self.small_molecule_evidence_table.header = tokens[1:]\n# rows\nelif tokens[0] == \"PRT\":\nself.protein_table.add(tokens[1:])\n@@ -338,6 +356,10 @@ class MzTab(_MzTabParserBase):\nself.spectrum_match_table.add(tokens[1:])\nelif tokens[0] == \"SML\":\nself.small_molecule_table.add(tokens[1:])\n+ elif tokens[0] == \"SMF\":\n+ self.small_molecule_feature_table.add(tokens[1:])\n+ elif tokens[0] == \"SME\":\n+ self.small_molecule_evidence_table.add(tokens[1:])\ndef keys(self):\nreturn OrderedDict(self).keys()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Parsing mztab 2.0 sections in `MzTab`. |
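With the new SMH/SFH/SEH handling, an mzTab-M file exposes three small-molecule tables. A sketch of accessing them; the file name is hypothetical and pandas is assumed for the default table format:

```python
from pyteomics import mztab

tables = mztab.MzTab('example-mztab-m.mztab')      # hypothetical mzTab-M 2.0 file
sml = tables['sml']                                # small molecule summary rows
smf = tables['smf']                                # small molecule feature rows
sme = tables.small_molecule_evidence_table        # same data as tables['sme']
print(len(sml.columns), len(smf.columns), len(sme.columns))
```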
377,526 | 11.01.2021 17:29:00 | -3,600 | 64e3470f6325f3dce63ca1a413e51189c4e8b153 | Fixed `test_iter`. | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mztab.py",
"new_path": "tests/test_mztab.py",
"diff": "@@ -18,7 +18,7 @@ class MzTabTest(unittest.TestCase):\ndef test_iter(self):\nreader = mztab.MzTab(self.path)\ntables = list(reader)\n- self.assertEqual(len(tables), 4)\n+ self.assertEqual(len(tables), 6)\n[self.assertEqual(len(t), 2) for t in tables]\ndef test_getitem(self):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fixed `test_iter`. |
377,526 | 11.01.2021 21:07:04 | -3,600 | 17f1dac4a0d314a0305dd85782cc48b1bb35fbdb | Version parser is implemented. | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mztab.py",
"new_path": "pyteomics/mztab.py",
"diff": "@@ -189,8 +189,10 @@ class MzTab(_MzTabParserBase):\nself._table_format = table_format\nself._init_tables()\nself._parse()\n+ self._determine_schema_version\nself._transform_tables()\n+\n@property\ndef table_format(self):\nreturn self._table_format\n@@ -284,10 +286,13 @@ class MzTab(_MzTabParserBase):\nraise KeyError(key)\ndef __iter__(self):\n+ if self.variant == \"P\":\nyield 'PRT', self.protein_table\nyield 'PEP', self.peptide_table\nyield 'PSM', self.spectrum_match_table\nyield 'SML', self.small_molecule_table\n+ elif self.variant == \"M\":\n+ yield 'SML', self.small_molecule_table\nyield 'SMF', self.small_molecule_feature_table\nyield 'SME', self.small_molecule_evidence_table\n@@ -361,6 +366,13 @@ class MzTab(_MzTabParserBase):\nelif tokens[0] == \"SME\":\nself.small_molecule_evidence_table.add(tokens[1:])\n+ def _determine_schema_version(self):\n+ version_parsed, variant = re.search(r\"(?P<schema_version>\\d+.\\d+.\\d+)(?:-(?P<schema_variant>[MP]))?\", self.version).groups()\n+ if variant is None:\n+ variant = \"P\"\n+ self.num_version = [int(v) for v in version_parsed.split(\".\")]\n+ self.variant = variant\n+\ndef keys(self):\nreturn OrderedDict(self).keys()\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mztab.py",
"new_path": "tests/test_mztab.py",
"diff": "@@ -18,7 +18,7 @@ class MzTabTest(unittest.TestCase):\ndef test_iter(self):\nreader = mztab.MzTab(self.path)\ntables = list(reader)\n- self.assertEqual(len(tables), 6)\n+ self.assertEqual(len(tables), 4)\n[self.assertEqual(len(t), 2) for t in tables]\ndef test_getitem(self):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Version parser is implemented. |
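The committed pattern and numeric split behave as below; this standalone snippet simply replays `_determine_schema_version` on two typical version strings:

```python
import re

# Same pattern as in the commit above (dots are escaped in a later fix).
pattern = r"(?P<schema_version>\d+.\d+.\d+)(?:-(?P<schema_variant>[MP]))?"
for version in ('2.0.0-M', '1.0.0'):
    num, variant = re.search(pattern, version).groups()
    print([int(v) for v in num.split('.')], variant or 'P')
# [2, 0, 0] M
# [1, 0, 0] P
```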
377,526 | 12.01.2021 22:17:08 | -3,600 | d699bca8b7e804330332a53413aebac28c801713 | Added test for mztab 2.0 | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mztab.py",
"new_path": "tests/test_mztab.py",
"diff": "@@ -7,23 +7,39 @@ from pyteomics import mztab\nclass MzTabTest(unittest.TestCase):\n- path = 'test.mztab'\n+ path_mztab1 = 'test.mztab'\n+ path_mztab2 = 'test_mztab2'\ndef test_metadata(self):\n- reader = mztab.MzTab(self.path)\n- self.assertEqual(len(reader.metadata), 208)\n- value = reader.metadata['fixed_mod[1]']\n- self.assertEqual(value, 'CHEMMOD:57.0214637236')\n+ reader_mztab1 = mztab.MzTab(self.path_mztab1)\n+ self.assertEqual(len(reader_mztab1.metadata), 208)\n+ value_from_mztab1 = reader_mztab1.metadata['fixed_mod[1]']\n+ self.assertEqual(value_from_mztab1, 'CHEMMOD:57.0214637236')\n+\n+ reader_mztab2 = mztab.MzTab(self.path_mztab2)\n+ self.assertEqual(len(reader_mztab2.metadata), 61)\n+ value_from_mztab2 = reader_mztab2.metadata['sample_processing[1]']\n+ self.assertEqual(value_from_mztab2, 'MSIO, MSIO:0000148, high performance liquid chromatography')\n+\ndef test_iter(self):\n- reader = mztab.MzTab(self.path)\n- tables = list(reader)\n+ reader_mztab1 = mztab.MzTab(self.path_mztab1)\n+ tables = list(reader_mztab1)\nself.assertEqual(len(tables), 4)\n[self.assertEqual(len(t), 2) for t in tables]\n+ reader_mztab2 = mztab.MzTab(self.path_mztab2)\n+ tables = list(reader_mztab2)\n+ self.assertEqual(len(tables), 3)\n+ [self.assertEqual(len(t), 2) for t in tables]\n+\ndef test_getitem(self):\n- reader = mztab.MzTab(self.path)\n- table = reader['psm']\n+ reader_mztab1 = mztab.MzTab(self.path_mztab1)\n+ table = reader_mztab1['psm']\n+ self.assertIsInstance(table, mztab.pd.DataFrame)\n+\n+ reader_mztab2 = mztab.MzTab(self.path_mztab2)\n+ table = reader_mztab2['sme']\nself.assertIsInstance(table, mztab.pd.DataFrame)\nif __name__ == '__main__':\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/test_mztab2.mztab",
"diff": "+COM Meta data section\n+MTD mzTab-version 2.0.0-M\n+MTD mzTab-ID ISAS-2018-1234\n+MTD description Minimal proposed sample file for identification and quantification of lipids\n+MTD publication[1] pubmed:29039908 | doi:10.1021/acs.analchem.7b03576\n+MTD cv[1]-label MS\n+MTD cv[1]-full_name PSI-MS controlled vocabulary\n+MTD cv[1]-version 4.0.18\n+MTD cv[1]-uri https://github.com/HUPO-PSI/psi-ms-CV/blob/master/psi-ms.obo\n+MTD cv[2]-label MSIO\n+MTD cv[2]-uri https://www.ebi.ac.uk/ols/ontologies/msio\n+MTD cv[2]-version 1.0.1\n+MTD cv[2]-full_name Metabolomics Standards Initiative Ontology (MSIO)\n+MTD cv[3]-label UO\n+MTD cv[3]-full_name Units of Measurement Ontology\n+MTD cv[3]-version 2017-09-25\n+MTD cv[3]-uri http://purl.obolibrary.org/obo/uo.owl\n+MTD quantification_method [MS, MS:1001838, SRM quantitation analysis, ]\n+MTD sample_processing[1] [MSIO, MSIO:0000148, high performance liquid chromatography, ]\n+MTD instrument[1]-name [MS, MS:1001911, Q Exactive , ]\n+MTD instrument[1]-source [MS, MS:1000073, electrospray ionization, ]\n+MTD instrument[1]-analyzer[1] [MS, MS:1000081, quadrupole, ]\n+MTD instrument[1]-analyzer[2] [MS, MS:1000484, orbitrap, ]\n+MTD instrument[1]-detector [MS, MS:1000624, inductive detector, ]\n+MTD software[1] [MS, MS:1000532, Xcalibur,2.8-280502/2.8.1.2806]\n+MTD software[1]-setting[1] ScheduledSRMWindow: 2 min\n+MTD software[1]-setting[2] CycleTime: 2 s\n+MTD software[2] [MS, MS:1000922, Skyline, 3.5.0.9319]\n+MTD software[2]-setting[1] MSMSmassrange: (50.0, 1800.0)\n+MTD sample[1] QEx-1273-prm-sp1\n+MTD sample[1]-description Sphingolipids with concentration reported as picomolar per mg of protein, abundances are reported after calibration correction.\n+MTD ms_run[1]-location file:///C:/data/QEx-1273-prm-sp1.mzML\n+MTD ms_run[1]-format [MS, MS:1000584, mzML file, ]\n+MTD ms_run[1]-id_format [MS, MS:1000768, Thermo nativeID format, ]\n+MTD ms_run[1]-scan_polarity[1] [MS, MS:1000130, positive scan, ]\n+MTD ms_run[1]-instrument_ref instrument[1]\n+MTD assay[1] Description of assay 1\n+MTD assay[1]-sample_ref sample[1]\n+MTD assay[1]-ms_run_ref ms_run[1]\n+MTD study_variable[1] Sphingolipid SRM Quantitation\n+MTD study_variable[1]-assay_refs assay[1]\n+MTD study_variable[1]-description sphingolipid srm quantitation\n+MTD study_variable[1]-average_function [MS, MS:1002883, median, ]\n+MTD study_variable[1]-variation_function [MS, MS:1002885, standard error, ]\n+MTD small_molecule-quantification_unit [UO, UO:0000072, picomolal, ]\n+MTD small_molecule_feature-quantification_unit [UO, UO:0000072, picomolal, ]\n+MTD small_molecule-identification_reliability [MS, MS:1002896, compound identification confidence level, ]\n+MTD database[1] [,, Pubchem, ]\n+MTD database[1]-prefix PUBCHEM-CPD\n+MTD database[1]-version 02.12.2017\n+MTD database[1]-uri https://www.ncbi.nlm.nih.gov/pccompound\n+MTD database[2] [,, LipidMaps, ]\n+MTD database[2]-prefix LM\n+MTD database[2]-version 2017-12\n+MTD database[2]-uri http://www.lipidmaps.org/\n+MTD database[3] [,, LipidCreator Transitions, ]\n+MTD database[3]-prefix LCTR\n+MTD database[3]-version 2018-07\n+MTD database[3]-uri https://lifs.isas.de/lipidcreator\n+COM MTD colunit-small_molecule retention_time=[UO, UO:0000010, second, ]\n+MTD colunit-small_molecule_evidence opt_global_mass_error=[UO, UO:0000169, parts per million, ]\n+MTD id_confidence_measure[1] [MS, MS:1002890, fragmentation score, ]\n+MTD external_study_uri[1] file:///C:/data/prm.sky.zip\n+\n+COM \"MzTab 2.0.0-M \"\"proposed\"\" specification\"\n+COM 
Summary rows.\n+COM Evidences (e.g. multiple modifications, adducts incl. charge variants are summarized).\n+COM For most use cases this summary lines may be sufficient.\n+COM Negative and positive scan polarities are currently not explicitly included, this is still under debate in the mzTAB community.\n+SMH SML_ID SMF_ID_REFS chemical_name database_identifier chemical_formula smiles inchi uri theoretical_neutral_mass adduct_ions reliability best_id_confidence_measure best_id_confidence_value abundance_assay[1] abundance_study_variable[1] abundance_variation_study_variable[1] opt_global_lipid_category opt_global_lipid_species opt_global_lipid_best_id_level\n+SML 1 1 | 2 | 3 | 4 Cer(d18:1/24:0) LM:LMSP02010012 C42H83NO3 CCCCCCCCCCCCCCCCCCCCCCCC(=O)N[C@@H](CO)[C@H](O)/C=C/CCCCCCCCCCCCC InChI=1S/C42H83NO3/c1-3-5-7-9-11-13-15-17-18-19-20-21-22-23-24-26-28-30-32-34-36-38-42(46)43-40(39-44)41(45)37-35-33-31-29-27-25-16-14-12-10-8-6-4-2/h35,37,40-41,44-45H,3-34,36,38-39H2,1-2H3,(H,43,46)/b37-35+/t40-,41+/m0/s1 http://www.lipidmaps.org/data/LMSDRecord.php?LM_ID=LMSP02010012 649.6373 [M+H]+ 2 [,, qualifier ions exact mass,] 0.958 4.448784E-05 4.448784E-05 0 Sphingolipids Cer 42:1 Cer d18:1/24:0\n+\n+COM MS feature rows , used to report m/z and individual abundance information for quantification\n+SFH SMF_ID SME_ID_REFS SME_ID_REF_ambiguity_code adduct_ion isotopomer exp_mass_to_charge charge retention_time_in_seconds retention_time_in_seconds_start retention_time_in_seconds_end abundance_assay[1] opt_global_quantifiers_SMF_ID_REFS\n+SMF 1 1 null [M+H]1+ null 650.6432 1 821.2341 756.0000 954.0000 4.448784E-05 3\n+SMF 2 2 null null null 252.2677 1 821.2341 756.0000 954.0000 6.673176E-06 null\n+SMF 3 3 null null null 264.2689 1 821.2341 756.0000 954.0000 1.3346352E-05 null\n+SMF 4 4 null null null 282.2788 1 821.2341 756.0000 954.0000 9.831813E-06 null\n+\n+COM Evidence rows for parent / fragment ions.\n+COM Primary use case: report single hits from spectral library or accurate mass searches without quantification. 
-> Qualification\n+SEH SME_ID evidence_input_id database_identifier chemical_formula smiles inchi chemical_name uri derivatized_form adduct_ion exp_mass_to_charge charge theoretical_mass_to_charge opt_global_mass_error spectra_ref identification_method ms_level id_confidence_measure[1] rank opt_global_qualifiers_evidence_grouping_ID_REFS\n+SME 1 1 LM:LMSP0501AB02 C42H83NO3 CCCCCCCCCCCCCCCCCCCCCCCC(=O)N[C@@H](CO)[C@H](O)/C=C/CCCCCCCCCCCCC InChI=1S/C42H83NO3/c1-3-5-7-9-11-13-15-17-18-19-20-21-22-23-24-26-28-30-32-34-36-38-42(46)43-40(39-44)41(45)37-35-33-31-29-27-25-16-14-12-10-8-6-4-2/h35,37,40-41,44-45H,3-34,36,38-39H2,1-2H3,(H,43,46)/b37-35+/t40-,41+/m0/s1 LacCer d18:1/12:0 http://www.lipidmaps.org/data/LMSDRecord.php?LM_ID=LMSP02010012 null [M+H]1+ 650.6432 1 650.6446 -2.1517 ms_run[1]:controllerType=0 controllerNumber=1 scan=731 [,, qualifier ions exact mass,] [MS,MS:1000511, ms level, 1] 0.958 1 2\n+SME 2 2 LCTR:LCTR0809812 C17H33N null null Cer d18:1/24:0 W' - CHO null null null 252.2677 1 252.2686 -3.5676 ms_run[1]:controllerType=0 controllerNumber=1 scan=732 [,, exact mass, ] [MS,MS:1000511, ms level, 2] 0.9780 1 null\n+SME 3 2 LCTR:LCTR0871245 C18H33N null null Cer d18:1/24:0 W'' null null null 264.2689 1 264.2686 -1.1352 ms_run[1]:controllerType=0 controllerNumber=1 scan=732 [,, exact mass, ] [MS,MS:1000511, ms level, 2] 0.7500 1 null\n+SME 4 2 LCTR:LCTR0809711 C18H35NO null null Cer d18:1/24:0 W' null null null 282.2788 1 282.2791 -1.0628 ms_run[1]:controllerType=0 controllerNumber=1 scan=732 [,, exact mass, ] [MS,MS:1000511, ms level, 2] 0.8760 1 null\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Added test for mztab 2.0 |
377,526 | 12.01.2021 22:18:05 | -3,600 | ee9ec7ef9bfd19a79fc3d54118caccd1bd41e9d4 | Quick fix for mode and type properties | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mztab.py",
"new_path": "pyteomics/mztab.py",
"diff": "@@ -201,13 +201,14 @@ class MzTab(_MzTabParserBase):\ndef version(self):\nreturn self.metadata['mzTab-version']\n+\n@property\ndef mode(self):\n- return self.metadata['mzTab-mode']\n+ return self.metadata.get('mzTab-mode', \"\")\n@property\ndef type(self):\n- return self.metadata['mzTab-type']\n+ return self.metadata.get('mzTab-type', \"\")\ndef collapse_properties(self, proplist):\n'''Collapse a flat property list into a hierchical structure.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Quick fix for mode and type properties |
377,526 | 12.01.2021 22:31:46 | -3,600 | e094cce9ab8869740978f0f54f09cf3c888aae75 | File name bug is fixed. | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mztab.py",
"new_path": "tests/test_mztab.py",
"diff": "@@ -8,7 +8,7 @@ from pyteomics import mztab\nclass MzTabTest(unittest.TestCase):\npath_mztab1 = 'test.mztab'\n- path_mztab2 = 'test_mztab2'\n+ path_mztab2 = 'test_mztab2.mztab'\ndef test_metadata(self):\nreader_mztab1 = mztab.MzTab(self.path_mztab1)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | File name bug is fixed. |
377,526 | 12.01.2021 22:43:31 | -3,600 | 14e1940696afdfa86e8abc3f7779dcab4a26c330 | Test is fixed. | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mztab.py",
"new_path": "tests/test_mztab.py",
"diff": "@@ -19,7 +19,7 @@ class MzTabTest(unittest.TestCase):\nreader_mztab2 = mztab.MzTab(self.path_mztab2)\nself.assertEqual(len(reader_mztab2.metadata), 61)\nvalue_from_mztab2 = reader_mztab2.metadata['sample_processing[1]']\n- self.assertEqual(value_from_mztab2, 'MSIO, MSIO:0000148, high performance liquid chromatography')\n+ self.assertEqual(value_from_mztab2, 'high performance liquid chromatography')\ndef test_iter(self):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Test is fixed. |
377,522 | 13.01.2021 20:00:22 | -10,800 | 16d52df9b3a1607396432c0d2bbf766bc0b749d3 | Split test methods for different files | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mztab.py",
"new_path": "tests/test_mztab.py",
"diff": "@@ -10,37 +10,40 @@ class MzTabTest(unittest.TestCase):\npath_mztab1 = 'test.mztab'\npath_mztab2 = 'test_mztab2.mztab'\n- def test_metadata(self):\n+ def test_metadata_mztab1(self):\nreader_mztab1 = mztab.MzTab(self.path_mztab1)\nself.assertEqual(len(reader_mztab1.metadata), 208)\nvalue_from_mztab1 = reader_mztab1.metadata['fixed_mod[1]']\nself.assertEqual(value_from_mztab1, 'CHEMMOD:57.0214637236')\n+ def test_metadata_mztab2(self):\nreader_mztab2 = mztab.MzTab(self.path_mztab2)\nself.assertEqual(len(reader_mztab2.metadata), 61)\nvalue_from_mztab2 = reader_mztab2.metadata['sample_processing[1]']\nself.assertEqual(value_from_mztab2, 'high performance liquid chromatography')\n-\n- def test_iter(self):\n+ def test_iter_mztab1(self):\nreader_mztab1 = mztab.MzTab(self.path_mztab1)\ntables = list(reader_mztab1)\nself.assertEqual(len(tables), 4)\n[self.assertEqual(len(t), 2) for t in tables]\n+ def test_iter_mztab2(self):\nreader_mztab2 = mztab.MzTab(self.path_mztab2)\ntables = list(reader_mztab2)\nself.assertEqual(len(tables), 3)\n[self.assertEqual(len(t), 2) for t in tables]\n- def test_getitem(self):\n+ def test_getitem_mztab1(self):\nreader_mztab1 = mztab.MzTab(self.path_mztab1)\ntable = reader_mztab1['psm']\nself.assertIsInstance(table, mztab.pd.DataFrame)\n+ def test_getitem_mztab2(self):\nreader_mztab2 = mztab.MzTab(self.path_mztab2)\ntable = reader_mztab2['sme']\nself.assertIsInstance(table, mztab.pd.DataFrame)\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Split test methods for different files |
377,522 | 13.01.2021 20:05:18 | -10,800 | 607581e4fc2698df12409d6a96f4a044a89f9b73 | Return None for missing metadata, add more properties | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mztab.py",
"new_path": "pyteomics/mztab.py",
"diff": "@@ -192,7 +192,6 @@ class MzTab(_MzTabParserBase):\nself._determine_schema_version()\nself._transform_tables()\n-\n@property\ndef table_format(self):\nreturn self._table_format\n@@ -201,14 +200,25 @@ class MzTab(_MzTabParserBase):\ndef version(self):\nreturn self.metadata['mzTab-version']\n-\n@property\ndef mode(self):\n- return self.metadata.get('mzTab-mode', \"\")\n+ return self.metadata.get('mzTab-mode')\n@property\ndef type(self):\n- return self.metadata.get('mzTab-type', \"\")\n+ return self.metadata.get('mzTab-type')\n+\n+ @property\n+ def id(self):\n+ return self.metadata.get('mzTab-ID')\n+\n+ @property\n+ def title(self):\n+ return self.metadata.get('title')\n+\n+ @property\n+ def description(self):\n+ return self.metadata.get('description')\ndef collapse_properties(self, proplist):\n'''Collapse a flat property list into a hierchical structure.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Return None for missing metadata, add more properties |
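Since `mode` and `type` now use `metadata.get`, they simply return `None` for mzTab-M files, which do not define those keys, while `id` comes from the `mzTab-ID` entry. A sketch against the mzTab-M test file shown earlier in this history (path as used in the test suite):

```python
from pyteomics import mztab

t = mztab.MzTab('test_mztab2.mztab')   # the mzTab-M file from the tests
print(t.mode, t.type)                  # None None (keys absent in mzTab-M)
print(t.id)                            # 'ISAS-2018-1234' per the test file metadata
```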
377,522 | 14.01.2021 19:51:53 | -10,800 | a866f042a2bbc7d9f961e4cb52c39a2dc9547c27 | Add test for mztab variant property | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mztab.py",
"new_path": "tests/test_mztab.py",
"diff": "@@ -22,6 +22,14 @@ class MzTabTest(unittest.TestCase):\nvalue_from_mztab2 = reader_mztab2.metadata['sample_processing[1]']\nself.assertEqual(value_from_mztab2, 'high performance liquid chromatography')\n+ def test_metadata_variant_P(self):\n+ reader_mztab1 = mztab.MzTab(self.path_mztab1)\n+ self.assertEqual(reader_mztab1.variant, 'P')\n+\n+ def test_metadata_variant_M(self):\n+ reader_mztab2 = mztab.MzTab(self.path_mztab2)\n+ self.assertEqual(reader_mztab2.variant, 'M')\n+\ndef test_iter_mztab1(self):\nreader_mztab1 = mztab.MzTab(self.path_mztab1)\ntables = list(reader_mztab1)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add test for mztab variant property |
377,522 | 14.01.2021 19:56:19 | -10,800 | f79b28acd93d7c8b92e46d05856a80f3b0aedfff | Accept partial names in test runner script | [
{
"change_type": "MODIFY",
"old_path": "tests/runtests.sh",
"new_path": "tests/runtests.sh",
"diff": "@@ -5,7 +5,14 @@ if [ $# -eq 0 ]; then\nelse\nfor f; do\nfor v in 2.7 3.3 3.4 3.5 3.6 3.7 3.8 3.9; do\n- command -v \"python${v}\" >/dev/null 2>&1 && { echo \"Executing python${v}\" \"$f\"; eval \"python${v}\" \"$f\"; }\n+ command -v \"python${v}\" >/dev/null 2>&1 && {\n+ if [ -f \"$f\" ]; then\n+ fname=\"$f\"\n+ elif [ -f \"test_${f}.py\" ]; then\n+ fname=\"test_${f}.py\"\n+ fi\n+ echo \"Executing python${v}\" \"$fname\"; eval \"python${v}\" \"$fname\"\n+ }\ndone\ndone\nfi\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Accept partial names in test runner script |
377,522 | 14.01.2021 20:30:05 | -10,800 | 8214b553490f3f67b452d7c9b36540c4f5503e23 | Bump version to 4.4.1 (release) | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.4.1 beta 3\n-------------\n+4.4.1\n+-----\n- Further tweaked behavior of :py:func:`pyteomics.auxiliary.file_helpers._check_use_index`, which is responsible for\nhandling of `use_index` in :py:func:`read` functions in parser modules.\n- - Also, check out the `Pyteomics Discussions page <https://github.com/levitsky/pyteomics/discussions>`_!\n- You can use it to share your thoughts, ask questions, discuss coding practices, etc.\n-\n- Fix indexing when element identifiers contain XML-escaped characters\n(`#20 <https://github.com/levitsky/pyteomics/pull/20>`_ by Joshua Klein).\n- Add support for MzTab 2.0 (`#22 <https://github.com/levitsky/pyteomics/pull/22>`_ by @annalefarova).\n+ - Also, check out the `Pyteomics Discussions page <https://github.com/levitsky/pyteomics/discussions>`_!\n+ You can use it to share your thoughts, ask questions, discuss coding practices, etc.\n+\n4.4\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.4.1b3'\n+__version__ = '4.4.1'\nfrom collections import namedtuple\nimport re\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Bump version to 4.4.1 (release) |
377,522 | 18.01.2021 14:24:09 | -10,800 | 18be55aa7155b471c1da267486cdb3f76d58b2ce | Update Sphinx documentation, changelog and version | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "+4.4.2 beta 1\n+------------\n+\n+ - Extend the :py:mod:`pyteomics.mztab.MzTab` parser with auto-generated properties. Almost all metadata entities are\n+ now exposed as properties on the parser object (`#23 <https://github.com/levitsky/pyteomics/pull/23>`_ by Joshua Klein).\n+\n4.4.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/api/mztab.rst",
"new_path": "doc/source/api/mztab.rst",
"diff": ".. automodule:: pyteomics.mztab\n- :no-members:\n-\n- .. autoclass:: MzTab\n- :inherited-members:\n-\n-Helpers\n--------\n- .. autoclass:: Group\n-\n-\n-Internals\n----------\n.. autoclass:: _MzTabTable\n-\n-Property Management\n-~~~~~~~~~~~~~~~~~~~\n-\n-:mod:`mztab` uses metaprogramming to generate its metadata accessors, generated by\n-these classes working in concert.\n-\n- .. autoclass:: MetadataBackedProperty\n-\n.. autoclass:: MetadataBackedCollection\n-\n- .. autoclass:: MetadataPropertyAnnotator\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mztab.py",
"new_path": "pyteomics/mztab.py",
"diff": "\"\"\"\nmztab - mzTab file reader\n-============================\n+=========================\nSummary\n-------\n@@ -16,8 +16,33 @@ of the file-level metadata. MzTab specifications 1.0 and 2.0 are supported.\nData access\n-----------\n- :py:class:`MzTab` - a class representing a single mzTab file\n+ :py:class:`MzTab` - a class representing a single mzTab file.\n+Helpers\n+-------\n+\n+ :py:class:`Group` - a collection of metadata relating to one entity.\n+\n+\n+Internals\n+---------\n+\n+ :py:class:`_MzTabTable` - a single table in an mzTab file.\n+\n+\n+Property Management\n+~~~~~~~~~~~~~~~~~~~\n+\n+:mod:`mztab` uses metaprogramming to generate its metadata accessors, generated by\n+these classes working in concert.\n+\n+ :py:class:`MetadataBackedProperty`\n+\n+ :py:class:`MetadataBackedCollection`\n+\n+ :py:class:`MetadataPropertyAnnotator`\n+\n+-------------------------------------------------------------------------------\n\"\"\"\nimport re\n@@ -130,7 +155,6 @@ Returns\nreturn doc\n-\nclass MetadataPropertyAnnotator(type):\n'''A simple metaclass to do some class-creation time introspection\nand descriptor binding.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.4.1'\n+__version__ = '4.4.2b1'\nfrom collections import namedtuple\nimport re\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update Sphinx documentation, changelog and version |
377,528 | 21.01.2021 21:33:28 | 25,200 | 8d3ac6c67ee0ff5c9caa71b61f2f06bf0cf63d0a | Fixed version parsing
Accepts version numbers that do not necessarily contain three components (x.x.x); versions with one or two components (x or x.x) can also be read | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mztab.py",
"new_path": "pyteomics/mztab.py",
"diff": "@@ -744,8 +744,8 @@ class MzTab(_MzTabParserBase):\nself.small_molecule_evidence_table.add(tokens[1:])\ndef _determine_schema_version(self):\n- version_parsed, variant = re.search(r\"(?P<schema_version>\\d+.\\d+.\\d+)(?:-(?P<schema_variant>[MP]))?\", self.version).groups()\n- if variant is None:\n+ version_parsed, _, variant = str(self.version).partition(\"-\")\n+ if variant is None or (variant != \"M\" and variant != \"P\"):\nvariant = \"P\"\nself.num_version = [int(v) for v in version_parsed.split(\".\")]\nself.variant = variant\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fixed version parsing
Accepts version numbers that do not necessarily contain three components (x.x.x); versions with one or two components (x or x.x) can also be read |
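The partition-based check accepts short version strings; replaying the committed logic on a few inputs (a membership test is used here, equivalent to the committed variant check):

```python
for version in ('2.0.0-M', '2.0', '2'):
    version_parsed, _, variant = version.partition('-')
    if variant not in ('M', 'P'):              # same effect as the committed check
        variant = 'P'
    print([int(v) for v in version_parsed.split('.')], variant)
# [2, 0, 0] M
# [2, 0] P
# [2] P
```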
377,528 | 22.01.2021 12:22:12 | 25,200 | 695e0e3fc7b233f318958c9d1df496bed529298c | Updated version parsing
Reverted to a tweaked regex search for stricter parsing and added a check to ensure self.num_version is always a 3-tuple, regardless of the original version number. | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mztab.py",
"new_path": "pyteomics/mztab.py",
"diff": "@@ -744,10 +744,13 @@ class MzTab(_MzTabParserBase):\nself.small_molecule_evidence_table.add(tokens[1:])\ndef _determine_schema_version(self):\n- version_parsed, _, variant = str(self.version).partition(\"-\")\n- if variant is None or (variant != \"M\" and variant != \"P\"):\n+ version_parsed, variant = re.search(r\"(?P<schema_version>\\d+(?:.\\d+(?:.\\d+)?)?)(?:-(?P<schema_variant>[MP]))?\", str(self.version)).groups()\n+ if variant is None:\nvariant = \"P\"\nself.num_version = [int(v) for v in version_parsed.split(\".\")]\n+ # Ensure self.num_version is 3-tuple\n+ while len(self.num_version) < 3:\n+ self.num_version.append(0)\nself.variant = variant\ndef keys(self):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Updated version parsing
Reverted to a tweaked regex search for stricter parsing and added a check to ensure self.num_version is always a 3-tuple, regardless of the original version number. |
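The stricter regex together with the padding loop always yields a three-element `num_version`; a standalone replay on 1-, 2- and 3-component versions:

```python
import re

# Same pattern as in the commit above (dots are escaped in the next commit).
pattern = r"(?P<schema_version>\d+(?:.\d+(?:.\d+)?)?)(?:-(?P<schema_variant>[MP]))?"
for version in ('2.0.0-M', '1.1', '2'):
    num, variant = re.search(pattern, version).groups()
    parts = [int(v) for v in num.split('.')]
    while len(parts) < 3:                      # ensure a 3-tuple-like version
        parts.append(0)
    print(parts, variant or 'P')
# [2, 0, 0] M
# [1, 1, 0] P
# [2, 0, 0] P
```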
377,522 | 24.01.2021 00:02:38 | -10,800 | 977b253efa556cedd85a6161e4190bbcd28e4eaf | Fix mztab version string pattern | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mztab.py",
"new_path": "pyteomics/mztab.py",
"diff": "@@ -744,7 +744,7 @@ class MzTab(_MzTabParserBase):\nself.small_molecule_evidence_table.add(tokens[1:])\ndef _determine_schema_version(self):\n- version_parsed, variant = re.search(r\"(?P<schema_version>\\d+(?:.\\d+(?:.\\d+)?)?)(?:-(?P<schema_variant>[MP]))?\", str(self.version)).groups()\n+ version_parsed, variant = re.search(r\"(?P<schema_version>\\d+(?:\\.\\d+(?:\\.\\d+)?)?)(?:-(?P<schema_variant>[MP]))?\", str(self.version)).groups()\nif variant is None:\nvariant = \"P\"\nself.num_version = [int(v) for v in version_parsed.split(\".\")]\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix mztab version string pattern |
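Escaping the dots matters because an unescaped `.` matches any character, so malformed strings could be swallowed whole; a quick comparison of the two patterns on an invented non-version string:

```python
import re

unescaped = r"\d+(?:.\d+(?:.\d+)?)?"
escaped = r"\d+(?:\.\d+(?:\.\d+)?)?"
value = '2-0-0'                              # not a dotted version number
print(re.search(unescaped, value).group())   # '2-0-0' because '.' matched the dashes
print(re.search(escaped, value).group())     # '2'
```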
377,522 | 27.01.2021 17:30:17 | -10,800 | f2ab4e58c14895b2384e9740822a5ffd96f122f1 | Return unchanged value in pepxml safe_float if conversion fails | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -97,6 +97,7 @@ This module requires :py:mod:`lxml`.\nfrom lxml import etree\nfrom . import xml, auxiliary as aux, _schema_defaults\n+\nclass PepXML(xml.MultiProcessingXML, xml.IndexSavingXML):\n\"\"\"Parser class for pepXML files.\"\"\"\nfile_format = 'pepXML'\n@@ -123,13 +124,9 @@ class PepXML(xml.MultiProcessingXML, xml.IndexSavingXML):\nname = xml._local_name(element)\nrec = kwargs.pop('recursive', None)\nif name == self._root_element:\n- info = self._get_info(element, ename=name,\n- recursive=(rec if rec is not None else False),\n- **kwargs)\n+ info = self._get_info(element, ename=name, recursive=(rec if rec is not None else False), **kwargs)\nelse:\n- info = self._get_info(element, ename=name,\n- recursive=(rec if rec is not None else True),\n- **kwargs)\n+ info = self._get_info(element, ename=name, recursive=(rec if rec is not None else True), **kwargs)\ndef safe_float(s):\ntry:\n@@ -137,7 +134,7 @@ class PepXML(xml.MultiProcessingXML, xml.IndexSavingXML):\nexcept ValueError:\nif s.startswith('+-0'):\nreturn 0\n- return None\n+ return s\nconverters = {'float': safe_float, 'int': int,\n'bool': lambda x: x.lower() in {'1', 'true'},\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Return unchanged value in pepxml safe_float if conversion fails |
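The updated helper now passes unparseable strings through instead of returning `None`; a standalone copy of the function shows the three cases:

```python
def safe_float(s):
    # Same logic as the committed pepXML helper.
    try:
        return float(s)
    except ValueError:
        if s.startswith('+-0'):
            return 0
        return s

print(safe_float('3.14'))     # 3.14
print(safe_float('+-0.00'))   # 0
print(safe_float('1.2e'))     # '1.2e', returned unchanged after this change
```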
377,522 | 19.03.2021 23:02:01 | -10,800 | f3c813e12bd2e55cd4401c10754a3b7a151f5d75 | Add parser.psims_rules | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.4.2 beta 3\n+4.4.2 beta 4\n------------\n+ - Add cleavage rules from `MS ontology <http://purl.obolibrary.org/obo/MS_1001045>`_ as\n+ :py:data:`pyteomics.parser.psims_rules`. :py:func:`pyteomics.parser.cleave` now understands keys and accessions from\n+ :py:data:`psims_rules` as rules.\n+\n- Extend the :py:class:`pyteomics.mztab.MzTab` parser with auto-generated properties. Almost all metadata entities are\nnow exposed as properties on the parser object (`#23 <https://github.com/levitsky/pyteomics/pull/23>`_ by Joshua Klein).\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/parser.py",
"new_path": "pyteomics/parser.py",
"diff": "@@ -77,7 +77,7 @@ Data\n:py:data:`std_labels` - a list of all standard sequence\nelements, amino acid residues and terminal modifications.\n- :py:data:`expasy_rules` - a dict with the regular expressions of\n+ :py:data:`expasy_rules` and :py:data:`psims_rules` - two dicts with the regular expressions of\ncleavage rules for the most popular proteolytic enzymes.\n-------------------------------------------------------------------------------\n@@ -101,7 +101,7 @@ Data\nimport re\nfrom collections import deque\nimport itertools as it\n-from .auxiliary import PyteomicsError, memoize, BasicComposition\n+from .auxiliary import PyteomicsError, memoize, BasicComposition, cvstr, cvquery\nstd_amino_acids = ['Q', 'W', 'E', 'R', 'T', 'Y', 'I', 'P', 'A', 'S',\n'D', 'F', 'G', 'H', 'K', 'L', 'C', 'V', 'N', 'M']\n@@ -544,7 +544,7 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exce\nwill not work as expected.\nrule : str or compiled regex\n- A key present in :py:const:`expasy_rules` or a\n+ A key present in :py:data:`expasy_rules`, :py:data:`psims_rules` (or an MS ontology accession) or a\n`regular expression <https://docs.python.org/library/re.html#regular-expression-syntax>`_\ndescribing the site of cleavage. It is recommended\nto design the regex so that it matches only the residue whose C-terminal\n@@ -583,7 +583,9 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exce\nTrue\n>>> cleave('AKAKBK', 'trypsin', 0) == {'AK', 'BK'}\nTrue\n- >>> cleave('GKGKYKCK', expasy_rules['trypsin'], 2) == \\\n+ >>> cleave('AKAKBK', 'MS:1001251', 0) == {'AK', 'BK'}\n+ True\n+ >>> cleave('GKGKYKCK', 'Trypsin/P', 2) == \\\n{'CK', 'GKYK', 'YKCK', 'GKGK', 'GKYKCK', 'GK', 'GKGKYK', 'YK'}\nTrue\n@@ -595,7 +597,12 @@ def _cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exc\n\"\"\"Like :py:func:`cleave`, but the result is a list. Refer to\n:py:func:`cleave` for explanation of parameters.\n\"\"\"\n- rule = expasy_rules.get(rule, rule)\n+ if rule in expasy_rules:\n+ rule = expasy_rules[rule]\n+ elif rule in psims_rules:\n+ rule = psims_rules[rule]\n+ elif rule in _psims_index:\n+ rule = _psims_index[rule]\nexception = expasy_rules.get(exception, exception)\npeptides = []\nml = missed_cleavages + 2\n@@ -714,6 +721,43 @@ exception=parser.expasy_rules['trypsin_exception'])\n\"\"\"\n+psims_rules = {\n+ cvstr('2-iodobenzoate', 'MS:1001918'): r'(?<=W)',\n+ cvstr('Arg-C', 'MS:1001303'): r'(?<=R)(?!P)',\n+ cvstr('Asp-N', 'MS:1001304'): r'(?=[BD])',\n+ cvstr('Asp-N ambic', 'MS:1001305'): r'(?=[DE])',\n+ cvstr('CNBr', 'MS:1001307'): r'(?<=M)',\n+ cvstr('Chymotrypsin', 'MS:1001306'): r'(?<=[FYWL])(?!P)',\n+ cvstr('Formic acid', 'MS:1001308'): r'((?<=D))|((?=D))',\n+ cvstr('Lys-C', 'MS:1001309'): r'(?<=K)(?!P)',\n+ cvstr('Lys-C/P', 'MS:1001310'): r'(?<=K)',\n+ cvstr('PepsinA', 'MS:1001311'): r'(?<=[FL])',\n+ cvstr('TrypChymo', 'MS:1001312'): r'(?<=[FYWLKR])(?!P)',\n+ cvstr('Trypsin', 'MS:1001251'): r'(?<=[KR])(?!P)',\n+ cvstr('Trypsin/P', 'MS:1001313'): r'(?<=[KR])',\n+ cvstr('V8-DE', 'MS:1001314'): r'(?<=[BDEZ])(?!P)',\n+ cvstr('V8-E', 'MS:1001315'): r'(?<=[EZ])(?!P)',\n+ cvstr('glutamyl endopeptidase', 'MS:1001917'): r'(?<=[^E]E)',\n+ cvstr('leukocyte elastase', 'MS:1001915'): r'(?<=[ALIV])(?!P)',\n+ cvstr('proline endopeptidase', 'MS:1001916'): r'(?<=[HKR]P)(?!P)',\n+}\n+\"\"\"\n+This dict contains regular expressions for cleavage rules of the most\n+popular proteolytic enzymes. 
The rules were taken from the PSI `MS ontology\n+<http://purl.obolibrary.org/obo/MS_1001045>`_.\n+\n+You can use names or accessions to access the rules.\n+Use :py:func:`pyteomics.auxiliary.cvquery` for accession access::\n+\n+ >>> from pyteomics.auxiliary import cvquery\n+ >>> from pyteomics.parser import psims_rules\n+ >>> cvquery(psims_rules, 'MS:1001918')\n+ '(?<=W)'\n+\n+\"\"\"\n+\n+_psims_index = cvquery(psims_rules)\n+\ndef isoforms(sequence, **kwargs):\n\"\"\"\nApply variable and fixed modifications to the polypeptide and yield\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.4.2b3'\n+__version__ = '4.4.2b4'\nfrom collections import namedtuple\nimport re\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_parser.py",
"new_path": "tests/test_parser.py",
"diff": "@@ -61,6 +61,7 @@ class ParserTest(unittest.TestCase):\ndef test_cleave(self):\nself.assertEqual(parser._cleave('PEPTIDEKS', parser.expasy_rules['trypsin']), ['PEPTIDEK', 'S'])\nself.assertEqual(parser._cleave('PEPTIDEKS', 'trypsin'), ['PEPTIDEK', 'S'])\n+ self.assertEqual(parser._cleave('PEPTIDEKS', 'Trypsin'), ['PEPTIDEK', 'S'])\nfor seq in self.simple_sequences:\nfor elem in parser.cleave(\nseq, 'trypsin', int(random.uniform(1, 10))):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add parser.psims_rules |
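With the new mapping, cleavage rules can be referenced by PSI-MS name or accession, as in the added doctests; a short usage sketch:

```python
from pyteomics import parser
from pyteomics.auxiliary import cvquery

print(parser.cleave('AKAKBK', 'Trypsin/P', 0))      # {'AK', 'BK'}
print(parser.cleave('PEPTIDEKS', 'MS:1001251'))     # {'PEPTIDEK', 'S'}
print(cvquery(parser.psims_rules, 'MS:1001918'))    # '(?<=W)'
```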
377,522 | 26.03.2021 15:44:23 | -10,800 | cbdd1c26e836b0323369778be2493110aaca26e3 | Update changelog and version - to 4.4.2 | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.4.2 beta 5\n-------------\n+4.4.2\n+-----\n- Add cleavage rules from `MS ontology <http://purl.obolibrary.org/obo/MS_1001045>`_ as\n:py:data:`pyteomics.parser.psims_rules`. :py:func:`pyteomics.parser.cleave` now understands keys and accessions from\n:py:data:`psims_rules` as rules.\n+ - Improve mzIdentML parser performance (and possibly others in some cases) by relying more on offset indexes\n+ (`#34 <https://github.com/levitsky/pyteomics/pull/34>`_ by Joshua Klein).\n+\n- Extend the :py:class:`pyteomics.mztab.MzTab` parser with auto-generated properties. Almost all metadata entities are\nnow exposed as properties on the parser object (`#23 <https://github.com/levitsky/pyteomics/pull/23>`_ by Joshua Klein).\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.4.2b5'\n+__version__ = '4.4.2'\nfrom collections import namedtuple\nimport re\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update changelog and version - to 4.4.2 |
377,522 | 02.04.2021 21:42:25 | -10,800 | 1a98f3d7512a73630c4bd76432c10d6a5218624f | Remove unused imports in USI test | [
{
"change_type": "MODIFY",
"old_path": "tests/test_usi.py",
"new_path": "tests/test_usi.py",
"diff": "@@ -4,8 +4,6 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport unittest\n-from itertools import product\n-import operator as op\nfrom pyteomics.usi import USI, proxi\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove unused imports in USI test |
377,522 | 05.04.2021 16:23:38 | -10,800 | d2f9881a7ba340845defb2fc5149961831e8e168 | Raise PyteomicsError for unknown backend names | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/usi.py",
"new_path": "pyteomics/usi.py",
"diff": "@@ -52,6 +52,8 @@ except ImportError:\ndef coerce_array(array_data):\nreturn [float(v) for v in array_data]\n+from .auxiliary import PyteomicsError\n+\nclass USI(namedtuple(\"USI\", ['protocol', 'dataset', 'datafile', 'scan_identifier_type', 'scan_identifier', 'interpretation'])):\n'''Represent a Universal Spectrum Identifier (USI).\n@@ -444,12 +446,14 @@ def proxi(usi, backend=default_backend, **kwargs):\nif isinstance(backend, str):\nif backend == AGGREGATOR_KEY:\nbackend = AGGREGATOR\n- else:\n+ elif backend in _proxies:\nbackend = _proxies[backend](**kwargs)\n+ else:\n+ raise PyteomicsError(\"Unknown PROXI backend name: {}.\".format(backend))\nelif isinstance(backend, type) and issubclass(backend, (_PROXIBackend, PROXIAggregator)):\nbackend = backend(**kwargs)\nelif callable(backend):\nbackend = backend\nelse:\n- raise TypeError(\"Unrecognized backend type\")\n+ raise TypeError(\"Unrecognized backend type: {0.__name__}\".format(type(backend)))\nreturn backend(usi)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Raise PyteomicsError for unknown backend names |
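Passing an unrecognized backend name now fails fast with a `PyteomicsError` before any network request is made; the USI string below is the one used in the tests:

```python
from pyteomics import usi
from pyteomics.auxiliary import PyteomicsError

usi_str = 'mzspec:MSV000085202:210320_SARS_CoV_2_T:scan:131256'
try:
    usi.proxi(usi_str, backend='BackendName')   # not a registered PROXI backend
except PyteomicsError as err:
    print(err)                                  # error message names the unknown backend
```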
377,522 | 05.04.2021 16:24:11 | -10,800 | 7b8b6612cd7023ea59f7003e4a37f563c9ebed83 | Add test for proxi aggregator | [
{
"change_type": "MODIFY",
"old_path": "tests/test_usi.py",
"new_path": "tests/test_usi.py",
"diff": "@@ -5,7 +5,8 @@ pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir\nimport unittest\n-from pyteomics.usi import USI, proxi\n+from pyteomics.usi import USI, proxi, AGGREGATOR_KEY\n+from pyteomics.auxiliary import PyteomicsError\nclass USITest(unittest.TestCase):\n@@ -34,7 +35,26 @@ class PROXITest(unittest.TestCase):\nfor a, b in zip(response['intensity array'], usi_proxi_data['intensity array']):\nself.assertAlmostEqual(a, b, 3)\n+ def test_errors(self):\n+ usi_str = \"mzspec:MSV000085202:210320_SARS_CoV_2_T:scan:131256\"\n+ with self.assertRaises(TypeError, msg='Unrecognized backend type: NoneType'):\n+ proxi(usi_str, backend=None)\n+ with self.assertRaises(PyteomicsError, msg='Unknown PROXI backend name: BackendName'):\n+ proxi(usi_str, backend='BackendName')\n+\n+\n+class PROXIAggregatorTest(unittest.TestCase):\n+ def test_request(self):\n+ usi_str = \"mzspec:MSV000085202:210320_SARS_CoV_2_T:scan:131256\"\n+ response = proxi(usi_str, backend=AGGREGATOR_KEY)\n+\n+ assert set(usi_proxi_data.keys()) <= set(response.keys())\n+\n+ for a, b in zip(response['m/z array'], usi_proxi_data['m/z array']):\n+ self.assertAlmostEqual(a, b, 3)\n+ for a, b in zip(response['intensity array'], usi_proxi_data['intensity array']):\n+ self.assertAlmostEqual(a, b, 3)\nif __name__ == \"__main__\":\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add test for proxi aggregator |
377,522 | 08.04.2021 16:40:55 | -10,800 | 295e30e15d0cf86cb0926ff824397a2a6acc8e77 | Limit paths in unit-testing Github Action | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/pythonpackage.yml",
"new_path": ".github/workflows/pythonpackage.yml",
"diff": "name: tests\n-on: [push, pull_request]\n+on:\n+ push:\n+ paths:\n+ - '**.py'\n+ pull_request:\n+ paths:\n+ - '**.py'\njobs:\nbuild:\n@@ -13,7 +19,7 @@ jobs:\nsteps:\n- uses: actions/checkout@v2\n- name: Set up Python ${{ matrix.python-version }}\n- uses: actions/setup-python@v1\n+ uses: actions/setup-python@v2\nwith:\npython-version: ${{ matrix.python-version }}\n- name: Install dependencies\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Limit paths in unit-testing Github Action |
377,522 | 08.04.2021 17:14:28 | -10,800 | 34c87ac7198b7cff45cb46a4001345e87c6bb5a4 | Drop Python 3.5 from Github tests | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/pythonpackage.yml",
"new_path": ".github/workflows/pythonpackage.yml",
"diff": "@@ -14,7 +14,7 @@ jobs:\nruns-on: ubuntu-latest\nstrategy:\nmatrix:\n- python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9]\n+ python-version: [2.7, 3.6, 3.7, 3.8, 3.9]\nsteps:\n- uses: actions/checkout@v2\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Drop Python 3.5 from Github tests |
377,522 | 18.05.2021 17:57:13 | -10,800 | d9986a677bac0354797666e5e05d31bcbf96b2a6 | Draft addition of spectrum_utils backend | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/utils.py",
"new_path": "pyteomics/auxiliary/utils.py",
"diff": "@@ -21,6 +21,7 @@ try:\nexcept ImportError:\npynumpress = None\n+\ndef print_tree(d, indent_str=' -> ', indent_count=1):\n\"\"\"Read a nested dict (with strings as keys) and print its structure.\n\"\"\"\n@@ -91,16 +92,19 @@ _default_compression_map = {\n'zlib compression': zlib.decompress,\n}\n+\ndef _pynumpressDecompress(decoder):\ndef decode(data):\nreturn decoder(np.frombuffer(data, dtype=np.uint8))\nreturn decode\n+\ndef _zlibNumpress(decoder):\ndef decode(data):\nreturn decoder(np.frombuffer(zlib.decompress(data), dtype=np.uint8))\nreturn decode\n+\nif pynumpress:\n_default_compression_map.update(\n{\n@@ -112,6 +116,7 @@ if pynumpress:\n'MS-Numpress linear prediction compression followed by zlib compression': _zlibNumpress(pynumpress.decode_linear),\n})\n+\nif np is not None:\nclass BinaryDataArrayTransformer(object):\n\"\"\"A base class that provides methods for reading\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -40,7 +40,7 @@ See also\nDependencies\n------------\n-This module requires :py:mod:`matplotlib`. Optional dependencies: :py:mod:`adjustText`.\n+This module requires :py:mod:`matplotlib`. Optional dependencies: :py:mod:`adjustText`, :py:mod:`spectrum_utils`.\n-------------------------------------------------------------------------------\n@@ -63,7 +63,13 @@ This module requires :py:mod:`matplotlib`. Optional dependencies: :py:mod:`adjus\nimport pylab\nimport numpy as np\nfrom .auxiliary import linear_regression, PyteomicsError\n-from . import parser, mass\n+from . import parser, mass, mgf, mzml\n+\n+try:\n+ import spectrum_utils.spectrum as sus\n+ import spectrum_utils.plot as sup\n+except ImportError:\n+ sus = sup = None\ndef plot_line(a, b, xlim=None, *args, **kwargs):\n@@ -360,19 +366,35 @@ def plot_spectrum(spectrum, centroided=True, *args, **kwargs):\ndef _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\n+\n+ # common kwargs\ntypes = kwargs.pop('ion_types', ('b', 'y'))\n- maxcharge = kwargs.pop('maxcharge', 1)\naa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)\nmass_data = kwargs.pop('mass_data', mass.nist_mass)\nion_comp = kwargs.pop('ion_comp', mass.std_ion_comp)\n- std_colors = {i: 'red' for i in 'xyz'}\n- std_colors.update({i: 'blue' for i in 'abc'})\n+ std_colors = {\n+ 'a': '#388E3C',\n+ 'b': '#1976D2',\n+ 'c': '#00796B',\n+ 'x': '#7B1FA2',\n+ 'y': '#D32F2F',\n+ 'z': '#F57C00',\n+ }\ncolors = kwargs.pop('colors', std_colors)\nftol = kwargs.pop('ftol', None)\n- centroided = kwargs.pop('centroided', True)\nif ftol is None:\nrtol = kwargs.pop('rtol', 1e-5)\ntext_kw = kwargs.pop('text_kw', dict(ha='center', clip_on=True, backgroundcolor='#ffffff99'))\n+ precursor_charge = kwargs.pop('precursor_charge', None)\n+ if precursor_charge is None:\n+ precursor_charge = _get_precursor_charge(spectrum)\n+ if precursor_charge is None:\n+ raise PyteomicsError('Could not extract precursor charge from spectrum. 
Please specify `precursor_charge` kwarg.')\n+ maxcharge = kwargs.pop('maxcharge', max(1, precursor_charge - 1))\n+ # end of common kwargs\n+\n+ # backend-specific kwargs\n+ centroided = kwargs.pop('centroided', True)\nadjust = kwargs.pop('adjust_text', None)\nif adjust or adjust is None:\ntry:\n@@ -386,6 +408,8 @@ def _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nelse:\nif adjust is None:\nadjust = True\n+ # end of backend-specific kwargs\n+\nparsed = parser.parse(peptide, True, labels=list(aa_mass) + [parser.std_cterm, parser.std_nterm])\nn = len(parsed)\nmaxpeak = spectrum['intensity array'].max()\n@@ -423,8 +447,97 @@ def _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nreturn plot_spectrum(spectrum, centroided, *args, **kwargs)\n+def _get_precursor_charge(spectrum):\n+ try:\n+ return mgf.MGFBase.parse_precursor_charge(spectrum['params']['charge'], list_only=True)[0]\n+ except (PyteomicsError, KeyError):\n+ pass\n+ try:\n+ return spectrum['precursorList']['precursor'][0]['selectedIonList']['selectedIon'][0]['selected ion m/z']\n+ except KeyError:\n+ pass\n+ return None\n+\n+\n+def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):\n+ if sus is None:\n+ raise PyteomicsError('This backend requires `spectrum_utils`.')\n+\n+ # common kwargs\n+ types = kwargs.pop('ion_types', ('b', 'y'))\n+ aa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)\n+\n+ tol = kwargs.pop('ftol', None)\n+ if tol is None:\n+ tol = kwargs.pop('rtol', 1e-5) * 1e6\n+ tol_mode = 'ppm'\n+ else:\n+ tol_mode = 'Da'\n+\n+ text_kw = kwargs.pop('text_kw', None)\n+ precursor_charge = kwargs.pop('precursor_charge', None)\n+ if precursor_charge is None:\n+ precursor_charge = _get_precursor_charge(spectrum)\n+ if precursor_charge is None:\n+ raise PyteomicsError('Could not extract precursor charge from spectrum. 
Please specify `precursor_charge` kwarg.')\n+ maxcharge = kwargs.pop('maxcharge', max(1, precursor_charge - 1))\n+ # end of common kwargs\n+\n+ # backend-specific parameters\n+ remove_precursor_peak = kwargs.pop('remove_precursor_peak', False)\n+ mz_range = kwargs.pop('mz_range', None)\n+ precursor_mz = mass.calculate_mass(peptide, aa_mass=aa_mass, charge=precursor_charge)\n+\n+ min_intensity = kwargs.pop('min_intensity', 0.0)\n+ max_num_peaks = kwargs.pop('max_num_peaks', None)\n+ scaling = kwargs.pop('scaling', None)\n+ max_intensity = kwargs.pop('max_intensity', None)\n+ peak_assignment = kwargs.pop('peak_assignment', 'most_intense')\n+\n+ spectrum = sus.MsmsSpectrum(\n+ None, precursor_mz, precursor_charge, spectrum['m/z array'], spectrum['intensity array'],\n+ peptide=peptide)\n+ if mz_range:\n+ spectrum = spectrum.set_mz_range(*mz_range)\n+ if remove_precursor_peak:\n+ spectrum = spectrum.remove_precursor_peak(tol, tol_mode)\n+ spectrum = spectrum.filter_intensity(min_intensity=min_intensity, max_num_peaks=max_num_peaks\n+ ).scale_intensity(scaling, max_intensity\n+ ).annotate_peptide_fragments(tol, tol_mode, types, maxcharge, peak_assignment)\n+ return spectrum\n+\n+\n+class SpectrumUtilsColorScheme:\n+ def __init__(self, colors):\n+ self.colors = colors\n+ self.previous_colors = sup.colors.copy()\n+\n+ def __enter__(self):\n+ if self.colors:\n+ sup.colors.update(self.colors)\n+\n+ def __exit__(self, *args, **kwargs):\n+ sup.colors = self.previous_colors\n+\n+\n+def _spectrum_utils_annotate_plot(spectrum, peptide, *args, **kwargs):\n+\n+ with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\n+ spectrum = _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs)\n+ return sup.spectrum(spectrum)\n+\n+\n+def _spectrum_utils_annotate_iplot(spectrum, peptide, *args, **kwargs):\n+\n+ with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\n+ spectrum = _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs)\n+ return sup.spectrum(spectrum)\n+\n+\n_annotation_backends = {\n'default': _default_annotate_spectrum,\n+ 'spectrum_utils': _spectrum_utils_annotate_plot,\n+ 'spectrum_utils.iplot': _spectrum_utils_annotate_iplot,\n}\n@@ -438,35 +551,65 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\npeptide : str\nA modX sequence.\nbackend : str, keyword only, optional\n- One of `{'default',}`.\n+ One of `{'default', 'spectrum_utils', 'spectrum_utils.iplot'}`.\n+ The `spectrum_utils` backend requires installing :py:mod:`spectrum_utils`.\n+ The `spectrum_utils.iplot` backend requires installing :py:mod:`spectrum_utils[iplot]`.\nion_types : Container, keyword only, optional\nIon types to be considered for annotation. Default is `('b', 'y')`.\n+ precursor_charge : str, keyword only, optional\n+ If not specified, an attempt is made to extract it from `spectrum`.\nmaxcharge : int, keyword only, optional\n- Maximum charge state for fragment ions to be considered. Default is `1`.\n+ Maximum charge state for fragment ions to be considered. Default is `precursor_charge - 1`.\ncolors : dict, keyword only, optional\n- Keys are ion types, values are colors to plot the annotated peaks with. Defaults to a red-blue scheme.\n+ Keys are ion types, values are colors to plot the annotated peaks with. Default depends on backend.\nftol : float, keyword only, optional\nA fixed m/z tolerance value for peak matching. Alternative to `rtol`.\nrtol : float, keyword only, optional\nA relative m/z error for peak matching. 
Default is 10 ppm.\ntext_kw : dict, keyword only, optional\nKeyword arguments for :py:func:`pylab.text`.\n- ion_comp : dict, keyword only, optional\n- A dictionary defining ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.\n- mass_data : dict, keyword only, optional\n- A dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.\naa_mass : dict, keyword only, optional\nA dictionary of amino acid residue masses.\n*args\nPassed to the plotting backend.\n**kwargs\nPassed to the plotting backend.\n+\n+\ncentroided : bool, keyword only, optional\nPassed to :py:func:`plot_spectrum`. Only works with `default` backend.\n+ ion_comp : dict, keyword only, optional\n+ A dictionary defining ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.\n+ Only works with `default` backend.\n+ mass_data : dict, keyword only, optional\n+ A dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.\n+ Only works with `default` backend.\nadjust_text : bool, keyword only, optional\nAdjust the overlapping text annotations using :py:mod:`adjustText`. Only works with `default` backend.\nadjust_kw : dict, keyword only, optional\nKeyword arguments for :py:func:`adjust_text`. Only works with `default` backend.\n+\n+\n+ remove_precursor_peak : bool, keyword only, optional\n+ Remove precursor peak from spectrum before annotation. Default is :p:const:`False`.\n+ Only works with `spectrum_utils` backend.\n+ min_intensity : float, keyword only, optional\n+ Remove low-intensity peaks; this is a factor of maximum peak intensity. Default is 0 (no filtering).\n+ Only works with `spectrum_utils` backend.\n+ max_num_peaks : int or None, keyword only, optional\n+ Remove low-intensity peaks; this is the number of peaks to keep. Default is :py:const:`None` (no filtering).\n+ Only works with `spectrum_utils` backend.\n+ scaling : one of `{'root', 'log', 'rank'}` or None, keyword only, optional\n+ Scaling to apply to peak intensities. Only works with `spectrum_utils` backend.\n+ max_intensity : float or None, keyword only, optional\n+ Intensity of the most intense peak relative to which the peaks will be scaled\n+ (the default is :py:const:`None`, which means that no scaling\n+ relative to the most intense peak will be performed).\n+ Only works with `spectrum_utils` backend.\n+ peak_assignment : one of `{'most_intense', 'nearest_mz'}`, keyword only, optional\n+ In case multiple peaks occur within the given mass window around a theoretical peak,\n+ only a single peak will be annotated with the fragment type.\n+ Default is `'most_intense'`. Only works with `spectrum_utils` backend.\n\"\"\"\nbname = kwargs.pop('backend', 'default')\nbackend = _annotation_backends.get(bname)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Draft addition of spectrum_utils backend |
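The docstring added in the diff above documents backend selection for `pyteomics.pylab_aux.annotate_spectrum`. A minimal usage sketch, assuming an MGF file named `example.mgf` and an arbitrary peptide and precursor charge (none of these values come from the commit):

```python
import pylab
from pyteomics import mgf, pylab_aux

# Read one spectrum from an MGF file; 'example.mgf' is a placeholder path.
with mgf.read('example.mgf') as reader:
    spectrum = next(reader)

# Annotate b/y fragment ions with the spectrum_utils backend added here.
# 'PEPTIDE' and the precursor charge are illustrative values.
pylab_aux.annotate_spectrum(spectrum, 'PEPTIDE',
                            backend='spectrum_utils',
                            ion_types=('b', 'y'),
                            precursor_charge=2,
                            rtol=1e-5)        # 10 ppm matching tolerance
pylab.show()
```

The `default` backend keeps the previous pylab-based behaviour, while `spectrum_utils` and `spectrum_utils.iplot` need the optional `spectrum_utils` dependency.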
377,522 | 20.05.2021 18:13:47 | -10,800 | af182eeb4ce65c1850096b72d2f5ad4ad6bf253f | Fix numpy deprecation warnings, update changelog | [
{
"change_type": "MODIFY",
"old_path": ".gitignore",
"new_path": ".gitignore",
"diff": "@@ -4,3 +4,4 @@ __pycache__\n*.egg-info\n*.pyc\n.ipynb_checkpoints\n+*.diff\n"
},
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.5dev2\n+4.5dev3\n-------\n- Add support for mzMLb (`#35 <https://github.com/levitsky/pyteomics/pull/35>`_\nand `#38 <https://github.com/levitsky/pyteomics/pull/38>`_ by Joshua Klein).\n- Add ProteomeExchange backed for PROXI requests and implement an aggregator for responses from all backends\n- (`#36 <https://github.com/levitsky/pyteomics/pull/36>`_ by Joshua Klein).\n+ (`#36 <https://github.com/levitsky/pyteomics/pull/36>`_ and `#45 <https://github.com/levitsky/pyteomics/pull/45>`_ by Joshua Klein).\n4.4.2\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.5dev2'\n+__version__ = '4.5dev3'\nfrom collections import namedtuple\nimport re\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -13,7 +13,7 @@ class ComparableArray(np.ndarray):\ndef __eq__(self, other):\nif not isinstance(other, np.ndarray):\nreturn False\n- other = np.asarray(other, dtype=np.float)\n+ other = np.asarray(other, dtype=float)\nreturn self.shape == other.shape and np.allclose(self, other)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -326,22 +326,22 @@ class FilterTest(unittest.TestCase):\nself._run_check_pep(psms, pep=self.pep)\ndef test_filter_array_str_is_decoy(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nself._run_check(psms, is_decoy='is decoy')\ndef test_filter_pep_array_str_pep(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nself._run_check_pep(psms, pep='pep')\ndef test_filter_array_str_is_decoy_str_key(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nself._run_check(psms, is_decoy='is decoy', key='score')\ndef test_filter_pep_array_str_pep_str_key(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nself._run_check_pep(psms, pep='pep', key='score')\n@@ -386,7 +386,7 @@ class FilterTest(unittest.TestCase):\nself.assertEqual(len(f11), 21)\ndef test_filter_array_iter_key_str_is_decoy(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nkey = iter([self.key(psm) for psm in psms])\nf11 = aux.filter(psms, key=key, is_decoy='is decoy', fdr=0.5)\n@@ -397,7 +397,7 @@ class FilterTest(unittest.TestCase):\nself.assertEqual(len(f11), 26)\ndef test_filter_pep_array_iter_key_str_is_decoy(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nkey = iter([self.key(psm) for psm in psms])\nf = aux.filter(psms, key=key, pep='pep', fdr=0.02)\n@@ -448,25 +448,25 @@ class FilterTest(unittest.TestCase):\nself._run_check_pep(psms, key='score', pep=self.pep)\ndef test_filter_dataframe_str_is_decoy(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\npsms = pd.DataFrame(psms)\nself._run_check(psms, is_decoy='is decoy')\ndef test_filter_pep_dataframe_str_pep(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, 
p))) for s, l, p in self.psms], dtype=dtype)\npsms = pd.DataFrame(psms)\nself._run_check(psms, pep='pep', key=self.key)\ndef test_filter_dataframe_str_key_str_is_decoy(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\npsms = pd.DataFrame(psms)\nself._run_check(psms, key='score', is_decoy='is decoy')\ndef test_filter_empty_dataframe_str_key_str_is_decoy(self):\n- # dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ # dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = pd.DataFrame({'score': [], 'is decoy': []})\nf = aux.filter(psms, key='score', is_decoy='is decoy', fdr=0.1)\nself.assertEqual(f.shape[0], 0)\n@@ -474,41 +474,41 @@ class FilterTest(unittest.TestCase):\nself.assertEqual(f.shape[0], 0)\ndef test_filter_pep_dataframe_str_key_str_pep(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\npsms = pd.DataFrame(psms)\nself._run_check_pep(psms, key='score', pep='pep')\ndef test_filter_dataframe_arr_key_str_is_decoy(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nkey = psms['score']\npsms = pd.DataFrame(psms)\nself._run_check(psms, key=key, is_decoy='is decoy')\ndef test_filter_pep_dataframe_arr_key_str_is_decoy(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nkey = psms['score']\npsms = pd.DataFrame(psms)\nself._run_check(psms, key=key, pep='pep')\ndef test_filter_dataframe_arr_key(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nkey = psms['score']\npsms = pd.DataFrame(psms)\nself._run_check(psms, key=key)\ndef test_filter_pep_dataframe_arr_key(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nkey = psms['score']\npsms = pd.DataFrame(psms)\nself._run_check_pep(psms, key=key, pep=self.pep)\ndef test_filter_dataframe_list_key_list_is_decoy(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in 
self.psms], dtype=dtype)\nkey = list(psms['score'])\nis_decoy = list(psms['is decoy'])\n@@ -516,7 +516,7 @@ class FilterTest(unittest.TestCase):\nself._run_check(psms, key=key, is_decoy=is_decoy)\ndef test_filter_pep_dataframe_list_key_list_pep(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nkey = list(psms['score'])\npep = list(psms['pep'])\n@@ -683,7 +683,7 @@ class FilterTest(unittest.TestCase):\ndef test_filter_two_dataframes_str_key_str_is_decoy(self):\ni = np.random.randint(1, len(self.psms)-1)\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\npsms1 = pd.DataFrame(psms[:i])\npsms2 = pd.DataFrame(psms[i:])\n@@ -691,7 +691,7 @@ class FilterTest(unittest.TestCase):\ndef test_filter_pep_two_dataframes_str_key_str_pep(self):\ni = np.random.randint(1, len(self.psms)-1)\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\npsms1 = pd.DataFrame(psms[:i])\npsms2 = pd.DataFrame(psms[i:])\n@@ -699,7 +699,7 @@ class FilterTest(unittest.TestCase):\ndef test_filter_two_dataframes_str_key_arr_is_decoy(self):\ni = np.random.randint(1, len(self.psms)-1)\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nis_decoy = psms['is decoy']\npsms1 = pd.DataFrame(psms[:i])\n@@ -708,7 +708,7 @@ class FilterTest(unittest.TestCase):\ndef test_filter_pep_two_dataframes_str_key_arr_pep(self):\ni = np.random.randint(1, len(self.psms)-1)\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\npep = psms['pep']\npsms1 = pd.DataFrame(psms[:i])\n@@ -717,7 +717,7 @@ class FilterTest(unittest.TestCase):\ndef test_filter_two_dataframes_str_key_iter_is_decoy(self):\ni = np.random.randint(1, len(self.psms)-1)\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\nis_decoy = iter(psms['is decoy'])\npsms1 = pd.DataFrame(psms[:i])\n@@ -731,7 +731,7 @@ class FilterTest(unittest.TestCase):\ndef test_filter_pep_two_dataframes_str_key_iter_pep(self):\ni = np.random.randint(1, len(self.psms)-1)\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms = 
np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\npep = iter(psms['pep'])\npsms1 = pd.DataFrame(psms[:i])\n@@ -768,12 +768,12 @@ class FDRTest(unittest.TestCase):\nself.assertAlmostEqual(aux.fdr(iter(psms), pep=iter(pep)), 0.0355)\ndef test_fdr_array_str(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms_ = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in psms], dtype=dtype)\nself._run_check(psms_, is_decoy='is decoy', pep='pep')\ndef test_fdr_df_str(self):\n- dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]\npsms_ = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in psms], dtype=dtype)\npsms1 = pd.DataFrame(psms_)\nself._run_check(psms1, is_decoy='is decoy', pep='pep')\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix numpy deprecation warnings, update changelog |
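The commit above replaces the deprecated `np.bool` and `np.float` aliases in the test suite with `np.bool_` and plain `float`. A small sketch of the same pattern (the row values are made up, not taken from the tests):

```python
import numpy as np

# np.bool and np.float are deprecated aliases of the Python builtins in recent
# NumPy releases; structured dtypes should use np.bool_ (or bool) and float.
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(1, 'd', 0.01, True)], dtype=dtype)  # illustrative PSM row
```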
377,522 | 30.06.2021 00:32:00 | -10,800 | 0de60bc63807905584ff1bc6dd45e660e82cb5f6 | Split the spectrum_utils annotation code | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -63,7 +63,7 @@ This module requires :py:mod:`matplotlib`. Optional dependencies: :py:mod:`adjus\nimport pylab\nimport numpy as np\nfrom .auxiliary import linear_regression, PyteomicsError\n-from . import parser, mass, mgf, mzml\n+from . import parser, mass, mgf\ntry:\nimport spectrum_utils.spectrum as sus\n@@ -459,14 +459,35 @@ def _get_precursor_charge(spectrum):\nreturn None\n-def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):\n+def _spectrum_utils_create_spectrum(spectrum, peptide, *args, **kwargs):\nif sus is None:\nraise PyteomicsError('This backend requires `spectrum_utils`.')\n+ # backend-specific parameters\n+ mz_range = kwargs.pop('mz_range', None)\n+\n+\n+ min_intensity = kwargs.pop('min_intensity', 0.0)\n+ max_num_peaks = kwargs.pop('max_num_peaks', None)\n+ scaling = kwargs.pop('scaling', None)\n+ max_intensity = kwargs.pop('max_intensity', None)\n+ spectrum = sus.MsmsSpectrum(\n+ None, kwargs.pop('precursor_mz', None), kwargs.pop('precursor_charge', None),\n+ spectrum['m/z array'], spectrum['intensity array'],\n+ peptide=peptide)\n+ if mz_range:\n+ spectrum = spectrum.set_mz_range(*mz_range)\n+\n+ spectrum = spectrum.filter_intensity(min_intensity=min_intensity, max_num_peaks=max_num_peaks\n+ ).scale_intensity(scaling, max_intensity)\n+ return spectrum\n+\n+\n+def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):\n+\n# common kwargs\n- types = kwargs.pop('ion_types', ('b', 'y'))\naa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)\n-\n+ types = kwargs.pop('ion_types', ('b', 'y'))\ntol = kwargs.pop('ftol', None)\nif tol is None:\ntol = kwargs.pop('rtol', 1e-5) * 1e6\n@@ -474,40 +495,38 @@ def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nelse:\ntol_mode = 'Da'\n- text_kw = kwargs.pop('text_kw', None)\n+ kwargs.pop('text_kw', None) # not used\n+\nprecursor_charge = kwargs.pop('precursor_charge', None)\nif precursor_charge is None:\nprecursor_charge = _get_precursor_charge(spectrum)\nif precursor_charge is None:\n- raise PyteomicsError('Could not extract precursor charge from spectrum. Please specify `precursor_charge` kwarg.')\n+ raise PyteomicsError('Could not extract precursor charge from spectrum. 
'\n+ 'Please specify `precursor_charge` keyword argument.')\n+ precursor_mz = mass.calculate_mass(peptide, aa_mass=aa_mass, charge=precursor_charge)\nmaxcharge = kwargs.pop('maxcharge', max(1, precursor_charge - 1))\n# end of common kwargs\n# backend-specific parameters\n- remove_precursor_peak = kwargs.pop('remove_precursor_peak', False)\n- mz_range = kwargs.pop('mz_range', None)\n- precursor_mz = mass.calculate_mass(peptide, aa_mass=aa_mass, charge=precursor_charge)\n-\n- min_intensity = kwargs.pop('min_intensity', 0.0)\n- max_num_peaks = kwargs.pop('max_num_peaks', None)\n- scaling = kwargs.pop('scaling', None)\n- max_intensity = kwargs.pop('max_intensity', None)\npeak_assignment = kwargs.pop('peak_assignment', 'most_intense')\n+ remove_precursor_peak = kwargs.pop('remove_precursor_peak', False)\n+ annotate_mz = kwargs.pop('annotate_mz', None)\n+ annotate_mz_text = kwargs.pop('annotate_mz_text', None)\n- spectrum = sus.MsmsSpectrum(\n- None, precursor_mz, precursor_charge, spectrum['m/z array'], spectrum['intensity array'],\n- peptide=peptide)\n- if mz_range:\n- spectrum = spectrum.set_mz_range(*mz_range)\n+ spectrum = _spectrum_utils_create_spectrum(spectrum, peptide, *args,\n+ precursor_mz=precursor_mz, precursor_charge=precursor_charge, **kwargs)\nif remove_precursor_peak:\nspectrum = spectrum.remove_precursor_peak(tol, tol_mode)\n- spectrum = spectrum.filter_intensity(min_intensity=min_intensity, max_num_peaks=max_num_peaks\n- ).scale_intensity(scaling, max_intensity\n- ).annotate_peptide_fragments(tol, tol_mode, types, maxcharge, peak_assignment)\n+ spectrum = spectrum.annotate_peptide_fragments(tol, tol_mode, types, maxcharge, peak_assignment)\n+ if annotate_mz:\n+ for i, mz in enumerate(annotate_mz):\n+ spectrum = spectrum.annotate_mz_fragment(mz, None, tol, tol_mode, peak_assignment,\n+ annotate_mz_text[i] if annotate_mz_text else None)\nreturn spectrum\nclass SpectrumUtilsColorScheme:\n+ \"\"\"Context manager that temporarily changes `spectrum_utils.plot.colors`.\"\"\"\ndef __init__(self, colors):\nself.colors = colors\nself.previous_colors = sup.colors.copy()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Split the spectrum_utils annotation code |
377,522 | 11.08.2021 12:35:07 | -10,800 | 5408a430c07a34ed06da093873bdd190359c1f2a | Start work on modifications | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -504,6 +504,7 @@ def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nraise PyteomicsError('Could not extract precursor charge from spectrum. '\n'Please specify `precursor_charge` keyword argument.')\nprecursor_mz = mass.calculate_mass(peptide, aa_mass=aa_mass, charge=precursor_charge)\n+\nmaxcharge = kwargs.pop('maxcharge', max(1, precursor_charge - 1))\n# end of common kwargs\n@@ -512,9 +513,14 @@ def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nremove_precursor_peak = kwargs.pop('remove_precursor_peak', False)\nannotate_mz = kwargs.pop('annotate_mz', None)\nannotate_mz_text = kwargs.pop('annotate_mz_text', None)\n+ variable_mods = kwargs.get('modifications')\n+ if not variable_mods:\n+ clean_sequence, variable_mods = _spectrum_utils_parse_sequence(peptide, aa_mass)\n+ else:\n+ clean_sequence = peptide\n- spectrum = _spectrum_utils_create_spectrum(spectrum, peptide, *args,\n- precursor_mz=precursor_mz, precursor_charge=precursor_charge, **kwargs)\n+ spectrum = _spectrum_utils_create_spectrum(spectrum, clean_sequence, *args,\n+ precursor_mz=precursor_mz, precursor_charge=precursor_charge, modifications=variable_mods, **kwargs)\nif remove_precursor_peak:\nspectrum = spectrum.remove_precursor_peak(tol, tol_mode)\nspectrum = spectrum.annotate_peptide_fragments(tol, tol_mode, types, maxcharge, peak_assignment)\n@@ -539,6 +545,41 @@ class SpectrumUtilsColorScheme:\nsup.colors = self.previous_colors\n+class SpectrumUtilsStaticModifications:\n+ \"\"\"Context manager that temporarily changes `spectrum_utils` static modifications.\"\"\"\n+ def __init__(self, mods):\n+ self.mods = mods\n+ self.previous_aa_mass = sus._aa_mass\n+\n+ def __enter__(self):\n+ if self.mods:\n+ sus.reset_modifications()\n+ for mod in self.mods:\n+ sus.static_modification(*mod)\n+\n+ def __exit__(self, *args, **kwargs):\n+ sup._aa_mass = self.previous_aa_mass\n+\n+\n+def _spectrum_utils_parse_sequence(sequence, aa_mass=None):\n+ if isinstance(sequence, str):\n+ parsed = parser.parse(sequence, show_unmodified_termini=True)\n+ else:\n+ parsed = sequence\n+ mods = {}\n+ aa_mass = aa_mass or mass.std_aa_mass\n+ if parsed[0] != parser.std_nterm:\n+ mods['N-term'] = aa_mass[parsed[0]]\n+ if parsed[-1] != parser.std_cterm:\n+ mods['C-term'] = aa_mass[parsed[-1]]\n+ clean_sequence = []\n+ for i, aa in enumerate(parsed[1:-1]):\n+ if len(aa) > 1:\n+ mods[i] = aa_mass.get(aa, aa_mass[aa[:-1]] + aa_mass[aa[-1]])\n+ clean_sequence.append(aa[-1])\n+ return ''.join(clean_sequence), mods\n+\n+\ndef _spectrum_utils_annotate_plot(spectrum, peptide, *args, **kwargs):\nwith SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\n@@ -547,10 +588,10 @@ def _spectrum_utils_annotate_plot(spectrum, peptide, *args, **kwargs):\ndef _spectrum_utils_annotate_iplot(spectrum, peptide, *args, **kwargs):\n-\n+ import spectrum_utils.iplot as supi\nwith SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\nspectrum = _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs)\n- return sup.spectrum(spectrum)\n+ return supi.spectrum(spectrum)\n_annotation_backends = {\n@@ -629,6 +670,10 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nIn case multiple peaks occur within the given mass window around a theoretical peak,\nonly a single peak will be annotated with the fragment type.\nDefault is `'most_intense'`. 
Only works with `spectrum_utils` backend.\n+ modifications : dict, optional\n+ A dict of variable modifications as described in\n+ `spectrum_utils documentation <https://spectrum-utils.readthedocs.io/en/latest/processing.html#variable-modifications>`_.\n+ You don't need to provide this if your `peptide` is a modX sequence and you supply `aa_mass`.\n\"\"\"\nbname = kwargs.pop('backend', 'default')\nbackend = _annotation_backends.get(bname)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Start work on modifications |
377,522 | 11.08.2021 19:44:00 | -10,800 | d47096235d15cd2a20ae44f9337f8c17a22cc6e7 | Update default featureXML schema_info and version, add overrides | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/_schema_defaults.py",
"new_path": "pyteomics/_schema_defaults.py",
"diff": "@@ -247,16 +247,22 @@ _trafoxml_schema_defaults = {'bools': set(),\n'ints': {('Pairs', 'count')},\n'lists': {'Pair', 'Param'}}\n-_featurexml_schema_defaults = {'bools': {\n- ('PeptideIdentification', 'higher_score_better'),\n- ('ProteinIdentification', 'higher_score_better'),\n- ('UnassignedPeptideIdentification', 'higher_score_better')},\n- 'charlists': set(),\n- 'floatlists': set(),\n+_featurexml_schema_defaults = {\n+ 'ints': {('PeptideHit', 'charge'),\n+ # ('PeptideIdentification', 'spectrum_reference'),\n+ ('SearchParameters', 'missed_cleavages'),\n+ # ('UnassignedPeptideIdentification', 'spectrum_reference'),\n+ ('featureList', 'count'),\n+ ('quality', 'dim'),\n+ ('position', 'dim'),\n+ ('feature', 'charge'),\n+ ('convexhull', 'nr'),\n+ },\n'floats': {('PeptideHit', 'score'),\n('PeptideIdentification', 'MZ'),\n('PeptideIdentification', 'RT'),\n('PeptideIdentification', 'significance_threshold'),\n+ ('ProteinHit', 'coverage'),\n('ProteinHit', 'score'),\n('ProteinIdentification', 'significance_threshold'),\n('SearchParameters', 'peak_mass_tolerance'),\n@@ -264,23 +270,42 @@ _featurexml_schema_defaults = {'bools': {\n('UnassignedPeptideIdentification', 'MZ'),\n('UnassignedPeptideIdentification', 'RT'),\n('UnassignedPeptideIdentification', 'significance_threshold'),\n- ('pt', 'x'), ('pt', 'y'), ('position', 'position'),\n- ('feature', 'overallquality'), ('feature', 'intensity')\n+ ('featureMap', 'version'),\n+ ('pt', 'x'),\n+ ('pt', 'y'),\n+ ('quality', 'quality'),\n+ ('position', 'position'),\n+ ('feature', 'overallquality'),\n+ ('feature', 'intensity'),\n},\n+ 'bools': {('PeptideIdentification', 'higher_score_better'),\n+ ('ProteinIdentification', 'higher_score_better'),\n+ ('SearchParameters', 'peak_mass_tolerance_ppm'),\n+ ('SearchParameters', 'precursor_peak_tolerance_ppm'),\n+ ('UnassignedPeptideIdentification', 'higher_score_better')},\n'intlists': set(),\n- 'ints': {('PeptideHit', 'charge'),\n- ('PeptideIdentification', 'spectrum_reference'),\n- ('SearchParameters', 'missed_cleavages'),\n- ('UnassignedPeptideIdentification', 'spectrum_reference'),\n- ('featureList', 'count'), ('convexhull', 'nr'),\n- ('position', 'dim'), ('feature', 'spectrum_index'),\n- ('feature', 'charge'), ('quality', 'dim'), ('quality', 'quality')},\n- 'lists': {'FixedModification', 'IdentificationRun',\n- 'PeptideHit', 'PeptideIdentification', 'ProteinHit',\n- 'UnassignedPeptideIdentification', 'VariableModification',\n- 'convexhull', 'dataProcessing', 'feature', 'hposition',\n- 'hullpoint', 'param', 'position', 'processingAction',\n- 'pt', 'quality', 'userParam'}}\n+ 'floatlists': set(),\n+ 'charlists': set(),\n+ 'lists': {'FixedModification',\n+ 'IdentificationRun',\n+ 'PeptideHit',\n+ 'PeptideIdentification',\n+ 'ProteinHit',\n+ 'ProteinIdentification',\n+ 'SearchParameters',\n+ 'UnassignedPeptideIdentification',\n+ 'UserParam',\n+ 'VariableModification',\n+ 'convexhull',\n+ 'dataProcessing',\n+ 'feature',\n+ 'hposition',\n+ 'hullpoint',\n+ 'param',\n+ 'position',\n+ 'processingAction',\n+ 'pt',\n+ 'quality'}}\n_tandem_schema_defaults = {'ints': {\n('group', 'z'), ('aa', 'at')} | {('domain', k) for k in [\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/openms/featurexml.py",
"new_path": "pyteomics/openms/featurexml.py",
"diff": "@@ -37,24 +37,50 @@ This module requres :py:mod:`lxml`.\n--------------------------------------------------------------------------------\n\"\"\"\n-from .. import xml, auxiliary as aux, _schema_defaults\n+from .. import xml, auxiliary as aux, _schema_defaults, version\nclass FeatureXML(xml.MultiProcessingXML):\n\"\"\"Parser class for featureXML files.\"\"\"\nfile_format = 'featureXML'\n_root_element = 'featureMap'\n_default_schema = _schema_defaults._featurexml_schema_defaults\n- _default_version = '1.6'\n+ _default_version = '1.9'\n_default_iter_tag = 'feature'\n_structures_to_flatten = {}\n_indexed_tags = {'feature'}\n_schema_location_param = 'noNamespaceSchemaLocation'\n+ _offending_keys = {'ints': {\n+ ('PeptideIdentification', 'spectrum_reference'),\n+ ('UnassignedPeptideIdentification', 'spectrum_reference'),\n+ ('quality', 'quality')\n+ }}\n+ _missing_keys = {'floats': {('quality', 'quality')}}\n+\ndef _get_info_smart(self, element, **kw):\nkw['recursive'] = kw.get('recursive', True)\ninfo = self._get_info(element, **kw)\nreturn info\n+ @xml._keepstate\n+ def _get_schema_info(self, read_schema=True):\n+ schema_info = super(FeatureXML, self)._get_schema_info(read_schema)\n+ if not read_schema:\n+ return schema_info\n+ file_version, schema = self.version_info\n+ if version._VersionInfo(file_version) < version._VersionInfo(self._default_version):\n+ for k, s in self._offending_keys.items():\n+ if k in schema_info:\n+ for elem in s:\n+ try:\n+ schema_info[k].remove(elem)\n+ except KeyError:\n+ pass\n+ for t, s in self._missing_keys.items():\n+ schema_info.setdefault(t, set()).update(s)\n+ return schema_info\n+\n+\ndef read(source, read_schema=True, iterative=True, use_index=False):\n\"\"\"Parse `source` and iterate through features.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -22,14 +22,39 @@ import re\nclass _VersionInfo(namedtuple('_VersionInfo', ('major', 'minor', 'micro', 'releaselevel', 'serial'))):\n\"\"\"Tuple mimicking :py:const:`sys.version_info`\"\"\"\ndef __new__(cls, version_str):\n+ if isinstance(version_str, str):\ngroups = re.match(r'(\\d+)\\.(\\d+)(?:\\.)?(\\d+)?([a-zA-Z]+)?(\\d+)?', version_str).groups()\ninst = super(_VersionInfo, cls).__new__(cls, *groups)\n+ else:\n+ inst = super(_VersionInfo, cls).__new__(cls, *(str(x) if x is not None else x for x in version_str))\ninst._version_str = version_str\n+ inst._version_ints = tuple(int(x) if isinstance(x, str) and x.isdigit() else x for x in inst)\nreturn inst\ndef __str__(self):\nreturn 'Pyteomics version {}'.format(self._version_str)\n+ def __lt__(self, other):\n+ if not isinstance(other, _VersionInfo):\n+ other = _VersionInfo(other)\n+ return self._version_ints < other._version_ints\n+\n+ def __gt__(self, other):\n+ if not isinstance(other, _VersionInfo):\n+ other = _VersionInfo(other)\n+ return self._version_ints > other._version_ints\n+\n+ def __le__(self, other):\n+ return self == other or self < other\n+\n+ def __ge__(self, other):\n+ return self == other or self > other\n+\n+ def __eq__(self, other):\n+ if not isinstance(other, _VersionInfo):\n+ other = _VersionInfo(other)\n+ return super(_VersionInfo, self).__eq__(other)\n+\nversion_info = _VersionInfo(__version__)\nversion = __version__\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update default featureXML schema_info and version, add overrides |
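The record above bumps the default featureXML schema defaults to version 1.9 and adds overrides for older files. A minimal reading sketch with a placeholder file name (the available keys depend on the input file; the schema defaults control how values such as 'intensity' and 'charge' are converted):

```python
from pyteomics.openms import featurexml

# Iterate over <feature> elements of an OpenMS featureXML file.
with featurexml.read('features.featureXML') as reader:
    for feature in reader:
        print(feature.get('id'), feature.get('intensity'), feature.get('charge'))
```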
377,522 | 11.08.2021 22:17:47 | -10,800 | 20e4a5bb4e5ffe0d41c8985893cffc678b17c95c | Fix superfluous use_index warning | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -914,7 +914,7 @@ def _check_use_index(source, use_index, default):\nwarnings.warn('use_index is {}, but the file mode is {}. '\n'Setting `use_index` to {}'.format(use_index, source.mode, binary))\nuse_index = binary\n- else:\n+ elif use_index is None:\nwarnings.warn('Could not check mode on {}. Specify `use_index` explicitly to avoid errors.'.format(source))\nif use_index is not None:\n@@ -925,10 +925,11 @@ def _check_use_index(source, use_index, default):\nexcept PyteomicsError:\nraise\nexcept Exception as e:\n- warnings.warn('Could not check mode on {}. Reason: {!r}. Specify `use_index` explicitly to avoid errors.'.format(source, e))\n- if use_index is not None:\n- return use_index\n+ if use_index is None:\n+ warnings.warn('Could not check mode on {}. Reason: {!r}. '\n+ 'Specify `use_index` explicitly to avoid errors.'.format(source, e))\nreturn default\n+ return use_index\nclass FileReadingProcess(mp.Process):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix superfluous use_index warning |
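The fix above matters for sources whose mode cannot be inspected (for example, in-memory buffers): when `use_index` is passed explicitly, the "Could not check mode" warning is no longer emitted. A sketch with a placeholder file name:

```python
import io
from pyteomics import mzml

# An in-memory buffer has no useful 'mode' attribute; with this fix,
# specifying use_index explicitly suppresses the spurious warning.
with open('spectra.mzML', 'rb') as f:          # placeholder path
    buf = io.BytesIO(f.read())

reader = mzml.read(buf, use_index=True)
spectrum = next(reader)
```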
377,522 | 11.08.2021 22:38:16 | -10,800 | f54469674b26d6cf7dc7ab32e4b1224584029d62 | Fix test for use_index warning | [
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -966,7 +966,7 @@ class UseIndexTest(unittest.TestCase):\nsource = UseIndexTest.MockFile(None, None)\nwith warnings.catch_warnings(record=True) as w:\nwarnings.simplefilter('always')\n- aux._check_use_index(source, True, None)\n+ aux._check_use_index(source, None, None)\nself.assertEqual(len(w), 1)\nself.assertIs(w[0].category, UserWarning)\nself.assertIn('Could not check mode', str(w[0].message))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix test for use_index warning |
377,527 | 17.08.2021 18:35:12 | -7,200 | be45ca426b5decfe87f4be6ced23af23c7546bbc | Shuffling that preserve some amino acids
incl. test | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -664,7 +664,7 @@ def reverse(sequence, keep_nterm=False, keep_cterm=False):\nreturn sequence[:start] + sequence[start:end][::-1] + sequence[end:]\n-def shuffle(sequence, keep_nterm=False, keep_cterm=False):\n+def shuffle(sequence, keep_nterm=False, keep_cterm=False, keep_M=False, fix_aa=''):\n\"\"\"\nCreate a decoy sequence by shuffling the original one.\n@@ -678,22 +678,54 @@ def shuffle(sequence, keep_nterm=False, keep_cterm=False):\nkeep_cterm : bool, optional\nIf :py:const:`True`, then the C-terminal residue will be kept.\nDefault is :py:const:`False`.\n-\n+ keep_M : bool, optional\n+ If :py:const:`True`, then the C-terminal methionine will be kept.\n+ Default is :py:const:`False`.\n+ fix_aa : str or list or tuple, optional\n+ single letter codes for amino acids that should preserve their position\n+ during shuffling.\n+ Default is ''.\nReturns\n-------\ndecoy_sequence : str\nThe decoy sequence.\n\"\"\"\n- start = 1 if keep_nterm else 0\n- end = len(sequence)-1 if keep_cterm else len(sequence)\n- if start == end:\n- return sequence\n- elif keep_cterm or keep_nterm:\n- return sequence[:start] + shuffle(sequence[start:end]) + sequence[end:]\n+ #empty sequence\n+ if len(sequence) == 0:\n+ return ''\n+\n+ #presereve the first position\n+ if (keep_M and sequence[0] == 'M') or keep_nterm:\n+ return sequence[0] + shuffle(sequence[1:], keep_cterm=keep_cterm,\n+ fix_aa=fix_aa)\n+\n+ #presereve the last position\n+ if keep_cterm:\n+ return shuffle(sequence[:-1], fix_aa=fix_aa) + sequence[-1]\n+\n+\n+ if type(fix_aa) in [list, tuple]:\n+ fix_aa = ''.join(fix_aa)\n+\n+ fixed = []\n+ position = 0\n+ if len(fix_aa) > 0: #non-empty fixed list\n+ shuffled = []\n+ for match in re.finditer(r'[{}]'.format(fix_aa), sequence):\n+ fixed.append((match.start(), sequence[match.start()]))\n+ shuffled += list(sequence[position:match.start()])\n+ position = match.end()\n+ shuffled += list(sequence[position:])\n+\n+ else: #shuffle everything\n+ shuffled = list(sequence)\n+\n+ random.shuffle(shuffled)\n+\n+ for fix in fixed:\n+ shuffled.insert(fix[0], fix[1])\n- modified_sequence = list(sequence)\n- random.shuffle(modified_sequence)\n- return ''.join(modified_sequence)\n+ return ''.join(shuffled)\ndef fused_decoy(sequence, decoy_mode='reverse', sep='R', **kwargs):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_fasta.py",
"new_path": "tests/test_fasta.py",
"diff": "@@ -4,6 +4,7 @@ import unittest\nimport random\nimport string\nimport pickle\n+import re\nimport pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nfrom pyteomics import fasta\n@@ -85,6 +86,16 @@ class FastaTest(unittest.TestCase):\ntest = False\nself.assertFalse(test)\n+ test = True\n+ for s in sequences:\n+ aa = random.choice(string.ascii_uppercase)\n+ ss = fasta.shuffle(s, fix_aa=aa)\n+ self.assertEqual([_.span() for _ in re.finditer(aa, s)],\n+ [_.span() for _ in re.finditer(aa, ss)])\n+ if not all(a == b for a, b in zip(s, ss)):\n+ test = False\n+ self.assertFalse(test)\n+\ndef test_decoy_sequence_fused(self):\nsequences = [''.join(random.choice(string.ascii_uppercase)\nfor i in range(random.randint(1, 50)))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Shuffling that preserve some amino acids
incl. test |
377,522 | 24.08.2021 00:10:34 | -10,800 | b30b03de2354c4dd6876de600c7be347c092232b | Minor fixes, rename keep_M to keep_nterm_M, extend test | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -664,7 +664,7 @@ def reverse(sequence, keep_nterm=False, keep_cterm=False):\nreturn sequence[:start] + sequence[start:end][::-1] + sequence[end:]\n-def shuffle(sequence, keep_nterm=False, keep_cterm=False, keep_M=False, fix_aa=''):\n+def shuffle(sequence, keep_nterm=False, keep_cterm=False, keep_nterm_M=False, fix_aa=''):\n\"\"\"\nCreate a decoy sequence by shuffling the original one.\n@@ -678,24 +678,26 @@ def shuffle(sequence, keep_nterm=False, keep_cterm=False, keep_M=False, fix_aa='\nkeep_cterm : bool, optional\nIf :py:const:`True`, then the C-terminal residue will be kept.\nDefault is :py:const:`False`.\n- keep_M : bool, optional\n- If :py:const:`True`, then the C-terminal methionine will be kept.\n+ keep_nterm_M : bool, optional\n+ If :py:const:`True`, then the N-terminal methionine will be kept.\nDefault is :py:const:`False`.\n- fix_aa : str or list or tuple, optional\n- single letter codes for amino acids that should preserve their position\n+ fix_aa : iterable, optional\n+ Single letter codes for amino acids that should preserve their position\nduring shuffling.\nDefault is ''.\n+\nReturns\n-------\ndecoy_sequence : str\nThe decoy sequence.\n\"\"\"\n+\n# empty sequence\nif len(sequence) == 0:\nreturn ''\n# presereve the first position\n- if (keep_M and sequence[0] == 'M') or keep_nterm:\n+ if (keep_nterm_M and sequence[0] == 'M') or keep_nterm:\nreturn sequence[0] + shuffle(sequence[1:], keep_cterm=keep_cterm,\nfix_aa=fix_aa)\n@@ -704,7 +706,7 @@ def shuffle(sequence, keep_nterm=False, keep_cterm=False, keep_M=False, fix_aa='\nreturn shuffle(sequence[:-1], fix_aa=fix_aa) + sequence[-1]\n- if type(fix_aa) in [list, tuple]:\n+ if not isinstance(fix_aa, str):\nfix_aa = ''.join(fix_aa)\nfixed = []\n@@ -713,9 +715,9 @@ def shuffle(sequence, keep_nterm=False, keep_cterm=False, keep_M=False, fix_aa='\nshuffled = []\nfor match in re.finditer(r'[{}]'.format(fix_aa), sequence):\nfixed.append((match.start(), sequence[match.start()]))\n- shuffled += list(sequence[position:match.start()])\n+ shuffled.extend(sequence[position:match.start()])\nposition = match.end()\n- shuffled += list(sequence[position:])\n+ shuffled.extend(sequence[position:])\nelse: # shuffle everything\nshuffled = list(sequence)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_fasta.py",
"new_path": "tests/test_fasta.py",
"diff": "@@ -5,6 +5,7 @@ import random\nimport string\nimport pickle\nimport re\n+from collections import Counter\nimport pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nfrom pyteomics import fasta\n@@ -88,8 +89,12 @@ class FastaTest(unittest.TestCase):\ntest = True\nfor s in sequences:\n- aa = random.choice(string.ascii_uppercase)\n- ss = fasta.shuffle(s, fix_aa=aa)\n+ n = random.randint(1, 5)\n+ fix_aa = [random.choice(string.ascii_uppercase) for _ in range(n)]\n+ ss = fasta.shuffle(s, fix_aa=fix_aa)\n+ self.assertEqual(len(s), len(ss))\n+ self.assertEqual(Counter(s), Counter(ss))\n+ for aa in fix_aa:\nself.assertEqual([_.span() for _ in re.finditer(aa, s)],\n[_.span() for _ in re.finditer(aa, ss)])\nif not all(a == b for a, b in zip(s, ss)):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Minor fixes, rename keep_M to keep_nterm_M, extend test |
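A short usage sketch of the decoy options introduced and renamed in the two commits above (the protein sequence is an arbitrary example, not taken from the tests):

```python
from pyteomics import fasta

seq = 'MKWVTFISLLLLFSSAYSRGVFRR'    # arbitrary example sequence
decoy = fasta.shuffle(seq, keep_nterm_M=True, fix_aa='KR')
# The N-terminal methionine stays in place and every K/R keeps its position;
# all other residues are shuffled.
```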
377,522 | 27.08.2021 15:39:35 | -10,800 | b179189f2fc790866024404fcf965c02b72412d8 | Increment dev version, update changelog, fix title level in doc | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.5dev4\n+4.5dev5\n-------\n- Add support for `mzMLb <https://www.biorxiv.org/content/10.1101/2020.02.13.947218v3>`_\nand `#38 <https://github.com/levitsky/pyteomics/pull/38>`_ by Joshua Klein)\nwith new module :py:mod:`pyteomics.mzmlb`.\n- Add ProteomeExchange backend for PROXI requests and implement an aggregator for responses from all backends\n- (`#36 <https://github.com/levitsky/pyteomics/pull/36>`_ and\n- `#45 <https://github.com/levitsky/pyteomics/pull/45>`_ by Joshua Klein)\n+ (`#36 <https://github.com/levitsky/pyteomics/pull/36>`_,\n+ `#45 <https://github.com/levitsky/pyteomics/pull/45>`_, and\n+ `#55 <https://github.com/levitsky/pyteomics/pull/55>`_ by Joshua Klein)\nin :py:mod:`pyteomics.usi`.\n- Add support for `ProForma <https://www.psidev.info/proforma>`_\n(`#37 <https://github.com/levitsky/pyteomics/pull/37>`_ by Joshua Klein)\nin new module :py:mod:`pyteomics.proforma`.\n+ - New arguments `keep_nterm_M` and `fix_aa` in :py:func:`pyteomics.fasta.shuffle`\n+ (`#54 <https://github.com/levitsky/pyteomics/pull/54>`_ by Vladimir Gorshkov).\n+ - Fx for unwanted warnings in :py:func:`pyteomics.auxiliary.file_helpers._check_use_index` when\n+ `use_index` is explicitly passed (`#52 <https://github.com/levitsky/pyteomics/issues/52>`_).\n4.4.2\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/data/xml.rst.inc",
"new_path": "doc/source/data/xml.rst.inc",
"diff": "@@ -69,7 +69,7 @@ does not have a counterpart in :py:mod:`pyteomics.mzxml`.\nmzMLb\n-~~~~~\n+-----\n**mzMLb** is an HDF5-based format which wraps an **mzML** file and intelligently re-organizes\nit for fast random access while reducing the on-disk file size using HDF5's rich support for\n@@ -77,6 +77,7 @@ data compression. If the dependencies, :py:mod:`h5py` and :py:mod:`hdf5plugin`,\n:py:mod:`pyteomics.mzmlb` can be used to access the data in these files just like :py:mod:`pyteomics.mzml`\nreads **mzML** files.\n+\npepXML\n------\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.5dev4'\n+__version__ = '4.5dev5'\nfrom collections import namedtuple\nimport re\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Increment dev version, update changelog, fix title level in doc |
377,522 | 08.09.2021 01:58:50 | -10,800 | 4a45d4c1ac068a17f0e066bc2aa37db95ddc706b | Fixes for modifications | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -474,7 +474,7 @@ def _spectrum_utils_create_spectrum(spectrum, peptide, *args, **kwargs):\nspectrum = sus.MsmsSpectrum(\nNone, kwargs.pop('precursor_mz', None), kwargs.pop('precursor_charge', None),\nspectrum['m/z array'], spectrum['intensity array'],\n- peptide=peptide)\n+ peptide=peptide, modifications=kwargs.pop('modifications', None))\nif mz_range:\nspectrum = spectrum.set_mz_range(*mz_range)\n@@ -503,7 +503,7 @@ def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nif precursor_charge is None:\nraise PyteomicsError('Could not extract precursor charge from spectrum. '\n'Please specify `precursor_charge` keyword argument.')\n- precursor_mz = mass.calculate_mass(peptide, aa_mass=aa_mass, charge=precursor_charge)\n+ precursor_mz = mass.fast_mass2(peptide, aa_mass=aa_mass, charge=precursor_charge)\nmaxcharge = kwargs.pop('maxcharge', max(1, precursor_charge - 1))\n# end of common kwargs\n@@ -513,7 +513,7 @@ def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nremove_precursor_peak = kwargs.pop('remove_precursor_peak', False)\nannotate_mz = kwargs.pop('annotate_mz', None)\nannotate_mz_text = kwargs.pop('annotate_mz_text', None)\n- variable_mods = kwargs.get('modifications')\n+ variable_mods = kwargs.pop('modifications', None)\nif not variable_mods:\nclean_sequence, variable_mods = _spectrum_utils_parse_sequence(peptide, aa_mass)\nelse:\n@@ -563,7 +563,7 @@ class SpectrumUtilsStaticModifications:\ndef _spectrum_utils_parse_sequence(sequence, aa_mass=None):\nif isinstance(sequence, str):\n- parsed = parser.parse(sequence, show_unmodified_termini=True)\n+ parsed = parser.parse(sequence, show_unmodified_termini=True, labels=aa_mass, allow_unknown_modifications=True)\nelse:\nparsed = sequence\nmods = {}\n@@ -575,7 +575,14 @@ def _spectrum_utils_parse_sequence(sequence, aa_mass=None):\nclean_sequence = []\nfor i, aa in enumerate(parsed[1:-1]):\nif len(aa) > 1:\n- mods[i] = aa_mass.get(aa, aa_mass[aa[:-1]] + aa_mass[aa[-1]])\n+ if aa[:-1] in aa_mass:\n+ mods[i] = aa_mass[aa[:-1]]\n+ else:\n+ try:\n+ mods[i] = aa_mass[aa] - aa_mass[aa[-1]]\n+ except KeyError:\n+ raise PyteomicsError('Unknown modification mass: {0}. {0} or {1} must be in `aa_mass`.'.format(\n+ aa[:-1], aa))\nclean_sequence.append(aa[-1])\nreturn ''.join(clean_sequence), mods\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fixes for modifications |
377,522 | 08.09.2021 14:31:40 | -10,800 | 042e2cd8fbd4b3ac582f1a28c0f8075807e1962b | Doc fixes and cleanup | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -545,22 +545,6 @@ class SpectrumUtilsColorScheme:\nsup.colors = self.previous_colors\n-class SpectrumUtilsStaticModifications:\n- \"\"\"Context manager that temporarily changes `spectrum_utils` static modifications.\"\"\"\n- def __init__(self, mods):\n- self.mods = mods\n- self.previous_aa_mass = sus._aa_mass\n-\n- def __enter__(self):\n- if self.mods:\n- sus.reset_modifications()\n- for mod in self.mods:\n- sus.static_modification(*mod)\n-\n- def __exit__(self, *args, **kwargs):\n- sup._aa_mass = self.previous_aa_mass\n-\n-\ndef _spectrum_utils_parse_sequence(sequence, aa_mass=None):\nif isinstance(sequence, str):\nparsed = parser.parse(sequence, show_unmodified_termini=True, labels=aa_mass, allow_unknown_modifications=True)\n@@ -623,7 +607,7 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nThe `spectrum_utils.iplot` backend requires installing :py:mod:`spectrum_utils[iplot]`.\nion_types : Container, keyword only, optional\nIon types to be considered for annotation. Default is `('b', 'y')`.\n- precursor_charge : str, keyword only, optional\n+ precursor_charge : int, keyword only, optional\nIf not specified, an attempt is made to extract it from `spectrum`.\nmaxcharge : int, keyword only, optional\nMaximum charge state for fragment ions to be considered. Default is `precursor_charge - 1`.\n@@ -633,8 +617,6 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nA fixed m/z tolerance value for peak matching. Alternative to `rtol`.\nrtol : float, keyword only, optional\nA relative m/z error for peak matching. Default is 10 ppm.\n- text_kw : dict, keyword only, optional\n- Keyword arguments for :py:func:`pylab.text`.\naa_mass : dict, keyword only, optional\nA dictionary of amino acid residue masses.\n*args\n@@ -642,7 +624,6 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\n**kwargs\nPassed to the plotting backend.\n-\ncentroided : bool, keyword only, optional\nPassed to :py:func:`plot_spectrum`. Only works with `default` backend.\nion_comp : dict, keyword only, optional\n@@ -651,12 +632,13 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nmass_data : dict, keyword only, optional\nA dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.\nOnly works with `default` backend.\n+ text_kw : dict, keyword only, optional\n+ Keyword arguments for :py:func:`pylab.text`. Only works with `default` backend.\nadjust_text : bool, keyword only, optional\nAdjust the overlapping text annotations using :py:mod:`adjustText`. Only works with `default` backend.\nadjust_kw : dict, keyword only, optional\nKeyword arguments for :py:func:`adjust_text`. Only works with `default` backend.\n-\nremove_precursor_peak : bool, keyword only, optional\nRemove precursor peak from spectrum before annotation. Default is :p:const:`False`.\nOnly works with `spectrum_utils` backend.\n@@ -680,7 +662,13 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nmodifications : dict, optional\nA dict of variable modifications as described in\n`spectrum_utils documentation <https://spectrum-utils.readthedocs.io/en/latest/processing.html#variable-modifications>`_.\n+\n+ .. note::\nYou don't need to provide this if your `peptide` is a modX sequence and you supply `aa_mass`.\n+\n+ .. note::\n+ To apply static modifications, provide `aa_mass` with modified masses.\n+\n\"\"\"\nbname = kwargs.pop('backend', 'default')\nbackend = _annotation_backends.get(bname)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Doc fixes and cleanup |
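Following the notes added above, a modified peptide can be annotated by writing the modification in modX notation and supplying its mass through `aa_mass`, without an explicit `modifications` dict. A sketch with invented peaks, peptide and modification label:

```python
import numpy as np
from pyteomics import mass, pylab_aux

aa_mass = dict(mass.std_aa_mass)
aa_mass['ox'] = 15.994915            # oxidation, used below as the 'ox' modX label

spectrum = {'m/z array': np.array([175.119, 322.187, 451.230]),    # made-up peaks
            'intensity array': np.array([50.0, 100.0, 70.0])}

pylab_aux.annotate_spectrum(spectrum, 'PEPToxMIDEK',   # oxidized Met in modX notation
                            backend='spectrum_utils',
                            precursor_charge=2,
                            aa_mass=aa_mass)
```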
377,522 | 08.09.2021 15:22:18 | -10,800 | bcf8f9adc1c28bb94d075251de084103809f069d | Add annot_kws, start laying out plot backend structure | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -330,6 +330,34 @@ def plot_qvalue_curve(qvalues, *args, **kwargs):\nreturn pylab.plot(qvalues, 1 + np.arange(qvalues.size), *args, **kwargs)\n+def _default_plot_spectrum(spectrum, centroided=True, *args, **kwargs):\n+ pylab.xlabel(kwargs.pop('xlabel', 'm/z'))\n+ pylab.ylabel(kwargs.pop('ylabel', 'intensity'))\n+ pylab.title(kwargs.pop('title', ''))\n+ if centroided:\n+ kwargs.setdefault('align', 'center')\n+ kwargs.setdefault('width', 0)\n+ kwargs.setdefault('linewidth', 1)\n+ kwargs.setdefault('edgecolor', 'k')\n+ return pylab.bar(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+ return pylab.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+\n+\n+def _spectrum_utils_plot(spectrum, *args, **kwargs):\n+ return NotImplemented\n+\n+\n+def _spectrum_utils_iplot(spectrum, *args, **kwargs):\n+ return NotImplemented\n+\n+\n+_plot_backends = {\n+ 'default': _default_plot_spectrum,\n+ 'spectrum_utils': _spectrum_utils_plot,\n+ 'spectrum_utils.iplot': _spectrum_utils_iplot,\n+}\n+\n+\ndef plot_spectrum(spectrum, centroided=True, *args, **kwargs):\n\"\"\"\nPlot a spectrum, assuming it is a dictionary containing \"m/z array\" and \"intensity array\".\n@@ -353,16 +381,12 @@ def plot_spectrum(spectrum, centroided=True, *args, **kwargs):\n**kwargs\nGiven to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).\n\"\"\"\n- pylab.xlabel(kwargs.pop('xlabel', 'm/z'))\n- pylab.ylabel(kwargs.pop('ylabel', 'intensity'))\n- pylab.title(kwargs.pop('title', ''))\n- if centroided:\n- kwargs.setdefault('align', 'center')\n- kwargs.setdefault('width', 0)\n- kwargs.setdefault('linewidth', 1)\n- kwargs.setdefault('edgecolor', 'k')\n- return pylab.bar(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n- return pylab.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+ bname = kwargs.pop('backend', 'default')\n+ backend = _plot_backends.get(bname)\n+ if backend is None:\n+ raise PyteomicsError('Unknown backend name: {}. Should be one of: {}.'.format(\n+ bname, '; '.join(_plot_backends)))\n+ return backend(spectrum, *args, **kwargs)\ndef _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\n@@ -495,7 +519,7 @@ def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nelse:\ntol_mode = 'Da'\n- kwargs.pop('text_kw', None) # not used\n+ # kwargs.pop('text_kw', None) # not used\nprecursor_charge = kwargs.pop('precursor_charge', None)\nif precursor_charge is None:\n@@ -575,7 +599,7 @@ def _spectrum_utils_annotate_plot(spectrum, peptide, *args, **kwargs):\nwith SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\nspectrum = _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs)\n- return sup.spectrum(spectrum)\n+ return sup.spectrum(spectrum, annot_kws=kwargs.pop('text_kw'))\ndef _spectrum_utils_annotate_iplot(spectrum, peptide, *args, **kwargs):\n@@ -619,6 +643,8 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nA relative m/z error for peak matching. 
Default is 10 ppm.\naa_mass : dict, keyword only, optional\nA dictionary of amino acid residue masses.\n+ text_kw : dict, keyword only, optional\n+ Keyword arguments for :py:func:`pylab.text`.\n*args\nPassed to the plotting backend.\n**kwargs\n@@ -632,8 +658,7 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nmass_data : dict, keyword only, optional\nA dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.\nOnly works with `default` backend.\n- text_kw : dict, keyword only, optional\n- Keyword arguments for :py:func:`pylab.text`. Only works with `default` backend.\n+\nadjust_text : bool, keyword only, optional\nAdjust the overlapping text annotations using :py:mod:`adjustText`. Only works with `default` backend.\nadjust_kw : dict, keyword only, optional\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add annot_kws, start laying out plot backend structure |
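With the change above, `text_kw` is forwarded to `spectrum_utils` as `annot_kws`, so annotation text can be styled through the same keyword as with the default backend. A small sketch with invented data:

```python
import numpy as np
from pyteomics import pylab_aux

spectrum = {'m/z array': np.array([175.119, 322.187]),    # made-up peaks
            'intensity array': np.array([80.0, 100.0])}

pylab_aux.annotate_spectrum(spectrum, 'PEPTIDEK',
                            backend='spectrum_utils',
                            precursor_charge=2,
                            text_kw={'fontsize': 8})      # forwarded as annot_kws
```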
377,522 | 09.09.2021 16:42:39 | -10,800 | 08ddb4921e247438281e6c273aae74738e35a427 | Add plot_spectrum() with spectrum_utils backends | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -330,11 +330,8 @@ def plot_qvalue_curve(qvalues, *args, **kwargs):\nreturn pylab.plot(qvalues, 1 + np.arange(qvalues.size), *args, **kwargs)\n-def _default_plot_spectrum(spectrum, centroided=True, *args, **kwargs):\n- pylab.xlabel(kwargs.pop('xlabel', 'm/z'))\n- pylab.ylabel(kwargs.pop('ylabel', 'intensity'))\n- pylab.title(kwargs.pop('title', ''))\n- if centroided:\n+def _default_plot_spectrum(spectrum, *args, **kwargs):\n+ if kwargs.pop('centroided', True):\nkwargs.setdefault('align', 'center')\nkwargs.setdefault('width', 0)\nkwargs.setdefault('linewidth', 1)\n@@ -344,11 +341,17 @@ def _default_plot_spectrum(spectrum, centroided=True, *args, **kwargs):\ndef _spectrum_utils_plot(spectrum, *args, **kwargs):\n- return NotImplemented\n+\n+ with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\n+ spectrum = _spectrum_utils_create_spectrum(spectrum, None, *args, **kwargs)\n+ return sup.spectrum(spectrum)\ndef _spectrum_utils_iplot(spectrum, *args, **kwargs):\n- return NotImplemented\n+ import spectrum_utils.iplot as supi\n+ with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\n+ spectrum = _spectrum_utils_create_spectrum(spectrum, None, *args, **kwargs)\n+ return supi.spectrum(spectrum)\n_plot_backends = {\n@@ -358,34 +361,58 @@ _plot_backends = {\n}\n-def plot_spectrum(spectrum, centroided=True, *args, **kwargs):\n+def plot_spectrum(spectrum, *args, **kwargs):\n\"\"\"\nPlot a spectrum, assuming it is a dictionary containing \"m/z array\" and \"intensity array\".\nParameters\n----------\nspectrum : dict\n- A dictionary, as returned by MGF, mzML or mzXML parsers.\n+ A dictionary, as returned by pyteomics MS data parsers.\nMust contain \"m/z array\" and \"intensity array\" keys with decoded arrays.\n- centroided : bool, optional\n- If :py:const:`True` (default), peaks of the spectrum are plotted using :py:func:`pylab.bar`.\n- If :py:const:`False`, the arrays are simply plotted using :py:func:`pylab.plot`.\n+ backend : str, keyword only, optional\n+ One of `{'default', 'spectrum_utils', 'spectrum_utils.iplot'}`.\n+ The `spectrum_utils` backend requires installing :py:mod:`spectrum_utils`.\n+ The `spectrum_utils.iplot` backend requires installing :py:mod:`spectrum_utils[iplot]`.\nxlabel : str, keyword only, optional\nLabel for the X axis. Default is \"m/z\".\nylabel : str, keyword only, optional\nLabel for the Y axis. Default is \"intensity\".\ntitle : str, keyword only, optional\nThe title. Empty by default.\n+\n+ centroided : bool, keyword only, optional\n+ Works only for the `default` backend.\n+ If :py:const:`True` (default), peaks of the spectrum are plotted using :py:func:`pylab.bar`.\n+ If :py:const:`False`, the arrays are simply plotted using :py:func:`pylab.plot`.\n*args\n- Given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).\n+ When using `default` backend: given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).\n**kwargs\n- Given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).\n+ When using `default` backend: given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).\n+\n+ min_intensity : float, keyword only, optional\n+ Remove low-intensity peaks; this is a factor of maximum peak intensity. Default is 0 (no filtering).\n+ Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.\n+ max_num_peaks : int or None, keyword only, optional\n+ Remove low-intensity peaks; this is the number of peaks to keep. 
Default is :py:const:`None` (no filtering).\n+ Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.\n+ scaling : one of `{'root', 'log', 'rank'}` or None, keyword only, optional\n+ Scaling to apply to peak intensities. Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.\n+ max_intensity : float or None, keyword only, optional\n+ Intensity of the most intense peak relative to which the peaks will be scaled\n+ (the default is :py:const:`None`, which means that no scaling\n+ relative to the most intense peak will be performed).\n+ Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.\n\"\"\"\nbname = kwargs.pop('backend', 'default')\nbackend = _plot_backends.get(bname)\nif backend is None:\nraise PyteomicsError('Unknown backend name: {}. Should be one of: {}.'.format(\nbname, '; '.join(_plot_backends)))\n+\n+ pylab.xlabel(kwargs.pop('xlabel', 'm/z'))\n+ pylab.ylabel(kwargs.pop('ylabel', 'intensity'))\n+ pylab.title(kwargs.pop('title', ''))\nreturn backend(spectrum, *args, **kwargs)\n@@ -606,7 +633,7 @@ def _spectrum_utils_annotate_iplot(spectrum, peptide, *args, **kwargs):\nimport spectrum_utils.iplot as supi\nwith SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\nspectrum = _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs)\n- return supi.spectrum(spectrum)\n+ return supi.spectrum(spectrum, annot_kws=kwargs.pop('text_kw'))\n_annotation_backends = {\n@@ -669,21 +696,21 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nOnly works with `spectrum_utils` backend.\nmin_intensity : float, keyword only, optional\nRemove low-intensity peaks; this is a factor of maximum peak intensity. Default is 0 (no filtering).\n- Only works with `spectrum_utils` backend.\n+ Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.\nmax_num_peaks : int or None, keyword only, optional\nRemove low-intensity peaks; this is the number of peaks to keep. Default is :py:const:`None` (no filtering).\n- Only works with `spectrum_utils` backend.\n+ Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.\nscaling : one of `{'root', 'log', 'rank'}` or None, keyword only, optional\n- Scaling to apply to peak intensities. Only works with `spectrum_utils` backend.\n+ Scaling to apply to peak intensities. Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.\nmax_intensity : float or None, keyword only, optional\nIntensity of the most intense peak relative to which the peaks will be scaled\n(the default is :py:const:`None`, which means that no scaling\nrelative to the most intense peak will be performed).\n- Only works with `spectrum_utils` backend.\n+ Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.\npeak_assignment : one of `{'most_intense', 'nearest_mz'}`, keyword only, optional\nIn case multiple peaks occur within the given mass window around a theoretical peak,\nonly a single peak will be annotated with the fragment type.\n- Default is `'most_intense'`. Only works with `spectrum_utils` backend.\n+ Default is `'most_intense'`. Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.\nmodifications : dict, optional\nA dict of variable modifications as described in\n`spectrum_utils documentation <https://spectrum-utils.readthedocs.io/en/latest/processing.html#variable-modifications>`_.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add plot_spectrum() with spectrum_utils backends |
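A minimal usage sketch for the backend dispatch introduced in this commit's version of plot_spectrum, added here for orientation. The spectrum dict, its precursor metadata and all numeric values are invented, and the spectrum_utils backend is assumed to be installed; whether that backend needs precursor information in the dict is also an assumption, so MGF-style params are included just in case:

    import numpy as np
    from pyteomics import pylab_aux

    # Toy spectrum shaped like the dicts returned by pyteomics parsers.
    spectrum = {
        'm/z array': np.array([175.119, 305.174, 434.217, 547.301]),
        'intensity array': np.array([120.0, 340.0, 95.0, 410.0]),
        'params': {'pepmass': (548.31, None), 'charge': [2]},  # hypothetical precursor info
    }

    # Default matplotlib backend: centroided bar plot with axis labels and a title.
    pylab_aux.plot_spectrum(spectrum, title='toy spectrum')

    # spectrum_utils backend: the filtering/scaling keywords are honored only here.
    pylab_aux.plot_spectrum(spectrum, backend='spectrum_utils',
                            min_intensity=0.01, scaling='root')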
377,522 | 09.09.2021 17:46:42 | -10,800 | 8f3b1771ae44597840015c5f722a1930427a7f2e | Add a simple mirror plot function | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -24,6 +24,8 @@ Spectrum visualization\n:py:func:`annotate_spectrum` - plot and annotate peaks in MS/MS spectrum.\n+ :py:func:`mirror` - create a mirror plot of two spectra (using :py:mod:`spectrum_utils`).\n+\nFDR control\n-----------\n@@ -728,3 +730,30 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nraise PyteomicsError('Unknown backend name: {}. Should be one of: {}.'.format(\nbname, '; '.join(_annotation_backends)))\nreturn backend(spectrum, peptide, *args, **kwargs)\n+\n+\n+def mirror(spec_top, spec_bottom, peptide=None, spectrum_kws=None, ax=None, **kwargs):\n+ \"\"\"Create a mirror plot of two (possible annotated) spectra using `spectrum_utils`.\n+\n+ .. note ::\n+ Requires :py:mod:`spectrum_utils`.\n+\n+ Parameters\n+ ----------\n+ spec_top : dict\n+ A spectrum as returned by Pyteomics parsers. Needs to have 'm/z array' and 'intensity array' keys.\n+ spec_bottom : dict\n+ A spectrum as returned by Pyteomics parsers. Needs to have 'm/z array' and 'intensity array' keys.\n+ peptide : str or None, optional\n+ A modX sequence. If provided, the peaks will be annotated as peptide fragments.\n+ spectrum_kws : dict or None, optional\n+ Passed to :py:func:`spectrum_utils.plot.mirror`.\n+ ax : matplotlib.pyplot.Axes or None, optional\n+ Passed to :py:func:`spectrum_utils.plot.mirror`.\n+ **kwargs : same as for :py:func:`annotate_spectrum` for `spectrum_utils` backends.\n+ \"\"\"\n+\n+ spec_gen = _spectrum_utils_create_spectrum if peptide is None else _spectrum_utils_annotate_spectrum\n+ spec_top = spec_gen(spec_top, peptide, **kwargs)\n+ spec_bottom = spec_gen(spec_bottom, peptide, **kwargs)\n+ return sup.mirror(spec_top, spec_bottom, spectrum_kws=spectrum_kws, ax=ax)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add a simple mirror plot function |
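A hedged sketch of calling the new mirror helper. Both spectra below are invented, spectrum_utils is assumed to be installed, and the MGF-style precursor metadata is included only on the assumption that the underlying spectrum_utils objects require it:

    import numpy as np
    import matplotlib.pyplot as plt
    from pyteomics import pylab_aux

    top = {
        'm/z array': np.array([147.113, 263.140, 376.224, 505.267]),
        'intensity array': np.array([50.0, 200.0, 90.0, 310.0]),
        'params': {'pepmass': (506.28, None), 'charge': [2]},  # hypothetical
    }
    bottom = {
        'm/z array': np.array([147.114, 263.142, 376.220, 505.262]),
        'intensity array': np.array([60.0, 180.0, 100.0, 290.0]),
        'params': {'pepmass': (506.28, None), 'charge': [2]},  # hypothetical
    }

    # Plain mirror plot; passing a modX `peptide` would additionally annotate fragments.
    ax = pylab_aux.mirror(top, bottom)
    plt.show()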
377,522 | 09.09.2021 17:58:36 | -10,800 | 7def94a516496d2fc9e3dd28a2f1da64c6cda322 | Add axis labels and title to mirror | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -674,6 +674,13 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nA dictionary of amino acid residue masses.\ntext_kw : dict, keyword only, optional\nKeyword arguments for :py:func:`pylab.text`.\n+ xlabel : str, keyword only, optional\n+ Label for the X axis. Default is \"m/z\".\n+ ylabel : str, keyword only, optional\n+ Label for the Y axis. Default is \"intensity\".\n+ title : str, keyword only, optional\n+ The title. Empty by default.\n+\n*args\nPassed to the plotting backend.\n**kwargs\n@@ -729,6 +736,9 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nif backend is None:\nraise PyteomicsError('Unknown backend name: {}. Should be one of: {}.'.format(\nbname, '; '.join(_annotation_backends)))\n+ pylab.xlabel(kwargs.pop('xlabel', 'm/z'))\n+ pylab.ylabel(kwargs.pop('ylabel', 'intensity'))\n+ pylab.title(kwargs.pop('title', ''))\nreturn backend(spectrum, peptide, *args, **kwargs)\n@@ -750,10 +760,22 @@ def mirror(spec_top, spec_bottom, peptide=None, spectrum_kws=None, ax=None, **kw\nPassed to :py:func:`spectrum_utils.plot.mirror`.\nax : matplotlib.pyplot.Axes or None, optional\nPassed to :py:func:`spectrum_utils.plot.mirror`.\n+ xlabel : str, keyword only, optional\n+ Label for the X axis. Default is \"m/z\".\n+ ylabel : str, keyword only, optional\n+ Label for the Y axis. Default is \"intensity\".\n+ title : str, keyword only, optional\n+ The title. Empty by default.\n+\n**kwargs : same as for :py:func:`annotate_spectrum` for `spectrum_utils` backends.\n\"\"\"\nspec_gen = _spectrum_utils_create_spectrum if peptide is None else _spectrum_utils_annotate_spectrum\nspec_top = spec_gen(spec_top, peptide, **kwargs)\nspec_bottom = spec_gen(spec_bottom, peptide, **kwargs)\n- return sup.mirror(spec_top, spec_bottom, spectrum_kws=spectrum_kws, ax=ax)\n+\n+ ax = sup.mirror(spec_top, spec_bottom, spectrum_kws=spectrum_kws, ax=ax)\n+ ax.set_xlabel(kwargs.pop('xlabel', 'm/z'))\n+ ax.set_ylabel(kwargs.pop('ylabel', 'intensity'))\n+ ax.set_title(kwargs.pop('title', ''))\n+ return ax\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add axis labels and title to mirror |
377,522 | 14.09.2021 15:30:38 | -10,800 | 0a20d655d7377b37d4f4ec2ceae0d095b68abeef | Add axes arg for other functions | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -333,13 +333,14 @@ def plot_qvalue_curve(qvalues, *args, **kwargs):\ndef _default_plot_spectrum(spectrum, *args, **kwargs):\n+ ax = kwargs.pop('ax', None) or pylab.gca()\nif kwargs.pop('centroided', True):\nkwargs.setdefault('align', 'center')\nkwargs.setdefault('width', 0)\nkwargs.setdefault('linewidth', 1)\nkwargs.setdefault('edgecolor', 'k')\n- return pylab.bar(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n- return pylab.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+ return ax.bar(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+ return ax.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\ndef _spectrum_utils_plot(spectrum, *args, **kwargs):\n@@ -444,6 +445,7 @@ def _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nif precursor_charge is None:\nraise PyteomicsError('Could not extract precursor charge from spectrum. Please specify `precursor_charge` kwarg.')\nmaxcharge = kwargs.pop('maxcharge', max(1, precursor_charge - 1))\n+ ax = kwargs.get('ax', None)\n# end of common kwargs\n# backend-specific kwargs\n@@ -488,7 +490,7 @@ def _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nelse:\nmatch = np.where(matrix / spectrum['m/z array'] < rtol)\npseudo_spec = {'m/z array': spectrum['m/z array'][match[1]], 'intensity array': spectrum['intensity array'][match[1]]}\n- plot_spectrum(pseudo_spec, centroided=True, edgecolor=c)\n+ plot_spectrum(pseudo_spec, centroided=True, edgecolor=c, ax=ax)\nfor j, i in zip(*match):\nx = spectrum['m/z array'][i]\ny = spectrum['intensity array'][i] + maxpeak * 0.02\n@@ -628,14 +630,14 @@ def _spectrum_utils_annotate_plot(spectrum, peptide, *args, **kwargs):\nwith SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\nspectrum = _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs)\n- return sup.spectrum(spectrum, annot_kws=kwargs.pop('text_kw'))\n+ return sup.spectrum(spectrum, annot_kws=kwargs.pop('text_kw', None), ax=kwargs.pop('ax', None))\ndef _spectrum_utils_annotate_iplot(spectrum, peptide, *args, **kwargs):\nimport spectrum_utils.iplot as supi\nwith SpectrumUtilsColorScheme(kwargs.pop('colors', None)):\nspectrum = _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs)\n- return supi.spectrum(spectrum, annot_kws=kwargs.pop('text_kw'))\n+ return supi.spectrum(spectrum, annot_kws=kwargs.pop('text_kw', None), ax=kwargs.pop('ax', None))\n_annotation_backends = {\n@@ -680,6 +682,8 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\nLabel for the Y axis. Default is \"intensity\".\ntitle : str, keyword only, optional\nThe title. Empty by default.\n+ ax : matplotlib.pyplot.Axes, keyword only, optional\n+ Axes to draw the spectrum.\n*args\nPassed to the plotting backend.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add axes arg for other functions |
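The ax keyword added in this commit makes the plots composable with ordinary matplotlib subplots. A small sketch with an invented spectrum:

    import numpy as np
    import matplotlib.pyplot as plt
    from pyteomics import pylab_aux

    spectrum = {
        'm/z array': np.array([175.119, 305.174, 434.217, 547.301]),
        'intensity array': np.array([120.0, 340.0, 95.0, 410.0]),
    }

    # Draw two views of the same spectrum into chosen subplots by passing `ax`.
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
    pylab_aux.plot_spectrum(spectrum, centroided=True, ax=ax1)   # bar-style peaks
    pylab_aux.plot_spectrum(spectrum, centroided=False, ax=ax2)  # continuous trace
    plt.show()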
377,522 | 14.09.2021 15:47:58 | -10,800 | 6e4c8f59803a2acfe98fac71eb1a6ed770f4bb09 | Changelog update, version bump | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.5dev6\n--------\n+4.5b1\n+-----\n- Add support for `mzMLb <https://www.biorxiv.org/content/10.1101/2020.02.13.947218v3>`_\n(`#35 <https://github.com/levitsky/pyteomics/pull/35>`_\n`use_index` is explicitly passed (`#52 <https://github.com/levitsky/pyteomics/issues/52>`_).\n- Update the default XML schema for featureXML and fix issues with incorrectly specified data types\n(`#53 <https://github.com/levitsky/pyteomics/pull/53>`_).\n+ - Add a new backend for spectrum annotation and plotting. :py:func:`pyteomics.pylab_aux.plot_spectrum` and\n+ :py:func:`pyteomics.pylab_aux.annotate_spectrum` can now use\n+ `spectrum_utils <https://github.com/bittremieux/spectrum_utils>`_ under the hood\n+ (#43 <https://github.com/levitsky/pyteomics/pull/43>`_).\n4.4.2\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.5dev6'\n+__version__ = '4.5b1'\nfrom collections import namedtuple\nimport re\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Changelog update, version bump |
377,522 | 14.09.2021 16:27:03 | -10,800 | 48ae335ace83d233c0b57eb73c5f7555dc245657 | Add mzmlb.chain, fix copy errors in doc | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzmlb.py",
"new_path": "pyteomics/mzmlb.py",
"diff": "@@ -15,7 +15,7 @@ about mzMLb and its features. Please refer to\n`psidev.info <https://www.psidev.info/mzML>`_ for the detailed\nspecification of the format and structure of mzML files.\n-This module provides a minimalistic way to extract information from mzML\n+This module provides a minimalistic way to extract information from mzMLb\nfiles. You can use the old functional interface (:py:func:`read`) or the new\nobject-oriented interface (:py:class:`MzMLb` to iterate over entries in ``<spectrum>`` elements.\n:py:class:`MzMLb` also support direct indexing with spectrum IDs or indices.\n@@ -30,6 +30,11 @@ Data access\nsingle spectrum are converted to a human-readable dict. Spectra themselves are\nstored under 'm/z array' and 'intensity array' keys.\n+ :py:func:`chain` - read multiple mzMLb files at once.\n+\n+ :py:func:`chain.from_iterable` - read multiple files at once, using an\n+ iterable of files.\n+\nControlled Vocabularies\n~~~~~~~~~~~~~~~~~~~~~~~\nmzMLb relies on controlled vocabularies to describe its contents extensibly. See\n@@ -65,6 +70,7 @@ import numpy as np\nfrom pyteomics.mzml import MzML as _MzML\nfrom pyteomics.auxiliary.file_helpers import HierarchicalOffsetIndex, TaskMappingMixin, TimeOrderedIndexedReaderMixin, FileReader\n+from pyteomics import auxiliary as aux, xml\ndef delta_predict(data, copy=True):\n@@ -299,7 +305,6 @@ class chunk_interval_cache_record(namedtuple(\"chunk_interval_cache_record\", (\"st\nreturn hash(self.start)\n-\nclass ExternalArrayRegistry(object):\n'''Read chunks out of a single long array\n@@ -359,9 +364,9 @@ class ExternalArrayRegistry(object):\nclass MzMLb(TimeOrderedIndexedReaderMixin, TaskMappingMixin):\n- '''A parser for mzMLb [1]_\n+ '''A parser for mzMLb [1]_.\n- Provides an identical interface to :class:`~pyteomics.mzml.MzML`\n+ Provides an identical interface to :class:`~pyteomics.mzml.MzML`.\nAttributes\n----------\n@@ -374,7 +379,7 @@ class MzMLb(TimeOrderedIndexedReaderMixin, TaskMappingMixin):\nspecial behavior for retrieving the out-of-band data arrays\nfrom their respective storage locations.\nschema_version : str\n- The mzMLb HDF5 schema version, distinct from the mzML schema inside it\n+ The mzMLb HDF5 schema version, distinct from the mzML schema inside it.\nReferences\n@@ -587,7 +592,7 @@ def read(source, dtype=None):\nParameters\n----------\nsource : str or file\n- A path to a target mzML file or the file object itself.\n+ A path to a target mzMLb file or the file object itself.\ndtype : type or dict, optional\ndtype to convert arrays to, one for both m/z and intensity arrays or one for each key.\nIf :py:class:`dict`, keys should be 'm/z array' and 'intensity array'.\n@@ -604,3 +609,10 @@ def read(source, dtype=None):\n# The MzMLb class is detatched from the normal :class:`FileReader`-based inheritance tree,\n# this grafts it back on for :func:`isinstance` and :func:`issubclass` tests at least.\nFileReader.register(MzMLb)\n+\n+\n+version_info = xml._make_version_info(MzMLb)\n+\n+# chain = aux._make_chain(read, 'read')\n+\n+chain = aux.ChainBase._make_chain(MzMLb)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzmlb.py",
"new_path": "tests/test_mzmlb.py",
"diff": "@@ -6,10 +6,10 @@ from io import BytesIO\npyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]\nfrom data import mzml_spectra\ntry:\n- from pyteomics.mzmlb import MzMLb, read\n+ from pyteomics.mzmlb import MzMLb, read, chain\nreason = None\nexcept ImportError as err:\n- MzMLb = read = None\n+ MzMLb = read = chain = None\nreason = err\nfrom pyteomics.auxiliary import FileReader\n@@ -20,7 +20,7 @@ class MzMLbTest(unittest.TestCase):\npath = 'test.mzMLb'\ndef test_read(self):\n- for func in [MzMLb, read, ]:\n+ for func in [MzMLb, read, chain]:\nwith func(self.path) as r:\n# http://stackoverflow.com/q/14246983/1258041\nself.assertEqual(mzml_spectra, list(r))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add mzmlb.chain, fix copy errors in doc |
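A short sketch of the module-level readers, including the chain helper added in this commit. The file names are placeholders, and the optional HDF5 dependencies used by the mzMLb reader are assumed to be installed:

    from pyteomics import mzmlb

    # Iterate over spectra in a single mzMLb file.
    with mzmlb.read('run1.mzMLb') as reader:
        for spectrum in reader:
            print(spectrum['id'], len(spectrum['m/z array']))

    # chain() concatenates several files into one iterator, like the
    # chain helpers of the other pyteomics readers.
    with mzmlb.chain('run1.mzMLb', 'run2.mzMLb') as reader:
        total = sum(1 for _ in reader)
    print('spectra in both runs:', total)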
377,522 | 14.09.2021 19:28:39 | -10,800 | 11cda6e6d16866107c4abb2ebeee9c9a0df9de74 | Fix default plotting | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.5b1\n+4.5b2\n-----\n- Add support for `mzMLb <https://www.biorxiv.org/content/10.1101/2020.02.13.947218v3>`_\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -415,7 +415,8 @@ def plot_spectrum(spectrum, *args, **kwargs):\npylab.xlabel(kwargs.pop('xlabel', 'm/z'))\npylab.ylabel(kwargs.pop('ylabel', 'intensity'))\n- pylab.title(kwargs.pop('title', ''))\n+ if 'title' in kwargs:\n+ pylab.title(kwargs.pop('title'))\nreturn backend(spectrum, *args, **kwargs)\n@@ -426,7 +427,7 @@ def _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\naa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)\nmass_data = kwargs.pop('mass_data', mass.nist_mass)\nion_comp = kwargs.pop('ion_comp', mass.std_ion_comp)\n- std_colors = {\n+ colors = {\n'a': '#388E3C',\n'b': '#1976D2',\n'c': '#00796B',\n@@ -434,7 +435,7 @@ def _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\n'y': '#D32F2F',\n'z': '#F57C00',\n}\n- colors = kwargs.pop('colors', std_colors)\n+ colors.update(kwargs.pop('colors', {}))\nftol = kwargs.pop('ftol', None)\nif ftol is None:\nrtol = kwargs.pop('rtol', 1e-5)\n@@ -499,7 +500,7 @@ def _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):\nif adjust:\nadjust_text(texts, **adjust_kw)\nkwargs.setdefault('zorder', -1)\n- return plot_spectrum(spectrum, centroided, *args, **kwargs)\n+ return plot_spectrum(spectrum, *args, **kwargs, centroided=centroided)\ndef _get_precursor_charge(spectrum):\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.5b1'\n+__version__ = '4.5b2'\nfrom collections import namedtuple\nimport re\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix default plotting |
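To illustrate the change to colors handling (a partial dict now updates the built-in palette instead of replacing it), here is a sketch for the default backend. The peak positions are rough, hand-computed b2/y2-y4 values for the peptide and may be slightly off, the keyword names not visible in the diff (notably ion_types) are assumptions, and a loose tolerance is used on purpose:

    import numpy as np
    from pyteomics import pylab_aux

    spectrum = {
        'm/z array': np.array([227.10, 263.09, 376.17, 477.22]),   # ~b2, y2, y3, y4 of PEPTIDE
        'intensity array': np.array([120.0, 310.0, 90.0, 200.0]),
    }

    # Only 'y' ions are recolored; 'b' ions keep the default blue after this fix.
    pylab_aux.annotate_spectrum(spectrum, 'PEPTIDE',
                                precursor_charge=2,
                                ion_types=('b', 'y'),
                                ftol=0.05,
                                colors={'y': 'red'},
                                title='annotated toy spectrum')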
377,522 | 15.09.2021 18:16:35 | -10,800 | 7ebe003e7f4373a3bc7221a5bd2650c3720be0b8 | Return Axes from plot_spectrum and annotate_spectrum | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.5b2\n+4.5b3\n-----\n- Add support for `mzMLb <https://www.biorxiv.org/content/10.1101/2020.02.13.947218v3>`_\n(`#43 <https://github.com/levitsky/pyteomics/pull/43>`_).\n- New function :py:func:`pyteomics.pylab_aux.mirror` for making a\n`spectrum_utils <https://github.com/bittremieux/spectrum_utils>`_ mirror plot.\n+ - :py:func:`pyteomics.pylab_aux.plot_spectrum` and :py:func:`pyteomics.pylab_aux.annotate_spectrum` now\n+ always return :py:class:`matplotlib.pyplot.Axes`.\n4.4.2\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -339,8 +339,10 @@ def _default_plot_spectrum(spectrum, *args, **kwargs):\nkwargs.setdefault('width', 0)\nkwargs.setdefault('linewidth', 1)\nkwargs.setdefault('edgecolor', 'k')\n- return ax.bar(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n- return ax.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+ ax.bar(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+ else:\n+ ax.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+ return ax\ndef _spectrum_utils_plot(spectrum, *args, **kwargs):\n@@ -406,6 +408,10 @@ def plot_spectrum(spectrum, *args, **kwargs):\n(the default is :py:const:`None`, which means that no scaling\nrelative to the most intense peak will be performed).\nOnly works with `spectrum_utils` and `spectrum_utils.iplot` backends.\n+\n+ Returns\n+ -------\n+ out : matplotlib.pyplot.Axes\n\"\"\"\nbname = kwargs.pop('backend', 'default')\nbackend = _plot_backends.get(bname)\n@@ -735,6 +741,9 @@ def annotate_spectrum(spectrum, peptide, *args, **kwargs):\n.. note::\nTo apply static modifications, provide `aa_mass` with modified masses.\n+ Returns\n+ -------\n+ out : matplotlib.pyplot.Axes\n\"\"\"\nbname = kwargs.pop('backend', 'default')\nbackend = _annotation_backends.get(bname)\n@@ -773,6 +782,10 @@ def mirror(spec_top, spec_bottom, peptide=None, spectrum_kws=None, ax=None, **kw\nThe title. Empty by default.\n**kwargs : same as for :py:func:`annotate_spectrum` for `spectrum_utils` backends.\n+\n+ Returns\n+ -------\n+ out : matplotlib.pyplot.Axes\n\"\"\"\nspec_gen = _spectrum_utils_create_spectrum if peptide is None else _spectrum_utils_annotate_spectrum\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.5b2'\n+__version__ = '4.5b3'\nfrom collections import namedtuple\nimport re\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Return Axes from plot_spectrum and annotate_spectrum |
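Since both helpers now return the Axes, the figure can be adjusted and saved without going through pylab's implicit state. A small sketch with invented data and a placeholder output path:

    import numpy as np
    from pyteomics import pylab_aux

    spectrum = {
        'm/z array': np.array([175.119, 305.174, 434.217, 547.301]),
        'intensity array': np.array([120.0, 340.0, 95.0, 410.0]),
    }

    # The returned Axes can be customized further after plotting.
    ax = pylab_aux.plot_spectrum(spectrum)
    ax.set_xlim(100, 600)
    ax.set_ylabel('intensity, a.u.')
    ax.figure.savefig('spectrum.png', dpi=150)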
377,522 | 15.09.2021 23:33:54 | -10,800 | 170ffaf4d37766606df589e13bd203c1ca8500f6 | Update pyteomics URL in warning | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -344,7 +344,7 @@ class XML(FileReader):\n\"`read_schema=False`.\\n\"\n\"If you think this shouldn't have happened, please \"\n\"report this to\\n\"\n- \"http://hg.theorchromo.ru/pyteomics/issues\\n\"\n+ \"http://github.com/levitsky/pyteomics/issues\\n\"\n\"\".format(self, version, schema_url, format_exc()))\nret = self._default_schema\nreturn ret\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update pyteomics URL in warning |
377,522 | 15.09.2021 23:42:17 | -10,800 | 4f2ef41ba9e1d5282c5c696b96d81ff1bdd888b5 | Add a warning when writing to existing file in default mode | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.5b2\n+4.5b3\n-----\n- Add support for `mzMLb <https://www.biorxiv.org/content/10.1101/2020.02.13.947218v3>`_\n(`#43 <https://github.com/levitsky/pyteomics/pull/43>`_).\n- New function :py:func:`pyteomics.pylab_aux.mirror` for making a\n`spectrum_utils <https://github.com/bittremieux/spectrum_utils>`_ mirror plot.\n+ - Add a warning when passing an existing file by name in writing functions.\n+ The default mode for output files will change from `'a'` to `'w'` in a future version.\n4.4.2\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -51,6 +51,7 @@ except ImportError:\nfrom .structures import PyteomicsError\nfrom .utils import add_metaclass\n+\ndef _keepstate(func):\n\"\"\"Decorator to help keep the position in open files passed as\npositional arguments to functions\"\"\"\n@@ -575,14 +576,32 @@ def _file_writer(_mode='a'):\n\"\"\"\n@wraps(_func)\ndef helper(*args, **kwargs):\n- m = kwargs.pop('file_mode', _mode)\n+ if 'file_mode' in kwargs:\n+ m = kwargs.pop('file_mode')\n+ warn = False\n+ else:\n+ m = _mode\n+ warn = True\nenc = kwargs.pop('encoding', None)\nif len(args) > 1:\n- with _file_obj(args[1], m, encoding=enc) as out:\n- return _func(args[0], out, *args[2:], **kwargs)\n+ out_arg = args[1]\n+ else:\n+ out_arg = kwargs.pop('output', None)\n+\n+ # warn about the change in default mode if an existing file name is given\n+ if isinstance(out_arg, basestring) and warn and os.path.exists(out_arg):\n+ warnings.warn(\"Opening an existing file in append mode. \"\n+ \"The default mode will change from 'a' to 'w' in a future version. \"\n+ \"Pass `file_mode='a'` to keep old behavior and suppress this warning.\", FutureWarning)\n+\n+ with _file_obj(out_arg, m, encoding=enc) as out:\n+ if len(args) > 1:\n+ call_args = (args[0], out) + args[2:]\n+ call_kwargs = kwargs\nelse:\n- with _file_obj(kwargs.pop('output', None), m, encoding=enc) as out:\n- return _func(*args, output=out, **kwargs)\n+ call_args = args\n+ call_kwargs = dict(output=out, **kwargs)\n+ return _func(*call_args, **call_kwargs)\nreturn helper\nreturn decorator\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/version.py",
"new_path": "pyteomics/version.py",
"diff": "@@ -13,7 +13,7 @@ Constants\n\"\"\"\n-__version__ = '4.5b2'\n+__version__ = '4.5b3'\nfrom collections import namedtuple\nimport re\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add a warning when writing to existing file in default mode |
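A sketch of how the new warning behaves for one of the writers decorated with _file_writer, such as pyteomics.mgf.write; the spectra and file name below are placeholders:

    import numpy as np
    from pyteomics import mgf

    spectra = [{
        'm/z array': np.array([100.0, 200.0, 300.0]),
        'intensity array': np.array([1.0, 2.0, 3.0]),
        'params': {'title': 'toy spectrum', 'pepmass': 500.25, 'charge': 2},
    }]

    mgf.write(spectra, 'out.mgf')                  # new file: no warning
    mgf.write(spectra, 'out.mgf')                  # existing file, default mode: FutureWarning
    mgf.write(spectra, 'out.mgf', file_mode='a')   # explicit mode keeps old behavior silently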