author | date | timezone | hash | message | mods | language | license | repo | original_message
---|---|---|---|---|---|---|---|---|---|
377,522 | 19.07.2018 21:49:44 | -10,800 | 8f47532c8e03876ee722ec912673409b3cfbf5e3 | Reduce code duplication with a class decorator | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -374,16 +374,25 @@ class UniProtMixin(FlavoredMixin):\n_intify(info, ('PE', 'SV'))\nreturn info\n-class UniProt(UniProtMixin, FASTA):\n+\n+def _add_init(cls):\n+ \"\"\"Add and __init__ method to a flavored parser class,\n+ which simply calls __init__ of its two bases.\"\"\"\n+ flavor, typ = cls.__bases__\ndef __init__(self, source, parse=True, **kwargs):\n- FASTA.__init__(self, source, **kwargs)\n- UniProtMixin.__init__(self, parse)\n+ typ.__init__(self, source, **kwargs)\n+ flavor.__init__(self, parse)\n+ cls.__init__ = __init__\n+ return cls\n+\n+@_add_init\n+class UniProt(UniProtMixin, FASTA):\n+ pass\n+@_add_init\nclass IndexedUniProt(UniProtMixin, TwoLayerIndexedFASTA):\n- def __init__(self, source, parse=True, **kwargs):\n- TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n- UniProtMixin.__init__(self, parse)\n+ pass\nclass UniRefMixin(FlavoredMixin):\nheader_pattern = r'^(\\S+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\n@@ -400,16 +409,14 @@ class UniRefMixin(FlavoredMixin):\nreturn info\n+@_add_init\nclass UniRef(UniRefMixin, FASTA):\n- def __init__(self, source, parse=True, **kwargs):\n- FASTA.__init__(self, source, **kwargs)\n- UniRefMixin.__init__(self, parse)\n+ pass\n+@_add_init\nclass IndexedUniRef(UniRefMixin, TwoLayerIndexedFASTA):\n- def __init__(self, source, parse=True, **kwargs):\n- TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n- UniRefMixin.__init__(self, parse)\n+ pass\nclass UniParcMixin(FlavoredMixin):\n@@ -420,16 +427,14 @@ class UniParcMixin(FlavoredMixin):\nreturn {'id': ID, 'status': status}\n+@_add_init\nclass UniParc(UniParcMixin, FASTA):\n- def __init__(self, source, parse=True, **kwargs):\n- FASTA.__init__(self, source, **kwargs)\n- UniParcMixin.__init__(self, parse)\n+ pass\n+@_add_init\nclass IndexedUniParc(UniParcMixin, TwoLayerIndexedFASTA):\n- def __init__(self, source, parse=True, **kwargs):\n- TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n- UniParcMixin.__init__(self, parse)\n+ pass\nclass UniMesMixin(FlavoredMixin):\n@@ -444,16 +449,14 @@ class UniMesMixin(FlavoredMixin):\nreturn info\n+@_add_init\nclass UniMes(UniMesMixin, FASTA):\n- def __init__(self, source, parse=True, **kwargs):\n- FASTA.__init__(self, source, **kwargs)\n- UniMesMixin.__init__(self, parse)\n+ pass\n+@_add_init\nclass IndexedUniMes(UniMesMixin, TwoLayerIndexedFASTA):\n- def __init__(self, source, parse=True, **kwargs):\n- TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n- UniMesMixin.__init__(self, parse)\n+ pass\nclass SPDMixin(FlavoredMixin):\n@@ -466,16 +469,14 @@ class SPDMixin(FlavoredMixin):\n'taxon': taxon, 'gene_id': gid}\n+@_add_init\nclass SPD(SPDMixin, FASTA):\n- def __init__(self, source, parse=True, **kwargs):\n- FASTA.__init__(self, source, **kwargs)\n- SPDMixin.__init__(self, parse)\n+ pass\n+@_add_init\nclass IndexedSPD(SPDMixin, TwoLayerIndexedFASTA):\n- def __init__(self, source, parse=True, **kwargs):\n- TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n- SPDMixin.__init__(self, parse)\n+ pass\ndef read(source=None, use_index=False, flavor=None, **kwargs):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Reduce code duplication with a class decorator |
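
The commit above collapses six near-identical `__init__` methods into a class decorator that reads the flavor mixin and the base parser off `cls.__bases__`. A minimal standalone sketch of the same technique — the `FlavorMixin`/`BaseParser`/`FlavoredParser` names are illustrative, not the actual pyteomics classes:

```python
class FlavorMixin:
    def __init__(self, parse=True):
        self.parse = parse


class BaseParser:
    def __init__(self, source, **kwargs):
        self.source = source


def _add_init(cls):
    """Synthesize an __init__ that simply chains to both bases."""
    flavor, typ = cls.__bases__

    def __init__(self, source, parse=True, **kwargs):
        typ.__init__(self, source, **kwargs)
        flavor.__init__(self, parse)

    cls.__init__ = __init__
    return cls


@_add_init
class FlavoredParser(FlavorMixin, BaseParser):
    pass


p = FlavoredParser('proteins.fasta', parse=False)
assert p.source == 'proteins.fasta' and p.parse is False
```
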
377,522 | 20.07.2018 00:15:10 | -10,800 | 479556878748bf0ae9a77e7321c1aabc78632c3d | Add ncbi fasta parser | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -27,6 +27,7 @@ dev\n- :py:class:`pyteomics.fasta.UniRef`;\n- :py:class:`pyteomics.fasta.UniMes`;\n- :py:class:`pyteomics.fasta.SPD`;\n+ - :py:class:`pyteomics.fasta.NCBI`;\n- :py:class:`pyteomics.fasta.IndexedFASTA` - binary-mode, indexing parser.\nSupports direct indexing by header string;\n@@ -39,6 +40,10 @@ dev\n- :py:class:`pyteomics.fasta.IndexedUniRef`;\n- :py:class:`pyteomics.fasta.IndexedUniMes`;\n- :py:class:`pyteomics.fasta.IndexedSPD`;\n+ - :py:class:`pyteomics.fasta.IndexedNCBI`.\n+\n+ :py:func:`pyteomics.fasta.read` now returns an instance of one of these classes,\n+ depending on the arguments `use_index` and `flavor`.\n3.5.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -38,7 +38,8 @@ Available classes:\n:py:class:`UniParc` and :py:class:`IndexedUniParc`,\n:py:class:`UniMes` and :py:class:`IndexedUniMes`,\n:py:class:`UniRef` and :py:class:`IndexedUniRef`,\n- :py:class:`SPD` and :py:class:`IndexedSPD` - format-specific parsers.\n+ :py:class:`SPD` and :py:class:`IndexedSPD`,\n+ :py:class:`NCBI` and :py:class:`IndexedNCBI` - format-specific parsers.\nFunctions\n.........\n@@ -503,6 +504,24 @@ class IndexedSPD(SPDMixin, TwoLayerIndexedFASTA):\npass\n+class NCBIMixin():\n+ header_pattern = r'^(\\S+)\\s+(.*\\S)\\s+\\[(.*)\\]'\n+\n+ def parser(self, header):\n+ ID, description, organism = re.match(self.header_pattern, header).groups()\n+ return {'id': ID, 'description': description, 'taxon': organism}\n+\n+\n+@_add_init\n+class NCBI(NCBIMixin, FASTA):\n+ pass\n+\n+\n+@_add_init\n+class IndexedNCBI(NCBIMixin, TwoLayerIndexedFASTA):\n+ pass\n+\n+\ndef read(source=None, use_index=False, flavor=None, **kwargs):\n\"\"\"Parse a FASTA file. This function serves as a dispatcher between\ndifferent parsers available in this module.\n@@ -806,13 +825,15 @@ def _intify(d, keys):\nstd_parsers = {'uniprot': (UniProt, IndexedUniProt), 'uniref': (UniRef, IndexedUniRef),\n'uniparc': (UniParc, IndexedUniParc), 'unimes': (UniMes, IndexedUniMes),\n- 'spd': (SPD, IndexedSPD), None: (FASTA, IndexedFASTA)}\n+ 'spd': (SPD, IndexedSPD), 'ncbi': (NCBI, IndexedNCBI),\n+ None: (FASTA, IndexedFASTA)}\n\"\"\"A dictionary with parsers for known FASTA header formats. For now, supported\nformats are those described at\n`UniProt help page <http://www.uniprot.org/help/fasta-headers>`_.\"\"\"\n_std_mixins = {'uniprot': UniProtMixin, 'uniref': UniRefMixin,\n- 'uniparc': UniParcMixin, 'unimes': UniMesMixin, 'spd': SPDMixin}\n+ 'uniparc': UniParcMixin, 'unimes': UniMesMixin, 'spd': SPDMixin,\n+ 'ncbi': NCBIMixin}\ndef parse(header, flavor='auto', parsers=None):\n\"\"\"Parse the FASTA header and return a nice dictionary.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_fasta.py",
"new_path": "tests/test_fasta.py",
"diff": "@@ -233,5 +233,12 @@ class FastaTest(unittest.TestCase):\n'taxon': 'HUMAN'}\nself.assertEqual(fasta.parse(header), parsed)\n+ def test_parser_ncbi(self):\n+ header = '>NP_001351877.1 acylglycerol kinase, mitochondrial isoform 2 [Homo sapiens]'\n+ parsed = {'description': 'acylglycerol kinase, mitochondrial isoform 2',\n+ 'id': 'NP_001351877.1',\n+ 'taxon': 'Homo sapiens'}\n+ self.assertEqual(fasta.parse(header), parsed)\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add ncbi fasta parser |
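
The NCBI header rule added here is a plain regular expression, so it can be exercised outside the parser class. A small sketch reproducing the parsing step and the expected values from the new test:

```python
import re

# the NCBI rule from the diff: accession, free-text description,
# organism name in square brackets
header_pattern = r'^(\S+)\s+(.*\S)\s+\[(.*)\]'

header = 'NP_001351877.1 acylglycerol kinase, mitochondrial isoform 2 [Homo sapiens]'
ID, description, organism = re.match(header_pattern, header).groups()
assert {'id': ID, 'description': description, 'taxon': organism} == {
    'id': 'NP_001351877.1',
    'description': 'acylglycerol kinase, mitochondrial isoform 2',
    'taxon': 'Homo sapiens'}
```
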
377,522 | 20.07.2018 21:53:37 | -10,800 | c931cb4eeac64b17980fffb2cc20894a9a95c460 | Unify mode checking in mgf.read and fasta.read | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -15,7 +15,8 @@ from .constants import _nist_mass\nfrom .file_helpers import (\n_file_obj, _keepstate, _keepstate_method, IteratorContextManager,\n- FileReader, IndexedTextReader, _file_reader, _file_writer, _make_chain)\n+ FileReader, IndexedTextReader, _file_reader, _file_writer, _make_chain,\n+ _check_use_index)\nfrom .math import (\nlinear_regression, linear_regression_perpendicular,\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -4,6 +4,8 @@ import re\nfrom functools import wraps\nfrom contextlib import contextmanager\nfrom collections import OrderedDict\n+import warnings\n+warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\ntry:\nbasestring\n@@ -348,3 +350,20 @@ def _make_chain(reader, readername, full_output=False):\ndispatch.from_iterable = dispatch_from_iterable\nreturn dispatch\n+\n+def _check_use_index(source, use_index, default):\n+ if use_index is not None:\n+ use_index = bool(use_index)\n+ if 'b' not in getattr(source, 'mode', 'b'):\n+ if use_index is True:\n+ warnings.warn('use_index is True, but the file mode is not binary. '\n+ 'Setting use_index to False')\n+ use_index = False\n+ elif 'b' in getattr(source, 'mode', ''):\n+ if use_index is False:\n+ warnings.warn('use_index is False, but the file mode is binary. '\n+ 'Setting use_index to True')\n+ use_index = True\n+ if use_index is None:\n+ use_index = default\n+ return use_index\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -111,6 +111,7 @@ from collections import namedtuple\nimport re\nfrom . import auxiliary as aux\n+\nProtein = namedtuple('Protein', ('description', 'sequence'))\nclass FASTABase():\n@@ -522,7 +523,7 @@ class IndexedNCBI(NCBIMixin, TwoLayerIndexedFASTA):\npass\n-def read(source=None, use_index=False, flavor=None, **kwargs):\n+def read(source=None, use_index=None, flavor=None, **kwargs):\n\"\"\"Parse a FASTA file. This function serves as a dispatcher between\ndifferent parsers available in this module.\n@@ -552,6 +553,7 @@ def read(source=None, use_index=False, flavor=None, **kwargs):\nexcept KeyError:\nraise aux.PyteomicsError('No parser for flavor: {}. Supported flavors: {}'.format(\nflavor, ', '.join(map(str, std_parsers))))\n+ use_index = aux._check_use_index(source, use_index, False)\nreturn parser[use_index](source, **kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -9,13 +9,15 @@ Summary\nhuman-readable format for MS/MS data. It allows storing MS/MS peak lists and\nexprimental parameters.\n-This module provides minimalistic infrastructure for access to data stored in\n-MGF files. The most important function is :py:func:`read`, which\n-reads spectra and related information as saves them into human-readable\n-:py:class:`dicts`.\n+This module provides classes and functions for access to data stored in\n+MGF files.\n+Parsing is done using :py:class:`MGF` and :py:class:`IndexedMGF` classes.\n+The :py:func:`read` function can be used as an entry point.\n+MGF spectra are converted to dictionaries. MS/MS data points are\n+(optionally) represented as :py:mod:`numpy` arrays.\nAlso, common parameters can be read from MGF file header with\n-:py:func:`read_header` function. :py:func:`write` allows creation of MGF\n-files.\n+:py:func:`read_header` function.\n+:py:func:`write` allows creation of MGF files.\nClasses\n-------\n@@ -25,7 +27,7 @@ Classes\n:py:class:`IndexedMGF` - a binary-mode MGF parser. When created, builds a byte offset index\nfor fast random access by spectrum titles. Sequential iteration is also supported.\n- Needs a seekable file opened in binary mode (or will open it if given a file name).\n+ Needs a seekable file opened in binary mode (if created from existing file object).\n:py:class:`MGFBase` - abstract class, the common ancestor of the two classes above.\nCan be used for type checking.\n@@ -65,15 +67,13 @@ Functions\n# See the License for the specific language governing permissions and\n# limitations under the License.\n-from . import auxiliary as aux\ntry:\nimport numpy as np\nexcept ImportError:\nnp = None\nimport itertools as it\nimport sys\n-import warnings\n-warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\n+from . import auxiliary as aux\nclass MGFBase():\n\"\"\"Abstract class representing an MGF file. Subclasses implement different approaches to parsing.\"\"\"\n@@ -261,7 +261,7 @@ class IndexedMGF(aux.IndexedTextReader, MGFBase):\nreturn self._read_header_lines(header_lines)\ndef _read(self, **kwargs):\n- for spec, offsets in self._offset_index.items():\n+ for _, offsets in self._offset_index.items():\nspectrum = self._read_spectrum(*offsets)\nyield spectrum\n@@ -305,7 +305,8 @@ class MGF(aux.FileReader, MGFBase):\n\"\"\"\n- def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding=None):\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\n+ dtype=None, encoding=None):\naux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\nMGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\nself.encoding = encoding\n@@ -389,13 +390,10 @@ def read(*args, **kwargs):\nsource = args[0]\nelse:\nsource = kwargs.get('source')\n- use_index = kwargs.pop('use_index', True)\n- if 'b' in getattr(source, 'mode', 'b') and use_index:\n- tp = IndexedMGF\n- else:\n- if use_index:\n- warnings.warn('use_index is True, but the file mode is not binary. Setting use_index to False')\n- tp = MGF\n+ use_index = kwargs.pop('use_index', None)\n+ use_index = aux._check_use_index(source, use_index, True)\n+ tp = IndexedMGF if use_index else MGF\n+\nreturn tp(*args, **kwargs)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Unify mode checking in mgf.read and fasta.read |
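
The new `_check_use_index` helper reconciles a requested `use_index` value with the mode of an already-open file, falling back to a per-format default when the mode is unknown. A sketch of that reconciliation logic with a minimal file-handle stand-in; the placement of the forced assignments outside the warning branches is an assumption, since the dump flattens indentation:

```python
import warnings


def check_use_index(source, use_index, default):
    """Sketch of the reconciliation logic (helper name shortened here)."""
    if use_index is not None:
        use_index = bool(use_index)
    # objects without a ``mode`` attribute fall through to the default
    if 'b' not in getattr(source, 'mode', 'b'):
        if use_index is True:
            warnings.warn('use_index is True, but the file mode is not binary. '
                          'Setting use_index to False')
        use_index = False
    elif 'b' in getattr(source, 'mode', ''):
        if use_index is False:
            warnings.warn('use_index is False, but the file mode is binary. '
                          'Setting use_index to True')
        use_index = True
    if use_index is None:
        use_index = default
    return use_index


class _F:                          # minimal stand-in for an open file handle
    def __init__(self, mode):
        self.mode = mode


assert check_use_index(_F('r'), True, False) is False    # text mode: forced off
assert check_use_index(_F('rb'), False, True) is True    # binary mode: forced on
assert check_use_index(object(), None, True) is True     # mode unknown: default
```
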
377,522 | 14.08.2018 00:02:49 | -10,800 | c73644b0b8f338aded599e9473681315cc0dd05c | Draft implementation of multiprocessing
Change XML byte offset indexes (TagSpecificXMLByteIndex) to use strings; ByteCountingXMLScanner still uses bytes | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -16,7 +16,7 @@ from .constants import _nist_mass\nfrom .file_helpers import (\n_file_obj, _keepstate, _keepstate_method, IteratorContextManager,\nFileReader, IndexedTextReader, _file_reader, _file_writer, _make_chain,\n- _check_use_index)\n+ _check_use_index, FileReadingProcess, TaskMappingMixin, serializer)\nfrom .math import (\nlinear_regression, linear_regression_perpendicular,\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -4,6 +4,8 @@ import re\nfrom functools import wraps\nfrom contextlib import contextmanager\nfrom collections import OrderedDict\n+import multiprocessing as mp\n+import threading\nimport warnings\nwarnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\n@@ -22,6 +24,24 @@ try:\nexcept ImportError:\nnp = None\n+try:\n+ import dill\n+except ImportError:\n+ dill = None\n+ try:\n+ import cPickle as pickle\n+ except ImportError:\n+ import pickle\n+ serializer = pickle\n+else:\n+ serializer = dill\n+\n+try:\n+ from queue import Empty\n+except ImportError:\n+ from Queue import Empty\n+\n+from . import PyteomicsError\ndef _keepstate(func):\n\"\"\"Decorator to help keep the position in open files passed as\n@@ -290,7 +310,8 @@ def _file_reader(_mode='r'):\n@wraps(_func)\ndef helper(*args, **kwargs):\nif args:\n- return FileReader(args[0], _mode, _func, True, args[1:], kwargs, kwargs.pop('encoding', None))\n+ return FileReader(args[0], _mode, _func, True, args[1:], kwargs,\n+ kwargs.pop('encoding', None))\nsource = kwargs.pop('source', None)\nreturn FileReader(source, _mode, _func, True, (), kwargs, kwargs.pop('encoding', None))\nreturn helper\n@@ -321,7 +342,6 @@ def _make_chain(reader, readername, full_output=False):\nresults = [reader(arg, **kwargs) for arg in args]\nif pd is not None and all(isinstance(a, pd.DataFrame) for a in args):\nreturn pd.concat(results)\n- else:\nreturn np.concatenate(results)\ndef _iter(files, kwargs):\n@@ -350,7 +370,6 @@ def _make_chain(reader, readername, full_output=False):\ndef dispatch_from_iterable(args, **kwargs):\nif kwargs.get('full_output', full_output):\nreturn concat_results(*args, **kwargs)\n- else:\nreturn _chain(*args, **kwargs)\ndispatch.__doc__ = \"\"\"Chain :py:func:`{0}` for several files.\n@@ -385,3 +404,96 @@ def _check_use_index(source, use_index, default):\nif use_index is None:\nuse_index = default\nreturn use_index\n+\n+\n+class FileReadingProcess(mp.Process):\n+ \"\"\"Process that does a share of distributed work on entries read from file.\n+ Reconstructs a reader object, parses an entries from given indexes,\n+ optionally does additional processing, sends results back.\n+\n+ The reader class must support the :py:meth:`__getitem__` dict-like lookup.\n+ \"\"\"\n+ def __init__(self, reader_spec, target_spec, qin, qout, done_flag, args_spec, kwargs_spec):\n+ self.reader = serializer.loads(reader_spec)\n+ fname = getattr(self.reader, 'name', self.reader.__class__.__name__)\n+ target = serializer.loads(target_spec)\n+ tname = getattr(target, '__name__', '<?>')\n+ super(FileReadingProcess, self).__init__(target=target,\n+ name='Process-{}-{}'.format(fname, tname),\n+ args=serializer.loads(args_spec),\n+ kwargs=serializer.loads(kwargs_spec))\n+ self._qin = qin\n+ self._qout = qout\n+ # self._in_flag = in_flag\n+ self._done_flag = done_flag\n+\n+ def run(self):\n+ for key in iter(self._qin.get, None):\n+ item = self.reader[key]\n+ if self._target is not None:\n+ result = self._target(item, *self._args, **self._kwargs)\n+ else:\n+ result = item\n+ self._qout.put(result)\n+ self._done_flag.set()\n+\n+ def is_done(self):\n+ return self._done_flag.is_set()\n+\n+try:\n+ _NPROC = mp.cpu_count()\n+except NotImplementedError:\n+ _NPROC = 4\n+\n+class TaskMappingMixin(object):\n+ def map(self, iterator=None, target=None, processes=-1, *args, **kwargs):\n+ if iterator is None:\n+ iterator = self._default_iterator()\n+ if processes < 1:\n+ processes = _NPROC\n+ serialized = []\n+ for obj, objname in [(self, 'reader'),\n+ (target, 'target'), 
(args, 'args'), (kwargs, 'kwargs')]:\n+ try:\n+ serialized.append(serializer.dumps(obj))\n+ except serializer.PicklingError:\n+ msg = 'Could not serialize {0} {1} with {2.__name__}.'.format(\n+ objname, obj, serializer)\n+ if serializer is not dill:\n+ msg += ' Try installing `dill`.'\n+ raise PyteomicsError(msg)\n+ reader_spec, target_spec, args_spec, kwargs_spec = serialized\n+\n+ done_event = mp.Event()\n+ in_queue = mp.Queue(10000)\n+ out_queue = mp.Queue(1000)\n+\n+ workers = []\n+ for _ in range(processes):\n+ worker = FileReadingProcess(\n+ reader_spec, target_spec, in_queue, out_queue, done_event, args_spec, kwargs_spec)\n+ workers.append(worker)\n+\n+ def feeder():\n+ for key in iterator:\n+ in_queue.put(key)\n+ for _ in range(processes):\n+ in_queue.put(None)\n+\n+ feeder_thread = threading.Thread(target=feeder)\n+ feeder_thread.daemon = True\n+ feeder_thread.start()\n+ for worker in workers:\n+ worker.start()\n+ while True:\n+ try:\n+ result = out_queue.get(True, 5)\n+ yield result\n+ except Empty:\n+ if all(w.is_done() for w in workers):\n+ break\n+ else:\n+ continue\n+ feeder_thread.join()\n+ for worker in workers:\n+ worker.join()\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mass/mass.py",
"new_path": "pyteomics/mass/mass.py",
"diff": "@@ -979,7 +979,7 @@ class Unimod():\nelements = [x.attrib for x in self._xpath('/unimod/elements/elem')]\navg = {}\nfor elem in elements:\n- i, label = re.match('^(\\d*)(\\D+)$', elem['title']).groups()\n+ i, label = re.match(r'^(\\d*)(\\D+)$', elem['title']).groups()\nif not i:\niso = 0\nelse:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -65,9 +65,9 @@ This module requires :py:mod:`lxml` and :py:mod:`numpy`.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n-import numpy as np\nimport re\nimport warnings\n+import numpy as np\nfrom . import xml, auxiliary as aux, _schema_defaults\nfrom .xml import etree\n@@ -90,14 +90,14 @@ STANDARD_ARRAYS = set([\n])\n-class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\n+class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML, xml.MultiProcessingXML):\n\"\"\"Parser class for mzML files.\"\"\"\nfile_format = 'mzML'\n_root_element = 'mzML'\n_default_schema = _schema_defaults._mzml_schema_defaults\n_default_version = '1.1.0'\n_default_iter_tag = 'spectrum'\n- _structures_to_flatten = {'binaryDataArrayList', \"referenceableParamGroupRef\"}\n+ _structures_to_flatten = {'binaryDataArrayList', 'referenceableParamGroupRef'}\n_indexed_tags = {'spectrum', 'chromatogram'}\ndef __init__(self, *args, **kwargs):\n@@ -162,10 +162,9 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\n# can report it as such. Otherwise fall back\n# to \"binary\". This fallback signals special\n# behavior elsewhere.\n- elif n_candidates == 0:\n+ if n_candidates == 0:\nif is_non_standard:\nreturn NON_STANDARD_DATA_ARRAY\n- else:\nreturn \"binary\"\n# Multiple choices means we need to make a decision which could\n# mask data from the user. This should never happen but stay safe.\n@@ -177,7 +176,6 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\nstandard_options = set(candidates) & STANDARD_ARRAYS\nif standard_options:\nreturn max(standard_options, key=len)\n- else:\nreturn max(candidates, key=len)\ndef _determine_array_dtype(self, info):\n@@ -206,7 +204,6 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\nif len(found_compression_types) == 1:\ndel info[found_compression_types[0]]\nreturn found_compression_types[0]\n- else:\nwarnings.warn(\"Multiple options for binary array compression: %r\" % (\nfound_compression_types,))\nreturn found_compression_types[0]\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -28,27 +28,29 @@ This module requres :py:mod:`lxml` and :py:mod:`numpy`.\n# limitations under the License.\nimport re\n-import warnings\n-warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\nimport socket\nfrom traceback import format_exc\nimport operator as op\nimport ast\nimport os\nimport json\n-import numpy as np\n-from lxml import etree\n+import warnings\nfrom collections import OrderedDict, defaultdict\n+from lxml import etree\n+import numpy as np\n+\nfrom .auxiliary import FileReader, PyteomicsError, basestring, _file_obj\nfrom .auxiliary import unitint, unitfloat, unitstr, cvstr\nfrom .auxiliary import _keepstate_method as _keepstate\nfrom .auxiliary import BinaryDataArrayTransformer\n+from .auxiliary import TaskMappingMixin\ntry: # Python 2.7\nfrom urllib2 import urlopen, URLError\nexcept ImportError: # Python 3.x\nfrom urllib.request import urlopen, URLError\n+warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\ndef _local_name(element):\n\"\"\"Strip namespace from the XML element's name\"\"\"\n@@ -725,7 +727,7 @@ class ByteCountingXMLScanner(_file_obj):\nrunning = True\nwhile running:\nbuff = f.read(read_size)\n- if len(buff) == 0:\n+ if not buff:\nrunning = False\nbuff = tail\nelse:\n@@ -775,8 +777,8 @@ class ByteCountingXMLScanner(_file_obj):\nReturns\n-------\n- defaultdict(ByteEncodingOrderedDict)\n- Mapping from tag type to ByteEncodingOrderedDict from identifier to byte offset\n+ defaultdict(dict)\n+ Mapping from tag type to dict from identifier to byte offset\n\"\"\"\nif lookup_id_key_mapping is None:\nlookup_id_key_mapping = {}\n@@ -785,10 +787,11 @@ class ByteCountingXMLScanner(_file_obj):\nlookup_id_key_mapping.setdefault(name, \"id\")\nlookup_id_key_mapping[name] = ensure_bytes_single(lookup_id_key_mapping[name])\n- indices = defaultdict(ByteEncodingOrderedDict)\n+ indices = defaultdict(dict)\ng = self._generate_offsets()\nfor offset, offset_type, attrs in g:\n- indices[offset_type][attrs[lookup_id_key_mapping[offset_type]]] = offset\n+ indices[offset_type.decode('utf-8')][\n+ attrs[lookup_id_key_mapping[offset_type]].decode('utf-8')] = offset\nreturn indices\n@classmethod\n@@ -831,7 +834,7 @@ class TagSpecificXMLByteIndex(object):\nself.indexed_tags = indexed_tags\nself.indexed_tag_keys = keys\nself.source = source\n- self.offsets = defaultdict(ByteEncodingOrderedDict)\n+ self.offsets = defaultdict(dict)\nself.build_index()\ndef __getstate__(self):\n@@ -882,18 +885,12 @@ class FlatTagSpecificXMLByteIndex(TagSpecificXMLByteIndex):\nAttributes\n----------\n- offsets : ByteEncodingOrderedDict\n+ offsets : dict\nThe mapping between ids and byte offsets.\n\"\"\"\ndef build_index(self):\nhierarchical_index = super(FlatTagSpecificXMLByteIndex, self).build_index()\n- flat_index = []\n-\n- for tag_type in hierarchical_index.values():\n- flat_index.extend(tag_type.items())\n-\n- flat_index.sort(key=lambda x: x[1])\n- self.offsets = ByteEncodingOrderedDict(flat_index)\n+ self.offsets = _flatten_map(hierarchical_index)\nreturn self.offsets\ndef __len__(self):\n@@ -921,7 +918,7 @@ def _flatten_map(hierarchical_map):\nall_records.extend(records.items())\nall_records.sort(key=lambda x: x[1])\n- return ByteEncodingOrderedDict(all_records)\n+ return OrderedDict(all_records)\nclass IndexedXML(XML):\n@@ -931,7 +928,8 @@ class IndexedXML(XML):\n_indexed_tags = set()\n_indexed_tag_keys = {}\n- def __init__(self, source, read_schema=False, iterative=True, build_id_cache=False, use_index=True, *args, **kwargs):\n+ def __init__(self, source, 
read_schema=False, iterative=True, build_id_cache=False,\n+ use_index=True, *args, **kwargs):\n\"\"\"Create an XML parser object.\nParameters\n@@ -969,16 +967,16 @@ class IndexedXML(XML):\nself._use_index = use_index\n- self._indexed_tags = ensure_bytes(self._indexed_tags)\n- self._indexed_tag_keys = {\n- ensure_bytes_single(k): ensure_bytes_single(v)\n- for k, v in self._indexed_tag_keys.items()\n- }\n+ # self._indexed_tags = ensure_bytes(self._indexed_tags)\n+ # self._indexed_tag_keys = {\n+ # ensure_bytes_single(k): ensure_bytes_single(v)\n+ # for k, v in self._indexed_tag_keys.items()\n+ # }\nif use_index:\nbuild_id_cache = False\nsuper(IndexedXML, self).__init__(source, read_schema, iterative, build_id_cache, *args, **kwargs)\n- self._offset_index = ByteEncodingOrderedDict()\n+ self._offset_index = OrderedDict()\nself._build_index()\ndef __reduce_ex__(self, protocol):\n@@ -1050,13 +1048,29 @@ class IndexedXML(XML):\nreturn self.get_by_id(elem_id)\n+class MultiProcessingXML(TaskMappingMixin, IndexedXML):\n+ \"\"\"XML reader that feeds indexes to external processes\n+ for parallel parsing and analysis of XML entries.\"\"\"\n+\n+ def _build_index(self):\n+ super(MultiProcessingXML, self)._build_index()\n+ self._hierarchical_offset_index = TagSpecificXMLByteIndex(\n+ self._source, self._indexed_tags, self._indexed_tag_keys)\n+\n+ def map(self, target=None, processes=-1, tag=None, *args, **kwargs):\n+ if tag is None:\n+ tag = self._default_iter_tag\n+ iterator = iter(self._hierarchical_offset_index[tag])\n+ return super(MultiProcessingXML, self).map(iterator, target, processes, *args, **kwargs)\n+\n+\ndef save_byte_index(index, fp):\n\"\"\"Write the byte offset index to the provided\nfile\nParameters\n----------\n- index : ByteEncodingOrderedDict\n+ index : OrderedDict\nThe byte offset index to be saved\nfp : file\nThe file to write the index to\n@@ -1067,7 +1081,7 @@ def save_byte_index(index, fp):\n\"\"\"\nencoded_index = dict()\nfor key, offset in index.items():\n- encoded_index[key.decode(\"utf8\")] = offset\n+ encoded_index[key] = offset\njson.dump(encoded_index, fp)\nreturn fp\n@@ -1082,10 +1096,10 @@ def load_byte_index(fp):\nReturns\n-------\n- ByteEncodingOrderedDict\n+ OrderedDict\n\"\"\"\ndata = json.load(fp)\n- index = ByteEncodingOrderedDict()\n+ index = OrderedDict()\nfor key, value in sorted(data.items(), key=lambda x: x[1]):\nindex[key] = value\nreturn index\n@@ -1097,7 +1111,7 @@ class PrebuiltOffsetIndex(FlatTagSpecificXMLByteIndex):\nAttributes\n----------\n- offsets : ByteEncodingOrderedDict\n+ offsets : OrderedDict\n\"\"\"\ndef __init__(self, offsets):\n@@ -1175,6 +1189,7 @@ class IndexSavingXML(IndexedXML):\nwith cls(path, use_index=True) as inst:\ninst.write_byte_offsets()\n+\nclass ArrayConversionMixin(BinaryDataArrayTransformer):\n_dtype_dict = {}\n_array_keys = ['m/z array', 'intensity array']\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1250,7 +1250,7 @@ mzml_spectra = [{'MSn spectrum': '',\n'count': 2,\n'defaultArrayLength': 19914,\n'highest observed m/z': 2000.0099466203771,\n- 'id': 'controllerType=0 controllerNumber=1 scan=1',\n+ 'id': 'controllerType=0 controllerNumber=1 scan=2',\n'index': 1,\n'intensity array': makeCA(mzml_int_array),\n'lowest observed m/z': 200.00018816645022,\n@@ -1298,7 +1298,7 @@ mzml_spectra_skip_empty_values = [{'base peak intensity': 1471973.875,\n'count': 2,\n'defaultArrayLength': 19914,\n'highest observed m/z': 2000.0099466203771,\n- 'id': 'controllerType=0 controllerNumber=1 scan=1',\n+ 'id': 'controllerType=0 controllerNumber=1 scan=2',\n'index': 1,\n'intensity array': makeCA(mzml_int_array),\n'lowest observed m/z': 200.00018816645022,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test.mzML",
"new_path": "tests/test.mzML",
"diff": "</binaryDataArray>\n</binaryDataArrayList>\n</spectrum>\n- <spectrum index=\"1\" id=\"controllerType=0 controllerNumber=1 scan=1\" defaultArrayLength=\"19914\">\n+ <spectrum index=\"1\" id=\"controllerType=0 controllerNumber=1 scan=2\" defaultArrayLength=\"19914\">\n<cvParam cvRef=\"MS\" accession=\"MS:1000511\" name=\"ms level\" value=\"1\"/>\n<cvParam cvRef=\"MS\" accession=\"MS:1000580\" name=\"MSn spectrum\" value=\"\"/>\n<cvParam cvRef=\"MS\" accession=\"MS:1000130\" name=\"positive scan\" value=\"\"/>\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "import os\nimport shutil\n-from os import path\nimport tempfile\nimport pyteomics\nfrom io import BytesIO\n-pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\n+pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]\nfrom itertools import product\nimport unittest\nfrom pyteomics.mzml import MzML, PreIndexedMzML, read, chain\n@@ -12,7 +11,7 @@ from pyteomics import auxiliary as aux, xml\nfrom data import mzml_spectra, mzml_spectra_skip_empty_values\nimport numpy as np\nimport pickle\n-\n+import operator as op\nclass MzmlTest(unittest.TestCase):\nmaxDiff = None\n@@ -20,12 +19,18 @@ class MzmlTest(unittest.TestCase):\ndef test_read(self):\nfor rs, it, ui in product([True, False], repeat=3):\n+ if rs: continue # temporarily disable retrieval of schema\nfor func in [MzML, read, chain,\nlambda x, **kw: chain.from_iterable([x], **kw), PreIndexedMzML]:\nwith func(self.path, read_schema=rs, iterative=it, use_index=ui) as r:\n# http://stackoverflow.com/q/14246983/1258041\nself.assertEqual(mzml_spectra, list(r))\n+ def test_mp_read(self):\n+ key = op.itemgetter('index')\n+ with MzML(self.path) as f:\n+ self.assertEqual(sorted(mzml_spectra, key=key), sorted(list(f.map()), key=key))\n+\ndef test_read_skip_empty_values(self):\nwith MzML(self.path, skip_empty_cvparam_values=True) as r:\nself.assertEqual(mzml_spectra_skip_empty_values, list(r))\n@@ -70,26 +75,26 @@ class MzmlTest(unittest.TestCase):\ndef test_prebuild_index(self):\ntest_dir = tempfile.mkdtemp()\n- work_path = path.join(test_dir, self.path)\n+ work_path = os.path.join(test_dir, self.path)\nwith open(work_path, 'w') as dest, open(self.path) as source:\ndest.write(source.read())\nassert dest.closed\nwith MzML(work_path, use_index=True) as inst:\n- offsets_exist = path.exists(inst._byte_offset_filename)\n+ offsets_exist = os.path.exists(inst._byte_offset_filename)\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.FlatTagSpecificXMLByteIndex))\nself.assertTrue(not isinstance(inst._offset_index, xml.PrebuiltOffsetIndex))\nself.assertTrue(inst._source.closed)\nMzML.prebuild_byte_offset_file(work_path)\nwith MzML(work_path, use_index=True) as inst:\n- offsets_exist = path.exists(inst._byte_offset_filename)\n+ offsets_exist = os.path.exists(inst._byte_offset_filename)\nself.assertTrue(offsets_exist)\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.PrebuiltOffsetIndex))\nself.assertTrue(inst._source.closed)\nos.remove(inst._byte_offset_filename)\nwith MzML(work_path, use_index=True) as inst:\n- offsets_exist = path.exists(inst._byte_offset_filename)\n+ offsets_exist = os.path.exists(inst._byte_offset_filename)\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.FlatTagSpecificXMLByteIndex))\nself.assertTrue(not isinstance(inst._offset_index, xml.PrebuiltOffsetIndex))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Draft implementation of multiprocessing
Change XML byte offset indexes (TagSpecificXMLByteIndex) to use strings; ByteCountingXMLScanner still uses bytes |
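
The draft `TaskMappingMixin.map` above combines a feeder thread pushing lookup keys into an input queue, worker processes that fetch entries from a deserialized reader, and an output queue drained by the parent. A stripped-down, runnable sketch of that pattern, with a plain dict standing in for the indexed reader (the sketch uses one done flag per worker for simplicity):

```python
import multiprocessing as mp
import threading
from queue import Empty

DATA = {key: key * 2 for key in range(10)}   # stand-in for an indexed reader


def worker(qin, qout, done):
    # mirrors FileReadingProcess.run(): pull keys until the None sentinel,
    # look each entry up, apply the target function, push the result
    for key in iter(qin.get, None):
        qout.put(DATA[key] + 1)              # "+ 1" plays the role of the target
    done.set()


def parallel_map(keys, processes=2):
    qin, qout = mp.Queue(), mp.Queue()
    done_events = [mp.Event() for _ in range(processes)]
    workers = [mp.Process(target=worker, args=(qin, qout, ev))
               for ev in done_events]

    def feeder():                            # mirrors the feeder thread in map()
        for key in keys:
            qin.put(key)
        for _ in range(processes):           # one sentinel per worker
            qin.put(None)

    threading.Thread(target=feeder, daemon=True).start()
    for w in workers:
        w.start()
    while True:                              # drain until all workers report done
        try:
            yield qout.get(True, 1)
        except Empty:
            if all(ev.is_set() for ev in done_events):
                break
    for w in workers:
        w.join()


if __name__ == '__main__':
    print(sorted(parallel_map(range(10))))   # [1, 3, 5, ..., 19]
```
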
377,522 | 14.08.2018 00:47:26 | -10,800 | 1d4f65516a7be9f0034854b9fc8329147360a3bf | Ensure the strings are encoded again in ByteCountingXMLScanner | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -782,10 +782,13 @@ class ByteCountingXMLScanner(_file_obj):\n\"\"\"\nif lookup_id_key_mapping is None:\nlookup_id_key_mapping = {}\n+ lookup_id_key_mapping = {ensure_bytes_single(key): ensure_bytes_single(value)\n+ for key, value in lookup_id_key_mapping.items()}\nfor name in self.indexed_tags:\n- lookup_id_key_mapping.setdefault(name, \"id\")\n- lookup_id_key_mapping[name] = ensure_bytes_single(lookup_id_key_mapping[name])\n+ bname = ensure_bytes_single(name)\n+ lookup_id_key_mapping.setdefault(bname, 'id')\n+ lookup_id_key_mapping[bname] = ensure_bytes_single(lookup_id_key_mapping[bname])\nindices = defaultdict(dict)\ng = self._generate_offsets()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Ensure the strings are encoded again in ByteCountingXMLScanner |
377,522 | 14.08.2018 00:47:47 | -10,800 | d3bf6ab8a8e3c608f9370c370894639ae4a934d1 | Make pepxml use multiprocessing | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -444,6 +444,7 @@ try:\n_NPROC = mp.cpu_count()\nexcept NotImplementedError:\n_NPROC = 4\n+_QUEUE_TIMEOUT = 4\nclass TaskMappingMixin(object):\ndef map(self, iterator=None, target=None, processes=-1, *args, **kwargs):\n@@ -487,7 +488,7 @@ class TaskMappingMixin(object):\nworker.start()\nwhile True:\ntry:\n- result = out_queue.get(True, 5)\n+ result = out_queue.get(True, _QUEUE_TIMEOUT)\nyield result\nexcept Empty:\nif all(w.is_done() for w in workers):\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -97,13 +97,15 @@ This module requires :py:mod:`lxml`.\nfrom lxml import etree\nfrom . import xml, auxiliary as aux, _schema_defaults\n-class PepXML(xml.XML):\n+class PepXML(xml.MultiProcessingXML):\n\"\"\"Parser class for pepXML files.\"\"\"\nfile_format = 'pepXML'\n_root_element = 'msms_pipeline_analysis'\n_default_schema = _schema_defaults._pepxml_schema_defaults\n_default_version = '1.15'\n_default_iter_tag = 'spectrum_query'\n+ _indexed_tags = {'spectrum_query'}\n+ _indexed_tag_keys = {'spectrum_query': 'index'}\n_structures_to_flatten = {'search_score_summary', 'modification_info'}\n# attributes which contain unconverted values\n_convert_items = {'float': {'calc_neutral_pep_mass', 'massdiff'},\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make pepxml use multiprocessing |
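
With `MultiProcessingXML` mixed in, `PepXML` gains the `map` method. A hedged usage sketch — the file name is hypothetical, the keyword names follow the signatures shown in these diffs, and non-trivial target functions may need `dill` for serialization:

```python
from pyteomics import pepxml


def scan_number(psm):
    # runs in a worker process; 'start_scan' is int-converted by the parser
    return psm['start_scan']


with pepxml.PepXML('results.pep.xml') as reader:     # hypothetical file name
    # results arrive in arbitrary order, one per indexed spectrum_query
    scans = sorted(reader.map(target=scan_number, processes=4))
```
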
377,522 | 14.08.2018 23:56:42 | -10,800 | 21b80a7ede2ee1a8a8fdfec26e3bfbdcef8ffa30 | Make IndexedMGF picklable | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -219,7 +219,7 @@ class IndexedTextReader(FileReader):\nlabel_group = 1\ndef __init__(self, source, func, pass_file, args, kwargs, encoding='utf-8', block_size=None,\n- delimiter=None, label=None, label_group=None):\n+ delimiter=None, label=None, label_group=None, _skip_index=False):\n# the underlying _file_obj gets None as encoding\n# to avoid transparent decoding of StreamReader on read() calls\nsuper(IndexedTextReader, self).__init__(source, 'rb', func, pass_file, args, kwargs, encoding=None)\n@@ -232,8 +232,19 @@ class IndexedTextReader(FileReader):\nself.block_size = block_size\nif label_group is not None:\nself.label_group = label_group\n+ self._offset_index = None\n+ if not _skip_index:\nself._offset_index = self.build_byte_index()\n+ def __getstate__(self):\n+ state = super(IndexedTextReader, self).__getstate__()\n+ state['offset_index'] = self._offset_index\n+ return state\n+\n+ def __setstate__(self, state):\n+ super(IndexedTextReader, self).__setstate__(state)\n+ self._offset_index = state['offset_index']\n+\ndef _chunk_iterator(self):\nfh = self._source.file\ndelim = remove_bom(self.delimiter.encode(self.encoding))\n@@ -447,7 +458,7 @@ except NotImplementedError:\n_QUEUE_TIMEOUT = 4\nclass TaskMappingMixin(object):\n- def map(self, iterator=None, target=None, processes=-1, *args, **kwargs):\n+ def map(self, target=None, processes=-1, iterator=None, *args, **kwargs):\nif iterator is None:\niterator = self._default_iterator()\nif processes < 1:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -247,13 +247,31 @@ class IndexedMGF(aux.IndexedTextReader, MGFBase):\n\"\"\"\ndelimiter = 'BEGIN IONS'\n- label = 'TITLE=([^\\n]+)\\n'\n+ label = r'TITLE=([^\\n]+)\\n'\n- def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8',\n- block_size=1000000):\n- aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding, block_size)\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\n+ dtype=None, encoding='utf-8', block_size=1000000, _skip_index=False):\n+ aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding,\n+ block_size, _skip_index=_skip_index)\nMGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\n+ def __reduce_ex__(self, protocol):\n+ return (self.__class__,\n+ (self._source_init, False, self._convert_arrays,\n+ self._read_charges, self._dtype_dict, self.encoding, self.block_size, True),\n+ self.__getstate__())\n+\n+ def __getstate__(self):\n+ state = super(IndexedMGF, self).__getstate__()\n+ state['use_header'] = self._use_header\n+ state['header'] = self._header\n+ return state\n+\n+ def __setstate__(self, state):\n+ super(IndexedMGF, self).__setstate__(state)\n+ self._use_header = state['use_header']\n+ self._header = state['header']\n+\n@aux._keepstate_method\ndef _read_header(self):\nfirst = next(v for v in self._offset_index.values())[0]\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -1064,7 +1064,7 @@ class MultiProcessingXML(TaskMappingMixin, IndexedXML):\nif tag is None:\ntag = self._default_iter_tag\niterator = iter(self._hierarchical_offset_index[tag])\n- return super(MultiProcessingXML, self).map(iterator, target, processes, *args, **kwargs)\n+ return super(MultiProcessingXML, self).map(target, processes, iterator, *args, **kwargs)\ndef save_byte_index(index, fp):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make IndexedMGF picklable |
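
The pickling support follows the standard `__reduce_ex__` protocol: return a callable, the arguments that rebuild a skeleton object with `_skip_index=True`, and a state dict restored via `__setstate__`, so the expensive byte-offset index travels inside the pickle instead of being rebuilt. A minimal standalone sketch of the pattern:

```python
import pickle


class IndexedReader:
    def __init__(self, source, block_size=1000000, _skip_index=False):
        self.source = source
        self.block_size = block_size
        # building the index is expensive, so it is skipped on unpickling:
        # the index travels inside the pickled state instead
        self._offset_index = None if _skip_index else self._build_index()

    def _build_index(self):
        return {'first': 0}              # stand-in for a real byte-offset scan

    def __reduce_ex__(self, protocol):
        # (callable, constructor args with _skip_index=True, state)
        return (self.__class__,
                (self.source, self.block_size, True),
                self.__getstate__())

    def __getstate__(self):
        return {'offset_index': self._offset_index}

    def __setstate__(self, state):
        self._offset_index = state['offset_index']


reader = IndexedReader('spectra.mgf', block_size=7777)
clone = pickle.loads(pickle.dumps(reader))
assert clone.block_size == 7777 and clone._offset_index == {'first': 0}
```
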
377,522 | 15.08.2018 01:01:05 | -10,800 | f7aef0a9497e98ea2e23256c32521af07fd2e9c5 | Add multiprocessing for IndexedMGF | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -509,3 +509,6 @@ class TaskMappingMixin(object):\nfeeder_thread.join()\nfor worker in workers:\nworker.join()\n+\n+ def _default_iterator(self):\n+ return iter(self._offset_index.keys())\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -224,7 +224,7 @@ class MGFBase():\nreturn self.get_spectrum(key)\n-class IndexedMGF(aux.IndexedTextReader, MGFBase):\n+class IndexedMGF(aux.TaskMappingMixin, aux.IndexedTextReader, MGFBase):\n\"\"\"\nA class representing an MGF file. Supports the `with` syntax and direct iteration for sequential\nparsing. Specific spectra can be accessed by title using the indexing syntax in constant time.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -4,6 +4,7 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport tempfile\nimport unittest\n+import pickle\nfrom pyteomics import mgf\nimport data\n@@ -116,5 +117,16 @@ class MGFTest(unittest.TestCase):\nself.assertEqual(data.mgf_spectra_long[1], f.get_spectrum(key))\nself.assertEqual(data.mgf_spectra_long[1], mgf.get_spectrum(self.path, key))\n+ def test_indexedmgf_picklable(self):\n+ with mgf.IndexedMGF(self.path) as reader:\n+ spec = pickle.dumps(reader)\n+ with pickle.loads(spec) as reader:\n+ self.assertEqual(data.mgf_spectra_long[0], next(reader))\n+\n+ def test_map(self):\n+ with mgf.IndexedMGF(self.path) as reader:\n+ spectra = sorted(list(reader.map()), key=lambda s: s['params']['title'])\n+ self.assertEqual(data.mgf_spectra_long, spectra)\n+\nif __name__ == \"__main__\":\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add multiprocessing for IndexedMGF |
377,522 | 15.08.2018 20:40:11 | -10,800 | 7eb748cfa90b619b51fbd2233feb36e996bdf46e | Add multiprocessing to fasta indexed classes | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": ":py:func:`pyteomics.fasta.read` now returns an instance of one of these classes,\ndepending on the arguments `use_index` and `flavor`.\n+ - Multiprocessing support: all indexed XML and text file parsers now expose a :py:meth:`map` method.\n+ This method can map a user-supplied function to each file entry in separate processes (or simply\n+ parallelize the parsing itself).\n+ The order of entries is not preserved in the output.\n+\n3.5.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -437,6 +437,7 @@ class FileReadingProcess(mp.Process):\nself._qout = qout\n# self._in_flag = in_flag\nself._done_flag = done_flag\n+ self.daemon = True\ndef run(self):\nfor key in iter(self._qin.get, None):\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -212,7 +212,27 @@ class FASTA(aux.FileReader, FASTABase):\n'Use IndexedFASTA and its subclasses')\n-class IndexedFASTA(aux.IndexedTextReader, FASTABase):\n+def _reconstruct(cls, args, kwargs):\n+ return cls(*args, **kwargs, _skip_index=True)\n+\n+def _picklable(cls):\n+ def __init__(self, *args, **kwargs):\n+ cls.__init__(self, *args, **kwargs)\n+ self._init_args = args\n+ self._init_kwargs = kwargs\n+\n+ def __reduce_ex__(self, protocol):\n+ return (_reconstruct,\n+ (self.__class__, self._init_args, self._init_kwargs),\n+ self.__getstate__())\n+\n+ d = cls.__dict__.copy()\n+ d['__init__'] = __init__\n+ d['__reduce_ex__'] = __reduce_ex__\n+ return type(cls.__name__, cls.__bases__, d)\n+\n+@_picklable\n+class IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\n\"\"\"Indexed FASTA parser. Supports direct indexing by matched labels.\"\"\"\ndelimiter = '>'\nlabel = r'^>(.*)'\n@@ -252,6 +272,11 @@ class IndexedFASTA(aux.IndexedTextReader, FASTABase):\naux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, **kwargs)\nFASTABase.__init__(self, ignore_comments, parser)\n+ def __reduce_ex__(self, protocol):\n+ return (_reconstruct,\n+ (self.__class__, self._init_args, self._init_kwargs),\n+ self.__getstate__())\n+\ndef _read_protein_lines(self, lines):\ndescription = []\nsequence = []\n@@ -290,7 +315,7 @@ class IndexedFASTA(aux.IndexedTextReader, FASTABase):\nif offsets is not None:\nreturn self._entry_from_offsets(*offsets)\n-\n+@_picklable\nclass TwoLayerIndexedFASTA(IndexedFASTA):\n\"\"\"Parser with two-layer index. Extracted groups are mapped to full headers (where possible),\nfull headers are mapped to byte offsets.\n@@ -334,6 +359,7 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nself.header_group = header_group\nif header_pattern is not None:\nself.header_pattern = header_pattern\n+ if not kwargs.get('_skip_index', False):\nself.build_second_index()\ndef build_second_index(self):\n@@ -348,6 +374,15 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nindex[match.group(self.header_group)] = key\nself._id2header = index\n+ def __getstate__(self):\n+ state = super(TwoLayerIndexedFASTA, self).__getstate__()\n+ state['id2header'] = self._id2header\n+ return state\n+\n+ def __setstate__(self, state):\n+ super(TwoLayerIndexedFASTA, self).__setstate__(state)\n+ self._id2header = state['id2header']\n+\ndef get_entry(self, key):\n\"\"\"Get the entry by value of header string or extracted field.\"\"\"\nraw = super(TwoLayerIndexedFASTA, self).get_entry(key)\n@@ -385,7 +420,7 @@ class UniProtMixin(FlavoredMixin):\ndef _add_init(cls):\n- \"\"\"Add and __init__ method to a flavored parser class,\n+ \"\"\"Add an __init__ method to a flavored parser class,\nwhich simply calls __init__ of its two bases.\"\"\"\nflavor, typ = cls.__bases__\nnewdict = cls.__dict__.copy()\n@@ -416,6 +451,7 @@ def _add_init(cls):\nclass UniProt(UniProtMixin, FASTA):\npass\n+@_picklable\n@_add_init\nclass IndexedUniProt(UniProtMixin, TwoLayerIndexedFASTA):\npass\n@@ -440,6 +476,7 @@ class UniRef(UniRefMixin, FASTA):\npass\n+@_picklable\n@_add_init\nclass IndexedUniRef(UniRefMixin, TwoLayerIndexedFASTA):\npass\n@@ -458,6 +495,7 @@ class UniParc(UniParcMixin, FASTA):\npass\n+@_picklable\n@_add_init\nclass IndexedUniParc(UniParcMixin, TwoLayerIndexedFASTA):\npass\n@@ -480,6 +518,7 @@ class UniMes(UniMesMixin, FASTA):\npass\n+@_picklable\n@_add_init\nclass IndexedUniMes(UniMesMixin, TwoLayerIndexedFASTA):\npass\n@@ -500,6 +539,7 @@ class SPD(SPDMixin, FASTA):\npass\n+@_picklable\n@_add_init\nclass 
IndexedSPD(SPDMixin, TwoLayerIndexedFASTA):\npass\n@@ -518,6 +558,7 @@ class NCBI(NCBIMixin, FASTA):\npass\n+@_picklable\n@_add_init\nclass IndexedNCBI(NCBIMixin, TwoLayerIndexedFASTA):\npass\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add multiprocessing to fasta indexed classes |
377,522 | 15.08.2018 22:15:35 | -10,800 | e11ebe889386c670e3c855658684aeb5c7d3e3bc | Add sensible len() and membership tests for indexed classes | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -209,6 +209,7 @@ class FileReader(IteratorContextManager):\ndef remove_bom(bstr):\nreturn bstr.replace(codecs.BOM_LE, b'').lstrip(b\"\\x00\")\n+\nclass IndexedTextReader(FileReader):\n\"\"\"Abstract class for text file readers that keep an index of records for random access.\nThis requires reading the file in binary mode.\"\"\"\n@@ -236,6 +237,12 @@ class IndexedTextReader(FileReader):\nif not _skip_index:\nself._offset_index = self.build_byte_index()\n+ def __len__(self):\n+ return len(self._offset_index)\n+\n+ def __contains__(self, key):\n+ return key in self._offset_index\n+\ndef __getstate__(self):\nstate = super(IndexedTextReader, self).__getstate__()\nstate['offset_index'] = self._offset_index\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -388,6 +388,9 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nif header is not None:\nreturn super(TwoLayerIndexedFASTA, self).get_entry(header)\n+ def __contains__(self, key):\n+ return super(TwoLayerIndexedFASTA, self).__contains__(key) or key in self._id2header\n+\nclass FlavoredMixin():\n\"\"\"Parser aimed at a specific FASTA flavor.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -1050,6 +1050,9 @@ class IndexedXML(XML):\ndef __getitem__(self, elem_id):\nreturn self.get_by_id(elem_id)\n+ def __contains__(self, key):\n+ return key in self._offset_index\n+\nclass MultiProcessingXML(TaskMappingMixin, IndexedXML):\n\"\"\"XML reader that feeds indexes to external processes\n@@ -1066,6 +1069,9 @@ class MultiProcessingXML(TaskMappingMixin, IndexedXML):\niterator = iter(self._hierarchical_offset_index[tag])\nreturn super(MultiProcessingXML, self).map(target, processes, iterator, *args, **kwargs)\n+ def __len__(self):\n+ return len(self._hierarchical_offset_index[self._default_iter_tag])\n+\ndef save_byte_index(index, fp):\n\"\"\"Write the byte offset index to the provided\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add sensible len() and membership tests for indexed classes |
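
The membership test added for two-layer FASTA indexes checks both the full-header offset index and the extracted-ID mapping, while `len()` counts indexed records. A sketch with illustrative names:

```python
class TwoLayerIndex:
    """Illustrative stand-in for TwoLayerIndexedFASTA's two indexes."""
    def __init__(self, offsets, id2header):
        self._offset_index = offsets     # full header -> byte offsets
        self._id2header = id2header      # extracted ID -> full header

    def __len__(self):
        return len(self._offset_index)   # number of records, not bytes

    def __contains__(self, key):
        return key in self._offset_index or key in self._id2header


idx = TwoLayerIndex({'sp|P01308|INS_HUMAN Insulin': (0, 150)},
                    {'P01308': 'sp|P01308|INS_HUMAN Insulin'})
assert 'P01308' in idx and 'sp|P01308|INS_HUMAN Insulin' in idx and len(idx) == 1
```
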
377,522 | 15.08.2018 23:09:50 | -10,800 | 75e56a491501f67e011ebafe9546e20e7c1e7ab5 | Move unpickling of specs to run() in FileReadingProcess | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -432,14 +432,11 @@ class FileReadingProcess(mp.Process):\nThe reader class must support the :py:meth:`__getitem__` dict-like lookup.\n\"\"\"\ndef __init__(self, reader_spec, target_spec, qin, qout, done_flag, args_spec, kwargs_spec):\n- self.reader = serializer.loads(reader_spec)\n- fname = getattr(self.reader, 'name', self.reader.__class__.__name__)\n- target = serializer.loads(target_spec)\n- tname = getattr(target, '__name__', '<?>')\n- super(FileReadingProcess, self).__init__(target=target,\n- name='Process-{}-{}'.format(fname, tname),\n- args=serializer.loads(args_spec),\n- kwargs=serializer.loads(kwargs_spec))\n+ super(FileReadingProcess, self).__init__(name='pyteomics-map-worker')\n+ self.reader_spec = reader_spec\n+ self.target_spec = target_spec\n+ self.args_spec = args_spec\n+ self.kwargs_spec = kwargs_spec\nself._qin = qin\nself._qout = qout\n# self._in_flag = in_flag\n@@ -447,10 +444,14 @@ class FileReadingProcess(mp.Process):\nself.daemon = True\ndef run(self):\n+ reader = serializer.loads(self.reader_spec)\n+ target = serializer.loads(self.target_spec)\n+ args = serializer.loads(self.args_spec)\n+ kwargs = serializer.loads(self.kwargs_spec)\nfor key in iter(self._qin.get, None):\n- item = self.reader[key]\n- if self._target is not None:\n- result = self._target(item, *self._args, **self._kwargs)\n+ item = reader[key]\n+ if target is not None:\n+ result = target(item, *args, **kwargs)\nelse:\nresult = item\nself._qout.put(result)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Move unpickling of specs to run() in FileReadingProcess |
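
Deferring `serializer.loads` to `run()` means deserialization now happens inside the child process, after the compact serialized bytes have crossed the process boundary, instead of materializing a second reader in the parent. A compact sketch of the idiom:

```python
import multiprocessing as mp
import pickle


class Worker(mp.Process):
    def __init__(self, reader_spec):
        super(Worker, self).__init__()
        # keep only the serialized bytes; they cross the process boundary
        # cheaply and are rebuilt where they will actually be used
        self.reader_spec = reader_spec

    def run(self):
        # deserialization happens here, in the child process, so the parent
        # never materializes a second live copy of the reader
        reader = pickle.loads(self.reader_spec)
        print(sorted(reader))


if __name__ == '__main__':
    w = Worker(pickle.dumps({'b': 2, 'a': 1}))
    w.start()
    w.join()                     # prints ['a', 'b']
```
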
377,522 | 16.08.2018 16:21:01 | -10,800 | bbab5701ae5ee0480bd076bab3a6ed2ac3e62537 | Increase queue sizes | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -486,8 +486,8 @@ class TaskMappingMixin(object):\nreader_spec, target_spec, args_spec, kwargs_spec = serialized\ndone_event = mp.Event()\n- in_queue = mp.Queue(10000)\n- out_queue = mp.Queue(1000)\n+ in_queue = mp.Queue(int(1e7))\n+ out_queue = mp.Queue(int(1e7))\nworkers = []\nfor _ in range(processes):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Increase queue sizes |
377,522 | 16.08.2018 23:10:06 | -10,800 | 1567711138f23d4dbd7bf3fc2553dc82491c3a8b | Index by spectrum titles in pepXML | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -105,7 +105,8 @@ class PepXML(xml.MultiProcessingXML):\n_default_version = '1.15'\n_default_iter_tag = 'spectrum_query'\n_indexed_tags = {'spectrum_query'}\n- _indexed_tag_keys = {'spectrum_query': 'index'}\n+ _indexed_tag_keys = {'spectrum_query': 'spectrum'}\n+ _default_id_attr = 'spectrum'\n_structures_to_flatten = {'search_score_summary', 'modification_info'}\n# attributes which contain unconverted values\n_convert_items = {'float': {'calc_neutral_pep_mass', 'massdiff'},\n@@ -133,7 +134,8 @@ class PepXML(xml.MultiProcessingXML):\ntry:\nreturn float(s)\nexcept ValueError:\n- if s.startswith('+-0'): return 0\n+ if s.startswith('+-0'):\n+ return 0\nreturn None\nconverters = {'float': safe_float, 'int': int,\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Index by spectrum titles in pepXML |
377,522 | 17.08.2018 01:29:07 | -10,800 | e3d1d077b5d9a9d8e28b7eaf0140973c7ef1b6a5 | Update conversion rules in pepXML | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -109,8 +109,9 @@ class PepXML(xml.MultiProcessingXML):\n_default_id_attr = 'spectrum'\n_structures_to_flatten = {'search_score_summary', 'modification_info'}\n# attributes which contain unconverted values\n- _convert_items = {'float': {'calc_neutral_pep_mass', 'massdiff'},\n- 'int': {'start_scan', 'end_scan', 'index'},\n+ _convert_items = {'float': {'calc_neutral_pep_mass', 'massdiff',\n+ 'probability', 'variable', 'static'},\n+ 'int': {'start_scan', 'end_scan', 'index', 'num_matched_peptides'},\n'bool': {'is_rejected'},\n'floatarray': {'all_ntt_prob'}}.items()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update conversion rules in pepXML |
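
The `_convert_items` mapping drives attribute conversion by type name. A tiny sketch of how such a table can be applied to raw XML attribute strings; the helper name is hypothetical:

```python
# hypothetical helper applying a pepXML-style conversion table to raw
# attribute strings; mirrors the structure of _convert_items above
convert_items = {'float': {'calc_neutral_pep_mass', 'massdiff', 'probability'},
                 'int': {'start_scan', 'end_scan', 'index', 'num_matched_peptides'}}
converters = {'float': float, 'int': int}


def convert_attrs(attrs):
    out = dict(attrs)
    for typ, keys in convert_items.items():
        for key in keys & out.keys():
            out[key] = converters[typ](out[key])
    return out


print(convert_attrs({'start_scan': '25', 'massdiff': '0.984', 'spectrum': 'a.1.1.2'}))
# {'start_scan': 25, 'massdiff': 0.984, 'spectrum': 'a.1.1.2'}
```
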
377,522 | 23.08.2018 15:10:12 | -10,800 | fd9e67cd4d949d02ac3d83661aa6ac8348f3fecc | Fix fasta picklability for Python 2 | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -213,25 +213,10 @@ class FASTA(aux.FileReader, FASTABase):\ndef _reconstruct(cls, args, kwargs):\n- return cls(*args, **kwargs, _skip_index=True)\n+ kwargs['_skip_index'] = True\n+ return cls(*args, **kwargs)\n-def _picklable(cls):\n- def __init__(self, *args, **kwargs):\n- cls.__init__(self, *args, **kwargs)\n- self._init_args = args\n- self._init_kwargs = kwargs\n-\n- def __reduce_ex__(self, protocol):\n- return (_reconstruct,\n- (self.__class__, self._init_args, self._init_kwargs),\n- self.__getstate__())\n- d = cls.__dict__.copy()\n- d['__init__'] = __init__\n- d['__reduce_ex__'] = __reduce_ex__\n- return type(cls.__name__, cls.__bases__, d)\n-\n-@_picklable\nclass IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\n\"\"\"Indexed FASTA parser. Supports direct indexing by matched labels.\"\"\"\ndelimiter = '>'\n@@ -271,6 +256,13 @@ class IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\n\"\"\"\naux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, **kwargs)\nFASTABase.__init__(self, ignore_comments, parser)\n+ self._init_args = (source, ignore_comments, parser)\n+ self._init_kwargs = kwargs\n+\n+ def __reduce_ex__(self, protocol):\n+ return (_reconstruct,\n+ (self.__class__, self._init_args, self._init_kwargs),\n+ self.__getstate__())\ndef _read_protein_lines(self, lines):\ndescription = []\n@@ -310,12 +302,12 @@ class IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\nif offsets is not None:\nreturn self._entry_from_offsets(*offsets)\n-@_picklable\n+\nclass TwoLayerIndexedFASTA(IndexedFASTA):\n\"\"\"Parser with two-layer index. Extracted groups are mapped to full headers (where possible),\nfull headers are mapped to byte offsets.\n- When indexed, they key is looked up in both indexes, allowing access by meaningful IDs\n+ When indexed, the key is looked up in both indexes, allowing access by meaningful IDs\n(like UniProt accession) and by full header string.\"\"\"\nheader_group = 1\nheader_pattern = None\n@@ -356,6 +348,8 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nself.header_pattern = header_pattern\nif not kwargs.get('_skip_index', False):\nself.build_second_index()\n+ self._init_args = (source, header_pattern, header_group, ignore_comments, parser)\n+ self._init_kwargs = kwargs\ndef build_second_index(self):\n\"\"\"Create the mapping from extracted field to whole header string.\"\"\"\n@@ -425,6 +419,8 @@ def _add_init(cls):\ndef __init__(self, source, parse=True, **kwargs):\ntyp.__init__(self, source, **kwargs)\nflavor.__init__(self, parse)\n+ self._init_args = (source, parse)\n+ self._init_kwargs = kwargs\nflavor_name = flavor.__name__[:-5]\ntype_name = \"Text-mode\" if typ is FASTA else \"Indexed\"\n@@ -449,7 +445,6 @@ def _add_init(cls):\nclass UniProt(UniProtMixin, FASTA):\npass\n-@_picklable\n@_add_init\nclass IndexedUniProt(UniProtMixin, TwoLayerIndexedFASTA):\npass\n@@ -474,7 +469,6 @@ class UniRef(UniRefMixin, FASTA):\npass\n-@_picklable\n@_add_init\nclass IndexedUniRef(UniRefMixin, TwoLayerIndexedFASTA):\npass\n@@ -493,7 +487,6 @@ class UniParc(UniParcMixin, FASTA):\npass\n-@_picklable\n@_add_init\nclass IndexedUniParc(UniParcMixin, TwoLayerIndexedFASTA):\npass\n@@ -516,7 +509,6 @@ class UniMes(UniMesMixin, FASTA):\npass\n-@_picklable\n@_add_init\nclass IndexedUniMes(UniMesMixin, TwoLayerIndexedFASTA):\npass\n@@ -537,7 +529,6 @@ class SPD(SPDMixin, FASTA):\npass\n-@_picklable\n@_add_init\nclass IndexedSPD(SPDMixin, TwoLayerIndexedFASTA):\npass\n@@ -556,7 +547,6 @@ class 
NCBI(NCBIMixin, FASTA):\npass\n-@_picklable\n@_add_init\nclass IndexedNCBI(NCBIMixin, TwoLayerIndexedFASTA):\npass\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/runtests.sh",
"new_path": "tests/runtests.sh",
"diff": "@@ -4,9 +4,8 @@ if [ $# -eq 0 ]; then\nfind . -name 'test_*.py' -exec bash -c 'declare -a versions=(2.7 3.3 3.4 3.5 3.6); for v in \"${versions[@]}\"; do echo \"Executing python${v} $0\"; eval \"python${v}\" \"$0\"; done' {} \\;\nelse\nfor f; do\n- for v in 2.7 3.3 3.4 3.5 3.6; do\n- echo \"Executing python${v}\" \"$f\"\n- eval \"python${v}\" \"$f\"\n+ for v in 2.7 3.3 3.4 3.5 3.6 3.7; do\n+ command -v \"python${v}\" >/dev/null 2>&1 && { echo \"Executing python${v}\" \"$f\"; eval \"python${v}\" \"$f\"; }\ndone\ndone\nfi\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_fasta.py",
"new_path": "tests/test_fasta.py",
"diff": "@@ -3,6 +3,7 @@ import tempfile\nimport unittest\nimport random\nimport string\n+import pickle\nimport pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nfrom pyteomics import fasta\n@@ -46,6 +47,12 @@ class FastaTest(unittest.TestCase):\nwith fasta.TwoLayerIndexedFASTA(self.fasta_file, r'test sequence (.*)') as tlir:\nself.assertEqual(self.fasta_entries_short[2], tlir['4'])\n+ def test_indexed_picklable(self):\n+ reader = fasta.TwoLayerIndexedFASTA(self.fasta_file, r'test sequence (.*)', block_size=7777)\n+ reader2 = pickle.loads(pickle.dumps(reader))\n+ self.assertEqual(reader2.block_size, reader.block_size)\n+ self.assertEqual(self.fasta_entries_short[2], reader2['4'])\n+\ndef test_decoy_sequence_reverse(self):\nsequence = ''.join(random.choice(string.ascii_uppercase)\nfor i in range(random.randint(1, 50)))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix fasta picklability for Python 2 |
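The test added in this commit captures the intended behavior; below is a minimal standalone sketch of the same pickle round-trip. The file name `proteins.fasta`, the header pattern, and the entry key `'4'` are hypothetical placeholders modeled on the test data.

```python
import pickle
from pyteomics import fasta

# Hypothetical file and header pattern; the second-layer index is keyed
# by the group captured from each FASTA header.
reader = fasta.TwoLayerIndexedFASTA('proteins.fasta', r'test sequence (.*)')

# With __reduce_ex__ defined on the class itself (instead of via the old
# _picklable class decorator), the reader survives pickling on Python 2
# as well; _skip_index avoids re-scanning the file on unpickling.
restored = pickle.loads(pickle.dumps(reader))
assert restored.block_size == reader.block_size
print(restored['4'])  # same entry as reader['4']
```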
377,522 | 23.08.2018 15:24:53 | -10,800 | 4faa1a339f4b1b87f14c3b31742a127c790a171c | Add test for map() in fasta | [
{
"change_type": "MODIFY",
"old_path": "tests/test_fasta.py",
"new_path": "tests/test_fasta.py",
"diff": "@@ -53,6 +53,12 @@ class FastaTest(unittest.TestCase):\nself.assertEqual(reader2.block_size, reader.block_size)\nself.assertEqual(self.fasta_entries_short[2], reader2['4'])\n+ def test_mp_map(self):\n+ with fasta.IndexedFASTA(self.fasta_file) as ir:\n+ self.assertEqual(\n+ sorted(self.fasta_entries_short[1:]),\n+ sorted(list(ir.map())))\n+\ndef test_decoy_sequence_reverse(self):\nsequence = ''.join(random.choice(string.ascii_uppercase)\nfor i in range(random.randint(1, 50)))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add test for map() in fasta |
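For reference, a minimal sketch of what the new test exercises: `map()` comes from `TaskMappingMixin` and parses records in worker processes, so the iteration order is not guaranteed. The file name is a hypothetical placeholder.

```python
from pyteomics import fasta

with fasta.IndexedFASTA('proteins.fasta') as reader:
    # Each item is a (description, sequence) record, possibly out of order.
    for description, sequence in reader.map():
        print(description, len(sequence))
```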
377,530 | 28.08.2018 13:37:57 | 18,000 | 9196992cb4a1aa1506dfe344ed6733a548b9dba9 | Swapping the regex for pepsin1.3 and pepsin2.0 to match the description from Expasy | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/parser.py",
"new_path": "pyteomics/parser.py",
"diff": "@@ -639,10 +639,10 @@ expasy_rules = {\n'iodosobenzoic acid': r'W',\n'lysc': r'K',\n'ntcb': r'\\w(?=C)',\n- 'pepsin ph1.3': r'((?<=[^HKR][^P])[^R](?=[FLWY][^P]))|'\n- r'((?<=[^HKR][^P])[FLWY](?=\\w[^P]))',\n- 'pepsin ph2.0': r'((?<=[^HKR][^P])[^R](?=[FL][^P]))|'\n+ 'pepsin ph1.3': r'((?<=[^HKR][^P])[^R](?=[FL][^P]))|'\nr'((?<=[^HKR][^P])[FL](?=\\w[^P]))',\n+ 'pepsin ph2.0': r'((?<=[^HKR][^P])[^R](?=[FLWY][^P]))|'\n+ r'((?<=[^HKR][^P])[FLWY](?=\\w[^P]))',\n'proline endopeptidase': r'(?<=[HKR])P(?=[^P])',\n'proteinase k': r'[AEFILTVWY]',\n'staphylococcal peptidase i': r'(?<=[^E])E',\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Swapping the regex for pepsin1.3 and pepsin2.0 to match the description from Expasy |
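A quick way to observe the corrected behavior (a sketch; the test peptide is arbitrary): after the swap, 'pepsin ph1.3' cleaves only around F/L, while 'pepsin ph2.0' additionally cleaves around W/Y, matching the ExPASy PeptideCutter description.

```python
from pyteomics import parser

peptide = 'MKWVTFISLLFLFSSAYS'  # arbitrary test sequence

# cleave() returns the set of peptides produced by the given rule.
print(parser.cleave(peptide, parser.expasy_rules['pepsin ph1.3']))
print(parser.cleave(peptide, parser.expasy_rules['pepsin ph2.0']))
# The second set has at least as many cuts: W and Y add cleavage sites.
```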
377,522 | 05.09.2018 02:36:04 | -10,800 | 41b8982eb599382fc62f411a1b5715cb02c4892e | Rename PEFF to IndexedPEFF | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/peff.py",
"new_path": "pyteomics/peff.py",
"diff": "@@ -18,7 +18,7 @@ building on top of the :class:`~.TwoLayerIndexedFASTA` reader.\nAvailable classes:\n- :py:class:`PEFF` - Parse a PEFF format file in binary-mode, supporting\n+ :py:class:`IndexedPEFF` - Parse a PEFF format file in binary-mode, supporting\ndirect indexing by header string or by tag.\n\"\"\"\n@@ -107,7 +107,7 @@ class Header(Mapping):\nreturn list(base | keys)\n-class PEFF(TwoLayerIndexedFASTA):\n+class IndexedPEFF(TwoLayerIndexedFASTA):\n\"\"\"Creates a :py:class:`PEFF` object.\nParameters\n@@ -152,7 +152,7 @@ class PEFF(TwoLayerIndexedFASTA):\n'|'.join(map(str, self)), )\ndef __init__(self, source, ignore_comments=False, **kwargs):\n- super(PEFF, self).__init__(\n+ super(IndexedPEFF, self).__init__(\nsource, ignore_comments=ignore_comments, parser=self.parser,\nheader_pattern=self.header_pattern, **kwargs)\nself.header_blocks = []\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_peff.py",
"new_path": "tests/test_peff.py",
"diff": "@@ -13,7 +13,7 @@ class PEFFTest(unittest.TestCase):\nself.peff_file = 'test.peff'\ndef test_parse(self):\n- reader = peff.PEFF(self.peff_file)\n+ reader = peff.IndexedPEFF(self.peff_file)\nself.assertEqual(reader.number_of_entries, 5)\nself.assertEqual(len(reader.header_blocks), 1)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Rename PEFF to IndexedPEFF |
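Usage after the rename, as a minimal sketch; `example.peff` is a hypothetical file name, and the header key shown (`NcbiTaxId`) is one exercised in the test suite and need not be present in every entry.

```python
from pyteomics import peff

with peff.IndexedPEFF('example.peff') as reader:
    print(reader.number_of_entries, 'entries')
    for protein in reader:
        # Each record carries a parsed header (a mapping of PEFF keys)
        # and the sequence itself.
        print(protein.description['NcbiTaxId'], len(protein.sequence))
        break
```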
377,522 | 05.09.2018 02:49:32 | -10,800 | 9c9d2858fc1b2b02d7bc7d9103b65c3003d6d4be | Add a doc page for peff | [
{
"change_type": "MODIFY",
"old_path": "doc/source/api.rst",
"new_path": "doc/source/api.rst",
"diff": "@@ -15,6 +15,7 @@ Contents:\napi/achrom\napi/electrochem\napi/fasta\n+ api/peff\napi/mzml\napi/mzxml\napi/mgf\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/api/fasta.rst",
"new_path": "doc/source/api/fasta.rst",
"diff": "----------\nfiles : iterable\nIterable of file names or file objects.\n-\n-\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "doc/source/api/peff.rst",
"diff": "+.. automodule:: pyteomics.peff\n+ :exclude-members: Protein\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/peff.py",
"new_path": "pyteomics/peff.py",
"diff": "\"\"\"\npeff - PSI Extended FASTA Format\n-==========================================\n+================================\nPEFF is a forth-coming standard from PSI-HUPO formalizing and extending the\nencoding of protein features and annotations for building search spaces for\n@@ -108,7 +108,7 @@ class Header(Mapping):\nclass IndexedPEFF(TwoLayerIndexedFASTA):\n- \"\"\"Creates a :py:class:`PEFF` object.\n+ \"\"\"Creates an :py:class:`IndexedPEFF` object.\nParameters\n----------\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add a doc page for peff |
377,522 | 05.09.2018 23:15:20 | -10,800 | 5af5be1d68a626d00221442398f6b2d1e829b298 | Remove _skip_empty_cvparam_values | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- New module :py:mod:`pyteomics.peff` implements the :py:class:`IndexedPEFF` parser for protein databases\nin the new PSI standard format, `PEFF <http://www.psidev.info/peff>`_.\n+ - Removed parameter `skip_empty_cvparam_values` in XML parsers. In cvParam elements, missing \"value\"\n+ attribute is now always equivalent to the case when it is equal to an empty string. This affects\n+ the structure of items produced by MzML and MzIdentML parsers.\n+\n3.5.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.0.dev2\n\\ No newline at end of file\n+4.0.dev3\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -209,7 +209,6 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML, xml.MultiProcessingXML)\nif found_compression_types:\nfound_compression_types = tuple(found_compression_types)\nif len(found_compression_types) == 1:\n- if not self._skip_empty_cvparam_values:\ndel info['name'][found_compression_types[0]]\nreturn found_compression_types[0]\nelse:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -124,7 +124,6 @@ class XML(FileReader):\n_schema_location_param = 'schemaLocation'\n_default_id_attr = 'id'\n_huge_tree = False\n- _skip_empty_cvparam_values = False\n_retrieve_refs_enabled = None # only some subclasses implement this\n# Configurable plugin logic\n@@ -162,14 +161,6 @@ class XML(FileReader):\nDefault is :py:const:`False`.\nEnable this option for trusted files to avoid XMLSyntaxError exceptions\n(e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).\n- skip_empty_cvparam_values : bool, optional\n- .. warning ::\n- This parameter affects the format of the produced dictionaries.\n-\n- By default, when parsing cvParam elements, \"value\" attributes with empty values are not\n- treated differently from others. When this parameter is set to :py:const:`True`,\n- these empty values are flattened. You can enable this to obtain the same output structure\n- regardless of the presence of an empty \"value\". Default is :py:const:`False`.\n\"\"\"\nsuper(XML, self).__init__(source, 'rb', self.iterfind, False,\n@@ -190,7 +181,6 @@ class XML(FileReader):\nself._converters_items = self._converters.items()\nself._huge_tree = kwargs.get('huge_tree', self._huge_tree)\n- self._skip_empty_cvparam_values = kwargs.get('skip_empty_cvparam_values', False)\nself._retrieve_refs_enabled = kwargs.get('retrieve_refs')\ndef __reduce_ex__(self, protocol):\n@@ -202,7 +192,6 @@ class XML(FileReader):\ndef __getstate__(self):\nstate = super(XML, self).__getstate__()\nstate['_huge_tree'] = self._huge_tree\n- state['_skip_empty_cvparam_values'] = self._skip_empty_cvparam_values\nstate['_retrieve_refs_enabled'] = self._retrieve_refs_enabled\nstate['_id_dict'] = self._id_dict\nreturn state\n@@ -210,7 +199,6 @@ class XML(FileReader):\ndef __setstate__(self, state):\nsuper(XML, self).__setstate__(state)\nself._huge_tree = state['_huge_tree']\n- self._skip_empty_cvparam_values = state['_skip_empty_cvparam_values']\nself._retrieve_refs_enabled = state['_retrieve_refs_enabled']\nself._id_dict = state['_id_dict']\n@@ -328,21 +316,18 @@ class XML(FileReader):\nunit_accesssion = None\nif 'unitCvRef' in attribs or 'unitName' in attribs:\nunit_accesssion = attribs.get('unitAccession')\n- unit_name = attribs.get(\"unitName\", unit_accesssion)\n+ unit_name = attribs.get('unitName', unit_accesssion)\nunit_info = unit_name\n- accession = attribs.get(\"accession\")\n- if 'value' in attribs and (not self._skip_empty_cvparam_values or\n- attribs['value'] != ''):\n+ accession = attribs.get('accession')\n+ value = attribs.get('value', '')\ntry:\nif attribs.get('type') in types:\n- value = types[attribs['type']](attribs['value'], unit_info)\n+ value = types[attribs['type']](value, unit_info)\nelse:\n- value = unitfloat(attribs['value'], unit_info)\n+ value = unitfloat(value, unit_info)\nexcept ValueError:\n- value = unitstr(attribs['value'], unit_info)\n+ value = unitstr(value, unit_info)\nreturn {cvstr(attribs['name'], accession, unit_accesssion): value}\n- else:\n- return {'name': cvstr(attribs['name'], accession, unit_accesssion)}\ndef _get_info(self, element, **kwargs):\n\"\"\"Extract info from element's attributes, possibly recursive.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1270,52 +1270,6 @@ mzml_spectra = [{'MSn spectrum': '',\n'scan window upper limit': 2000.0}]}}]},\n'total ion current': 15245068.0}]\n-mzml_spectra_skip_empty_values = [{'base peak intensity': 1471973.875,\n- 'base peak m/z': 810.415283203125,\n- 'count': 2,\n- 'defaultArrayLength': 19914,\n- 'highest observed m/z': 2000.0099466203771,\n- 'id': 'controllerType=0 controllerNumber=1 scan=1',\n- 'index': 0,\n- 'intensity array': makeCA(mzml_int_array),\n- 'lowest observed m/z': 200.00018816645022,\n- 'm/z array': makeCA(mzml_mz_array),\n- 'ms level': 1,\n- 'name': ['MSn spectrum', 'positive scan', 'profile spectrum'],\n- 'scanList': {'count': 1,\n- 'name': 'no combination',\n- 'scan': [{'[Thermo Trailer Extra]Monoisotopic M/Z:': 810.4152221679688,\n- 'filter string': 'FTMS + p ESI Full ms [200.00-2000.00]',\n- 'instrumentConfigurationRef': 'IC1',\n- 'preset scan configuration': 1.0,\n- 'scan start time': 0.004935,\n- 'scanWindowList': {'count': 1,\n- 'scanWindow': [{'scan window lower limit': 200.0,\n- 'scan window upper limit': 2000.0}]}}]},\n- 'total ion current': 15245068.0},\n- {'base peak intensity': 1471973.875,\n- 'base peak m/z': 810.415283203125,\n- 'count': 2,\n- 'defaultArrayLength': 19914,\n- 'highest observed m/z': 2000.0099466203771,\n- 'id': 'controllerType=0 controllerNumber=1 scan=2',\n- 'index': 1,\n- 'intensity array': makeCA(mzml_int_array),\n- 'lowest observed m/z': 200.00018816645022,\n- 'm/z array': makeCA(mzml_mz_array),\n- 'ms level': 1,\n- 'name': ['MSn spectrum', 'positive scan', 'profile spectrum'],\n- 'scanList': {'count': 1,\n- 'name': 'no combination',\n- 'scan': [{'[Thermo Trailer Extra]Monoisotopic M/Z:': 810.4152221679688,\n- 'filter string': 'FTMS + p ESI Full ms [200.00-2000.00]',\n- 'instrumentConfigurationRef': 'IC1',\n- 'preset scan configuration': 1.0,\n- 'scan start time': 0.004935,\n- 'scanWindowList': {'count': 1,\n- 'scanWindow': [{'scan window lower limit': 200.0,\n- 'scan window upper limit': 2000.0}]}}]},\n- 'total ion current': 15245068.0}]\nmgf_int = [np.array([ 73., 44., 67., 291., 54., 49.]),\nnp.array([ 237., 128., 108., 1007., 974., 79.])]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -8,7 +8,7 @@ from itertools import product\nimport unittest\nfrom pyteomics.mzml import MzML, PreIndexedMzML, read, chain\nfrom pyteomics import auxiliary as aux, xml\n-from data import mzml_spectra, mzml_spectra_skip_empty_values\n+from data import mzml_spectra\nimport numpy as np\nimport pickle\nimport operator as op\n@@ -31,10 +31,6 @@ class MzmlTest(unittest.TestCase):\nwith MzML(self.path) as f:\nself.assertEqual(sorted(mzml_spectra, key=key), sorted(list(f.map()), key=key))\n- def test_read_skip_empty_values(self):\n- with MzML(self.path, skip_empty_cvparam_values=True) as r:\n- self.assertEqual(mzml_spectra_skip_empty_values, list(r))\n-\ndef test_decoding(self):\nwith MzML(self.path, decode_binary=True) as reader:\nspectrum = next(reader)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove _skip_empty_cvparam_values |
377,522 | 18.09.2018 19:28:52 | -10,800 | 3946568029a3bcaefdb785fbad0ca10a1214ba62 | Fix in CVQueryEngine._is_empty | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/structures.py",
"new_path": "pyteomics/auxiliary/structures.py",
"diff": "@@ -289,7 +289,7 @@ class CVQueryEngine(object):\ndef _is_empty(self, value):\nif isinstance(value, basestring):\n- return value != ''\n+ return value == ''\nreturn False\ndef _walk_dict(self, data, index):\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -35,11 +35,11 @@ import ast\nimport os\nimport json\nimport warnings\n-from collections import OrderedDict, defaultdict\n+from collections import OrderedDict\nfrom lxml import etree\nimport numpy as np\n-from .auxiliary import FileReader, PyteomicsError, basestring, _file_obj, OffsetIndex, HierarchicalOffsetIndex\n+from .auxiliary import FileReader, PyteomicsError, basestring, _file_obj, HierarchicalOffsetIndex\nfrom .auxiliary import unitint, unitfloat, unitstr, cvstr\nfrom .auxiliary import _keepstate_method as _keepstate\nfrom .auxiliary import BinaryDataArrayTransformer\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix in CVQueryEngine._is_empty |
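The original comparison was inverted, so empty strings looked non-empty and vice versa. A minimal sketch of the corrected logic (using `str` in place of the module's `basestring` shim):

```python
def _is_empty(value):
    # A cvParam value is "empty" only if it is an empty string;
    # non-string values (numbers, lists) are never considered empty.
    if isinstance(value, str):
        return value == ''
    return False

assert _is_empty('')            # empty cvParam value
assert not _is_empty('437.27')  # a real string value
assert not _is_empty(437.27)    # non-strings pass through unchanged
```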
377,522 | 20.09.2018 19:29:12 | -10,800 | 40a9f83eaa813239bcb2d3053a0388ec5e3f8407 | Remove unused flat index classes | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -871,58 +871,6 @@ class TagSpecificXMLByteIndex(object):\nreturn indexer.offsets\n-class FlatOffsetIndex(HierarchicalOffsetIndex):\n- def __len__(self):\n- return len(self.mapping)\n-\n- def find(self, key, element_type=None):\n- return self[element_type][key]\n-\n- def save(self, fp):\n- encoded_index = dict()\n- container = {\n- self._schema_version_tag_key: self.schema_version,\n- }\n- for key, offset in self.items():\n- encoded_index[key] = offset\n- container['index'] = encoded_index\n- json.dump(container, fp)\n-\n- @classmethod\n- def load(cls, fp):\n- container = json.load(fp)\n- version_tag = container.get(cls._schema_version_tag_key)\n- if version_tag is None:\n- # The legacy case, no special processing yet\n- return cls(container)\n- version_tag = tuple(version_tag)\n- index = container.get(\"index\")\n- if version_tag < cls.schema_version:\n- # schema upgrade case, no special processing yet\n- return cls(index)\n- # no need to upgrade\n- return cls(index)\n-\n-\n-class FlatTagSpecificXMLByteIndex(TagSpecificXMLByteIndex):\n- \"\"\"\n- An alternative interface on top of :py:class:`TagSpecificXMLByteIndex` that assumes\n- that identifiers across different tags are globally unique, as in MzIdentML.\n-\n- Attributes\n- ----------\n- offsets : dict\n- The mapping between ids and byte offsets.\n- \"\"\"\n- def build_index(self):\n- hierarchical_index = super(FlatTagSpecificXMLByteIndex, self).build_index()\n- self.offsets = FlatOffsetIndex(_flatten_map(hierarchical_index))\n- return self.offsets\n-\n- def __len__(self):\n- return len(self.offsets)\n-\n-\ndef ensure_bytes_single(string):\nif isinstance(string, bytes):\nreturn string\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove unused flat index classes |
377,522 | 25.09.2018 15:55:53 | -10,800 | da39914648c74ab3371d241e1a17bbe8b3e487c9 | Fix IndexedMGF for files with zero spectra | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -274,7 +274,10 @@ class IndexedMGF(aux.TaskMappingMixin, aux.IndexedTextReader, MGFBase):\n@aux._keepstate_method\ndef _read_header(self):\n+ try:\nfirst = next(v for v in self._offset_index.values())[0]\n+ except StopIteration: # the index is empty, no spectra in file\n+ first = -1\nheader_lines = self.read(first).decode(self.encoding).split('\\n')\nreturn self._read_header_lines(header_lines)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix IndexedMGF for files with zero spectra |
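A sketch of the case this fixes, assuming an in-memory binary file object is accepted like a regular file: an MGF file with header lines but no spectra used to raise `StopIteration` when the header was read from an empty offset index.

```python
from io import BytesIO
from pyteomics import mgf

empty = BytesIO(b'COM=empty run\n')  # header only, no BEGIN IONS blocks
with mgf.IndexedMGF(empty) as reader:
    print(reader.header)  # the header is still parsed (file read up to EOF)
    print(list(reader))   # -> [] instead of an exception
```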
377,522 | 01.10.2018 17:57:41 | -10,800 | bcc3e23dbdffaf92dbae4f2a228b77c28653b5fd | Add IndexedReaderMixin with common rich indexing methods | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -15,7 +15,8 @@ from .constants import _nist_mass\nfrom .file_helpers import (\n_file_obj, _keepstate, _keepstate_method, IteratorContextManager,\n- FileReader, IndexedTextReader, OffsetIndex, HierarchicalOffsetIndex,\n+ FileReader, IndexedTextReader, IndexedReaderMixin,\n+ OffsetIndex, HierarchicalOffsetIndex,\n_file_reader, _file_writer,\n_make_chain, _check_use_index, FileReadingProcess, TaskMappingMixin,\nserializer)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -42,6 +42,11 @@ try:\nexcept ImportError:\nfrom Queue import Empty\n+try:\n+ from collections.abc import Sequence\n+except ImportError:\n+ from collections import Sequence\n+\nfrom . import PyteomicsError\ndef _keepstate(func):\n@@ -211,7 +216,84 @@ def remove_bom(bstr):\nreturn bstr.replace(codecs.BOM_LE, b'').lstrip(b\"\\x00\")\n-class IndexedTextReader(FileReader):\n+class IndexedReaderMixin():\n+ \"\"\"Common interface for :py:class:`IndexedTextReader` and :py:class:`IndexedXML`.\"\"\"\n+ @property\n+ def index(self):\n+ return self._offset_index\n+\n+ @property\n+ def default_index(self):\n+ return self._offset_index\n+\n+ def __len__(self):\n+ return len(self._offset_index)\n+\n+ def __contains__(self, key):\n+ return key in self._offset_index\n+\n+ def _item_from_offsets(self, offsets):\n+ raise NotImplementedError\n+\n+ def get_by_id(self, elem_id):\n+ index = self.default_index\n+ if index is None:\n+ raise PyteomicsError('Access by ID requires building an offset index.')\n+ offsets = index[elem_id]\n+ return self._item_from_offsets(offsets)\n+\n+ def get_by_ids(self, ids):\n+ return [self.get_by_id(key) for key in ids]\n+\n+ def get_by_index(self, i):\n+ try:\n+ key = self.default_index.from_index(i, False)\n+ except AttributeError:\n+ raise PyteomicsError('Positional access requires building an offset index.')\n+ return self.get_by_id(key)\n+\n+ def get_by_indexes(self, indexes):\n+ return [self.get_by_index(i) for i in indexes]\n+\n+ def get_by_index_slice(self, s):\n+ try:\n+ keys = self.default_index.from_slice(s, False)\n+ except AttributeError:\n+ raise PyteomicsError('Positional access requires building an offset index.')\n+ return self.get_by_ids(keys)\n+\n+ def get_by_key_slice(self, s):\n+ keys = self.default_index.between(s.start, s.stop)\n+ if s.step:\n+ keys = keys[::s.step]\n+ return self.get_by_ids(keys)\n+\n+ def __getitem__(self, key):\n+ if isinstance(key, basestring):\n+ return self.get_by_id(key)\n+ if isinstance(key, int):\n+ return self.get_by_index(key)\n+ if isinstance(key, Sequence):\n+ if not key:\n+ return []\n+ if isinstance(key[0], int):\n+ return self.get_by_indexes(key)\n+ if isinstance(key[0], basestring):\n+ return self.get_by_ids(key)\n+ if isinstance(key, slice):\n+ for item in (key.start, key.stop, key.step):\n+ if item is not None:\n+ break\n+ if isinstance(item, int):\n+ return self.get_by_index_slice(key)\n+ if isinstance(item, basestring):\n+ return self.get_by_key_slice(key)\n+ if item is None:\n+ return list(self)\n+ raise PyteomicsError('Unsupported query key: {}'.format(key))\n+\n+\n+class IndexedTextReader(IndexedReaderMixin, FileReader):\n\"\"\"Abstract class for text file readers that keep an index of records for random access.\nThis requires reading the file in binary mode.\"\"\"\n@@ -238,16 +320,6 @@ class IndexedTextReader(FileReader):\nif not _skip_index:\nself._offset_index = self.build_byte_index()\n- @property\n- def index(self):\n- return self._offset_index\n-\n- def __len__(self):\n- return len(self._offset_index)\n-\n- def __contains__(self, key):\n- return key in self._offset_index\n-\ndef __getstate__(self):\nstate = super(IndexedTextReader, self).__getstate__()\nstate['offset_index'] = self._offset_index\n@@ -321,6 +393,8 @@ class IndexedTextReader(FileReader):\nlines = self._source.read(end-start).decode(self.encoding).split('\\n')\nreturn lines\n+\n+\ndef _file_reader(_mode='r'):\n# a lot of the code below is borrowed from\n# http://stackoverflow.com/a/14095585/1258041\n@@ -401,16 +475,16 @@ class 
OffsetIndex(OrderedDict):\nParameters\n----------\nindex: int\n- The index to retrieve\n+ The index to retrieve.\ninclude_value: bool\nWhether to return both the key and the value or just the key.\n- Defaults to :const:`False`\n+ Defaults to :const:`False`.\nReturns\n-------\nobject:\nIf ``include_value`` is :const:`True`, a tuple of (key, value) at ``index``\n- else just the key at ``index``\n+ else just the key at ``index``.\n'''\nitems = self.index_sequence\nif include_value:\n@@ -439,6 +513,31 @@ class OffsetIndex(OrderedDict):\nitems = self.index_sequence\nreturn [(k, v) if include_value else k for k, v in items[spec]]\n+ def between(self, start, stop, include_value=False):\n+ keys = list(self)\n+ if start is not None:\n+ try:\n+ start_index = keys.index(start)\n+ except ValueError:\n+ raise KeyError(start)\n+ else:\n+ start_index = 0\n+ if stop is not None:\n+ try:\n+ stop_index = keys.index(stop)\n+ except ValueError:\n+ raise KeyError(stop)\n+ else:\n+ stop_index = len(keys) - 1\n+ if start is None or stop is None:\n+ pass # won't switch indices\n+ else:\n+ start_index, stop_index = min(start_index, stop_index), max(start_index, stop_index)\n+\n+ if include_value:\n+ return [(k, self[k]) for k in keys[start_index:stop_index + 1]]\n+ return keys[start_index:stop_index + 1]\n+\ndef __repr__(self):\ntemplate = \"{self.__class__.__name__}({items})\"\nreturn template.format(self=self, items=list(self.items()))\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -133,9 +133,6 @@ class FASTABase():\ndef get_entry(self, key):\nraise NotImplementedError\n- def __getitem__(self, key):\n- return self.get_entry(key)\n-\nclass FASTA(aux.FileReader, FASTABase):\n\"\"\"Text-mode, sequential FASTA parser.\n@@ -289,18 +286,19 @@ class IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\ndescription = self.parser(description)\nreturn Protein(description, sequence)\n- def _entry_from_offsets(self, start, end):\n+ def _item_from_offsets(self, offsets):\n+ start, end = offsets\nlines = self._read_lines_from_offsets(start, end)\nreturn self._read_protein_lines(lines)\ndef _read(self, **kwargs):\nfor key, offsets in self._offset_index.items():\n- yield self._entry_from_offsets(*offsets)\n+ yield self._item_from_offsets(offsets)\ndef get_entry(self, key):\noffsets = self._offset_index.get(key)\nif offsets is not None:\n- return self._entry_from_offsets(*offsets)\n+ return self._item_from_offsets(offsets)\nclass TwoLayerIndexedFASTA(IndexedFASTA):\n@@ -372,15 +370,16 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nsuper(TwoLayerIndexedFASTA, self).__setstate__(state)\nself._id2header = state['id2header']\n- def get_entry(self, key):\n+ def get_by_id(self, key):\n\"\"\"Get the entry by value of header string or extracted field.\"\"\"\n- raw = super(TwoLayerIndexedFASTA, self).get_entry(key)\n- if raw is not None:\n- return raw\n+ try:\n+ return super(TwoLayerIndexedFASTA, self).get_by_id(key)\n+ except KeyError:\nif self._id2header:\nheader = self._id2header.get(key)\nif header is not None:\nreturn super(TwoLayerIndexedFASTA, self).get_entry(header)\n+ raise KeyError(key)\ndef __contains__(self, key):\nreturn super(TwoLayerIndexedFASTA, self).__contains__(key) or key in self._id2header\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -281,26 +281,18 @@ class IndexedMGF(aux.TaskMappingMixin, aux.IndexedTextReader, MGFBase):\nheader_lines = self.read(first).decode(self.encoding).split('\\n')\nreturn self._read_header_lines(header_lines)\n+ def _item_from_offsets(self, offsets):\n+ start, end = offsets\n+ lines = self._read_lines_from_offsets(start, end)\n+ return self._read_spectrum_lines(lines)\n+\ndef _read(self, **kwargs):\nfor _, offsets in self._offset_index.items():\n- spectrum = self._read_spectrum(*offsets)\n+ spectrum = self._item_from_offsets(offsets)\nyield spectrum\n- def _read_spectrum(self, start, end):\n- \"\"\"Read a single spectrum from ``self._source``.\n-\n- Returns\n- -------\n- out : dict\n- \"\"\"\n- lines = self._read_lines_from_offsets(start, end)\n- return self._read_spectrum_lines(lines)\n-\n- # @aux._keepstate_method\n- def get_spectrum(self, title):\n- if title in self._offset_index:\n- start, end = self._offset_index[title]\n- return self._read_spectrum(start, end)\n+ def get_spectrum(self, key):\n+ return self.get_by_id(key)\nclass MGF(aux.FileReader, MGFBase):\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -33,7 +33,6 @@ from traceback import format_exc\nimport operator as op\nimport ast\nimport os\n-import json\nimport warnings\nfrom collections import OrderedDict\nfrom lxml import etree\n@@ -43,7 +42,7 @@ from .auxiliary import FileReader, PyteomicsError, basestring, _file_obj, Hierar\nfrom .auxiliary import unitint, unitfloat, unitstr, cvstr\nfrom .auxiliary import _keepstate_method as _keepstate\nfrom .auxiliary import BinaryDataArrayTransformer\n-from .auxiliary import TaskMappingMixin\n+from .auxiliary import TaskMappingMixin, IndexedReaderMixin\ntry: # Python 2.7\nfrom urllib2 import urlopen, URLError\n@@ -545,6 +544,7 @@ class XML(FileReader):\nreturn elem\nif not found:\nelem.clear()\n+ raise KeyError(elem_id)\n@_keepstate\ndef get_by_id(self, elem_id, **kwargs):\n@@ -895,7 +895,7 @@ def _flatten_map(hierarchical_map):\nreturn OrderedDict(all_records)\n-class IndexedXML(XML):\n+class IndexedXML(IndexedReaderMixin, XML):\n\"\"\"Subclass of :py:class:`XML` which uses an index of byte offsets for some\nelements for quick random access.\n\"\"\"\n@@ -948,8 +948,8 @@ class IndexedXML(XML):\nself._build_index()\n@property\n- def index(self):\n- return self._offset_index\n+ def default_index(self):\n+ return self._offset_index[self._default_iter_tag]\ndef __reduce_ex__(self, protocol):\nreconstructor, args, state = XML.__reduce_ex__(self, protocol)\n@@ -1016,11 +1016,8 @@ class IndexedXML(XML):\ndata = self._get_info_smart(elem, **kwargs)\nreturn data\n- def __getitem__(self, elem_id):\n- return self.get_by_id(elem_id)\n-\ndef __contains__(self, key):\n- return key in self._offset_index\n+ return key in self._offset_index[self._default_iter_tag]\ndef __len__(self):\nreturn len(self._offset_index[self._default_iter_tag])\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -14,7 +14,6 @@ psms = list(zip(count(), string.ascii_uppercase + string.ascii_lowercase,\nnp.arange(0.01, 0.062, 0.001)))\nclass QvalueTest(unittest.TestCase):\n-\nkey = staticmethod(op.itemgetter(0))\nis_decoy = staticmethod(lambda x: x[1].islower())\npep = staticmethod(op.itemgetter(2))\n@@ -783,7 +782,7 @@ class FDRTest(unittest.TestCase):\npep = [self.pep((s, l, p)) for s, l, p in psms]\nself._run_check(psms, is_decoy=isd, pep=pep)\n-class OtherTests(unittest.TestCase):\n+class RegressionTests(unittest.TestCase):\nx = [1, 2, 3]\ny = [3, 5, 7]\na = 2\n@@ -848,6 +847,33 @@ class OtherTests(unittest.TestCase):\nwith self.assertRaises(aux.PyteomicsError):\naux.linear_regression_perpendicular(self.x)\n+class OffsetIndexTests(unittest.TestCase):\n+ def setUp(self):\n+ self.sequence = [(str(i), i) for i in range(10)]\n+ self.index = aux.OffsetIndex(self.sequence)\n+\n+ def test_index_sequence(self):\n+ self.assertEqual(self.index.index_sequence, tuple(self.sequence))\n+\n+ def test_find(self):\n+ self.assertEqual(self.index.find('3'), 3)\n+\n+ def test_from_index(self):\n+ self.assertEqual(self.index.from_index(3), '3')\n+ self.assertEqual(self.index.from_index(4, True), ('4', 4))\n+\n+ def test_from_slice(self):\n+ self.assertEqual(self.index.from_slice(slice(1, 3)), ['1', '2'])\n+ self.assertEqual(self.index.from_slice(slice(1, 3), True), self.sequence[1:3])\n+\n+ def test_between(self):\n+ self.assertEqual(self.index.between('1', '3'), ['1', '2', '3'])\n+ self.assertEqual(self.index.between('1', '3', True), [('1', 1), ('2', 2), ('3', 3)])\n+ self.assertEqual(self.index.between('3', '1'), ['1', '2', '3'])\n+ self.assertEqual(self.index.between(None, '3'), ['0', '1', '2', '3'])\n+ self.assertEqual(self.index.between('8', None), ['8', '9'])\n+\n+\nimport warnings\nif __name__ == '__main__':\nwith warnings.catch_warnings():\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -142,6 +142,19 @@ class MzmlTest(unittest.TestCase):\nwith pickle.loads(spec) as reader:\nself.assertEqual(next(reader)['id'], expected_data['id'])\n+ def test_indexing(self):\n+ with MzML(self.path) as reader:\n+ self.assertEqual(mzml_spectra[0], reader[0])\n+ self.assertEqual(mzml_spectra[0], reader['controllerType=0 controllerNumber=1 scan=1'])\n+ self.assertEqual(mzml_spectra, reader[0:2])\n+ self.assertEqual(mzml_spectra,\n+ [reader['controllerType=0 controllerNumber=1 scan=1'],\n+ reader['controllerType=0 controllerNumber=1 scan=2']])\n+ self.assertEqual(mzml_spectra, reader[[0, 1]])\n+ self.assertEqual(mzml_spectra, reader[\n+ ['controllerType=0 controllerNumber=1 scan=1', 'controllerType=0 controllerNumber=1 scan=2']])\n+ self.assertEqual(mzml_spectra, reader[\n+ 'controllerType=0 controllerNumber=1 scan=2':'controllerType=0 controllerNumber=1 scan=1'])\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add IndexedReaderMixin with common rich indexing methods |
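The test above exercises the new unified indexing on `MzML`; a condensed sketch follows. The file name and scan ids are hypothetical placeholders taken from the test data style.

```python
from pyteomics import mzml

with mzml.MzML('run.mzML') as reader:
    first = reader[0]                  # access by position
    scan = reader['controllerType=0 controllerNumber=1 scan=1']  # by id
    head = reader[0:10]                # positional slice
    pair = reader[['controllerType=0 controllerNumber=1 scan=1',
                   'controllerType=0 controllerNumber=1 scan=2']]  # id list
    print(first['index'], len(head), len(pair))
```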
377,522 | 04.10.2018 17:28:03 | -10,800 | 88f9507c3399cbba79887efaa344e27b3e891aea | Add retention time indexing | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -15,7 +15,7 @@ from .constants import _nist_mass\nfrom .file_helpers import (\n_file_obj, _keepstate, _keepstate_method, IteratorContextManager,\n- FileReader, IndexedTextReader, IndexedReaderMixin,\n+ FileReader, IndexedTextReader, IndexedReaderMixin, TimeOrderedIndexedReaderMixin,\nOffsetIndex, HierarchicalOffsetIndex,\n_file_reader, _file_writer,\n_make_chain, _check_use_index, FileReadingProcess, TaskMappingMixin,\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -216,7 +216,7 @@ def remove_bom(bstr):\nreturn bstr.replace(codecs.BOM_LE, b'').lstrip(b\"\\x00\")\n-class IndexedReaderMixin():\n+class IndexedReaderMixin(object):\n\"\"\"Common interface for :py:class:`IndexedTextReader` and :py:class:`IndexedXML`.\"\"\"\n@property\ndef index(self):\n@@ -293,6 +293,87 @@ class IndexedReaderMixin():\nraise PyteomicsError('Unsupported query key: {}'.format(key))\n+class RTLocator():\n+ def __init__(self, reader):\n+ self._reader = reader\n+\n+ def _get_scan_by_time(self, time):\n+ \"\"\"Retrieve the scan object for the specified scan time.\n+\n+ Parameters\n+ ----------\n+ time : float\n+ The time to get the nearest scan from\n+ Returns\n+ -------\n+ tuple: (scan_id, scan, scan_time)\n+ \"\"\"\n+ if not self._reader.default_index:\n+ raise PyteomicsError(\"This method requires the index. Please pass `use_index=True` during initialization\")\n+\n+ scan_ids = tuple(self._reader.default_index)\n+ lo = 0\n+ hi = len(scan_ids)\n+\n+ best_match = None\n+ best_error = float('inf')\n+ best_time = None\n+ best_id = None\n+\n+ if time == float('inf'):\n+ scan = self._reader.get_by_id(scan_ids[-1])\n+ return scan_ids[-1], scan, self._reader._get_time(scan)\n+\n+ while hi != lo:\n+ mid = (hi + lo) // 2\n+ sid = scan_ids[mid]\n+ scan = self._reader.get_by_id(sid)\n+ scan_time = self._reader._get_time(scan)\n+ err = abs(scan_time - time)\n+ if err < best_error:\n+ best_error = err\n+ best_match = scan\n+ best_time = scan_time\n+ best_id = sid\n+ if scan_time == time:\n+ return sid, scan, scan_time\n+ elif (hi - lo) == 1:\n+ return best_id, best_match, best_time\n+ elif scan_time > time:\n+ hi = mid\n+ else:\n+ lo = mid\n+\n+ def __getitem__(self, key):\n+ if isinstance(key, (int, float)):\n+ return self._get_scan_by_time(key)[1]\n+ if isinstance(key, Sequence):\n+ return [self._get_scan_by_time(t)[1] for t in key]\n+ if isinstance(key, slice):\n+ if key.start is None:\n+ start_index = self._reader.default_index.from_index(0)\n+ else:\n+ start_index = self._get_scan_by_time(key.start)[0]\n+ if key.stop is None:\n+ stop_index = self._reader.default_index.from_index(-1)\n+ else:\n+ stop_index = self._get_scan_by_time(key.stop)[0]\n+ return self._reader[start_index:stop_index:key.step]\n+\n+\n+class TimeOrderedIndexedReaderMixin(IndexedReaderMixin):\n+ @property\n+ def time(self):\n+ return self._time\n+\n+ def __init__(self, *args, **kwargs):\n+ super(TimeOrderedIndexedReaderMixin, self).__init__(*args, **kwargs)\n+ self._time = RTLocator(self)\n+\n+ def _get_time(self, scan):\n+ raise NotImplementedError\n+\n+\nclass IndexedTextReader(IndexedReaderMixin, FileReader):\n\"\"\"Abstract class for text file readers that keep an index of records for random access.\nThis requires reading the file in binary mode.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -296,9 +296,7 @@ class IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\nyield self._item_from_offsets(offsets)\ndef get_entry(self, key):\n- offsets = self._offset_index.get(key)\n- if offsets is not None:\n- return self._item_from_offsets(offsets)\n+ return self.get_by_id(key)\nclass TwoLayerIndexedFASTA(IndexedFASTA):\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -188,6 +188,8 @@ class MGFBase():\nparams['pepmass'] = pepmass + (None,) * (2-len(pepmass))\nif isinstance(params.get('charge'), aux.basestring):\nparams['charge'] = aux._parse_charge(params['charge'], True)\n+ if 'rtinseconds' in params:\n+ params['rtinseconds'] = aux.unitfloat(params['rtinseconds'], 'second')\nout = {'params': params}\ndata = {'m/z array': masses, 'intensity array': intensities}\nif self._read_charges:\n@@ -224,7 +226,7 @@ class MGFBase():\nreturn self.get_spectrum(key)\n-class IndexedMGF(aux.TaskMappingMixin, aux.IndexedTextReader, MGFBase):\n+class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexedTextReader, MGFBase):\n\"\"\"\nA class representing an MGF file. Supports the `with` syntax and direct iteration for sequential\nparsing. Specific spectra can be accessed by title using the indexing syntax in constant time.\n@@ -247,7 +249,7 @@ class IndexedMGF(aux.TaskMappingMixin, aux.IndexedTextReader, MGFBase):\n\"\"\"\ndelimiter = 'BEGIN IONS'\n- label = r'TITLE=([^\\n]+)\\n'\n+ label = r'TITLE=([^\\n]*\\w)\\r?\\n'\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\ndtype=None, encoding='utf-8', block_size=1000000, _skip_index=False):\n@@ -294,6 +296,12 @@ class IndexedMGF(aux.TaskMappingMixin, aux.IndexedTextReader, MGFBase):\ndef get_spectrum(self, key):\nreturn self.get_by_id(key)\n+ def _get_time(self, spectrum):\n+ try:\n+ return spectrum['params']['rtinseconds']\n+ except KeyError:\n+ raise aux.PyteomicsError('RT information not found.')\n+\nclass MGF(aux.FileReader, MGFBase):\n\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -90,7 +90,7 @@ STANDARD_ARRAYS = set([\n])\n-class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML, xml.MultiProcessingXML):\n+class MzML(xml.ArrayConversionMixin, aux.TimeOrderedIndexedReaderMixin, xml.MultiProcessingXML, xml.IndexSavingXML):\n\"\"\"Parser class for mzML files.\"\"\"\nfile_format = 'mzML'\n_root_element = 'mzML'\n@@ -288,6 +288,9 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML, xml.MultiProcessingXML)\ndel info[k]\ninfo.pop('id', None)\n+ def _get_time(self, scan):\n+ return scan['scanList']['scan'][0]['scan start time']\n+\ndef read(source, read_schema=False, iterative=True, use_index=False, dtype=None, huge_tree=False):\n\"\"\"Parse `source` and iterate through spectra.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzxml.py",
"new_path": "pyteomics/mzxml.py",
"diff": "@@ -124,7 +124,7 @@ class IteratorQueue(object):\nyield item\n-class MzXML(xml.ArrayConversionMixin, xml.IndexSavingXML, xml.MultiProcessingXML):\n+class MzXML(xml.ArrayConversionMixin, aux.TimeOrderedIndexedReaderMixin, xml.MultiProcessingXML, xml.IndexSavingXML):\n\"\"\"Parser class for mzXML files.\"\"\"\n_root_element = 'mzXML'\n_default_iter_tag = 'scan'\n@@ -219,6 +219,9 @@ class MzXML(xml.ArrayConversionMixin, xml.IndexSavingXML, xml.MultiProcessingXML\nfor item in super(MzXML, self).iterfind(path, **kwargs):\nyield item\n+ def _get_time(self, scan):\n+ return scan['retentionTime']\n+\ndef read(source, read_schema=False, iterative=True, use_index=False, dtype=None, huge_tree=False):\n\"\"\"Parse `source` and iterate through spectra.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1264,7 +1264,7 @@ mzml_spectra = [{'MSn spectrum': '',\n'filter string': 'FTMS + p ESI Full ms [200.00-2000.00]',\n'instrumentConfigurationRef': 'IC1',\n'preset scan configuration': 1.0,\n- 'scan start time': 0.004935,\n+ 'scan start time': 0.005935,\n'scanWindowList': {'count': 1,\n'scanWindow': [{'scan window lower limit': 200.0,\n'scan window upper limit': 2000.0}]}}]},\n@@ -1303,7 +1303,7 @@ mgf_spectra_long = [{'intensity array': makeCA(mgf_int[0]),\n'mass': 'Monoisotopic',\n'mods': 'Carbamidomethyl (C)',\n'pepmass': (1084.9, 1234.0),\n- 'rtinseconds': '25',\n+ 'rtinseconds': 25.0,\n'scans': '3',\n'title': 'Spectrum 2',\n'useremail': 'leu@altered-state.edu',\n@@ -1318,7 +1318,7 @@ mgf_spectra_short = [{'intensity array': makeCA(mgf_int[0]),\n'm/z array': makeCA(mgf_mz[1]),\n'charge array': makeCA(mgf_ch[1]),\n'params': {'pepmass': (1084.9, 1234.0),\n- 'rtinseconds': '25',\n+ 'rtinseconds': 25.0,\n'scans': '3',\n'title': 'Spectrum 2'}}]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test.mzML",
"new_path": "tests/test.mzML",
"diff": "<scanList count=\"1\">\n<cvParam cvRef=\"MS\" accession=\"MS:1000795\" name=\"no combination\" value=\"\"/>\n<scan instrumentConfigurationRef=\"IC1\">\n- <cvParam cvRef=\"MS\" accession=\"MS:1000016\" name=\"scan start time\" value=\"0.0049350000000000002\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000016\" name=\"scan start time\" value=\"0.0059350000000000002\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n<cvParam cvRef=\"MS\" accession=\"MS:1000512\" name=\"filter string\" value=\"FTMS + p ESI Full ms [200.00-2000.00]\"/>\n<cvParam cvRef=\"MS\" accession=\"MS:1000616\" name=\"preset scan configuration\" value=\"1\"/>\n<userParam name=\"[Thermo Trailer Extra]Monoisotopic M/Z:\" value=\"810.41522216796875\" type=\"xsd:float\"/>\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -156,5 +156,11 @@ class MzmlTest(unittest.TestCase):\nself.assertEqual(mzml_spectra, reader[\n'controllerType=0 controllerNumber=1 scan=2':'controllerType=0 controllerNumber=1 scan=1'])\n+ def test_time_locator(self):\n+ with MzML(self.path) as reader:\n+ self.assertEqual(mzml_spectra[0], reader.time[0])\n+ self.assertEqual(mzml_spectra[1], reader.time[0.1])\n+ self.assertEqual(mzml_spectra, reader.time[0:0.1])\n+\nif __name__ == '__main__':\nunittest.main()\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_peff.py",
"new_path": "tests/test_peff.py",
"diff": "from os import path\nimport unittest\n-import pickle\nimport pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nfrom pyteomics import peff\n@@ -12,7 +11,8 @@ class PEFFTest(unittest.TestCase):\ndef setUp(self):\nself.peff_file = 'test.peff'\n- def test_parse(self):\n+ def test_parse(self, reader=None):\n+ if reader is None:\nreader = peff.IndexedPEFF(self.peff_file)\nself.assertEqual(reader.number_of_entries, 5)\nself.assertEqual(len(reader.header_blocks), 1)\n@@ -29,6 +29,5 @@ class PEFFTest(unittest.TestCase):\nself.assertEqual(protein.description[\"NcbiTaxId\"], 9606)\nself.assertEqual(len(protein.description.ModResPsi), 2)\n-\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add retention time indexing |
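A minimal sketch of the new `time` locator added here (the file name and retention times are hypothetical; units follow the file, minutes for this mzML example):

```python
from pyteomics import mzml

with mzml.MzML('run.mzML') as reader:
    nearest = reader.time[5.5]      # the scan closest to RT 5.5
    window = reader.time[5.5:7.0]   # all scans between two retention times
    print(len(window))
```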
377,522 | 04.10.2018 17:58:04 | -10,800 | 045d6995dee85a64ea9e82b81d9ca88a5e65d4bc | Add warning patch in auxiliary | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.0.dev3\n\\ No newline at end of file\n+4.0.dev4\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -5,6 +5,9 @@ except NameError:\nfrom . import patch as __patch\n+import warnings\n+warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\n+\nfrom .structures import (\nPyteomicsError, Charge, ChargeList,\n_parse_charge, BasicComposition,\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add warning patch in auxiliary |
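The patch replaces the default `warnings` formatting with the bare message text. A quick check (the warning text is arbitrary):

```python
import warnings
from pyteomics import auxiliary  # importing it installs the formatwarning patch

# Prints just 'something to note' on stderr, without the usual
# 'file:line: UserWarning:' prefix and source-line echo.
warnings.warn('something to note')
```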
377,525 | 08.10.2018 03:47:33 | 0 | ad6b512ab210f864c82dbb6443c44f167701ea60 | change default fasta delimiter and label to support '>' characters in protein description | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -216,8 +216,8 @@ def _reconstruct(cls, args, kwargs):\nclass IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\n\"\"\"Indexed FASTA parser. Supports direct indexing by matched labels.\"\"\"\n- delimiter = '>'\n- label = r'^>(.*)'\n+ delimiter = '\\n>'\n+ label = '^[\\n]>(.*)'\ndef __init__(self, source, ignore_comments=False, parser=None, **kwargs):\n\"\"\"Create an indexed FASTA parser object.\n@@ -243,9 +243,9 @@ class IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\nblock_size : int or None, optional, keyword only\nNumber of bytes to consume at once.\ndelimiter : str or None, optional, keyword only\n- Overrides the FASTA record delimiter (default is ``'>'``).\n+ Overrides the FASTA record delimiter (default is ``'\\n>'``).\nlabel : str or None, optional, keyword only\n- Overrides the FASTA record label pattern. Default is ``r'^>(.*)'``.\n+ Overrides the FASTA record label pattern. Default is ``'^[\\n]>(.*)'``.\nlabel_group : int or str, optional, keyword only\nOverrides the matched group used as key in the byte offset index.\nThis in combination with `label` can be used to extract fields from headers.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | change default fasta delimiter and label to support '>' characters in protein description |
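A sketch of what the new `'\n>'` delimiter enables, assuming an in-memory binary file object is accepted like a regular file: a literal `>` inside a description no longer splits the record.

```python
from io import BytesIO
from pyteomics import fasta

# Hypothetical two-record database; the second description contains a '>'
# which the old '>' delimiter would have treated as a new record.
data = (b'>sp|P1|ONE first protein\nPEPTIDE\n'
        b'>sp|P2|TWO description with >embedded< marker\nKWVTFISLL\n')

with fasta.IndexedFASTA(BytesIO(data)) as reader:
    print(reader['sp|P2|TWO description with >embedded< marker'])
```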
377,525 | 08.10.2018 05:09:05 | 0 | fd909ac8367e686d56061cf0509ba89e539be313 | fasta.py edited online with Bitbucket | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -217,7 +217,7 @@ def _reconstruct(cls, args, kwargs):\nclass IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\n\"\"\"Indexed FASTA parser. Supports direct indexing by matched labels.\"\"\"\ndelimiter = '\\n>'\n- label = '^[\\n]>(.*)'\n+ label = '^[\\n]?>(.*)'\ndef __init__(self, source, ignore_comments=False, parser=None, **kwargs):\n\"\"\"Create an indexed FASTA parser object.\n@@ -245,7 +245,7 @@ class IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\ndelimiter : str or None, optional, keyword only\nOverrides the FASTA record delimiter (default is ``'\\n>'``).\nlabel : str or None, optional, keyword only\n- Overrides the FASTA record label pattern. Default is ``'^[\\n]>(.*)'``.\n+ Overrides the FASTA record label pattern. Default is ``'^[\\n]?>(.*)'``.\nlabel_group : int or str, optional, keyword only\nOverrides the matched group used as key in the byte offset index.\nThis in combination with `label` can be used to extract fields from headers.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | fasta.py edited online with Bitbucket |
377,522 | 08.10.2018 19:31:43 | -10,800 | b8b319f3ab77f85a4adf2d9f7c1fe4f43e29a277 | Tweak mgf and fasta label patterns to strip whitespace at the end | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -217,7 +217,7 @@ def _reconstruct(cls, args, kwargs):\nclass IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\n\"\"\"Indexed FASTA parser. Supports direct indexing by matched labels.\"\"\"\ndelimiter = '\\n>'\n- label = '^[\\n]?>(.*)'\n+ label = r'^[\\n]?>(.*)\\s*'\ndef __init__(self, source, ignore_comments=False, parser=None, **kwargs):\n\"\"\"Create an indexed FASTA parser object.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -249,7 +249,7 @@ class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\n\"\"\"\ndelimiter = 'BEGIN IONS'\n- label = r'TITLE=([^\\n]*\\w)\\r?\\n'\n+ label = r'TITLE=([^\\n]*\\w)\\s*'\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\ndtype=None, encoding='utf-8', block_size=1000000, _skip_index=False):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Tweak mgf and fasta label patterns to strip whitespace at the end |
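The tightened MGF pattern anchors the captured title on a word character, so trailing spaces and `'\r'` from Windows line endings stay out of the index keys. A quick regex check (the title is arbitrary):

```python
import re

label = re.compile(r'TITLE=([^\n]*\w)\s*')
match = label.match('TITLE=Spectrum 1 \r\n')
print(repr(match.group(1)))  # -> 'Spectrum 1', without the trailing ' \r'
```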
377,522 | 13.12.2018 17:29:26 | -10,800 | cbb79614296fbe729b01ee6ce3f8bd9a46a43903 | Amend IndexedMGF.__init__, fix MGF time indexing | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -889,13 +889,6 @@ class TaskMappingMixin(object):\n------\nobject\nThe work item returned by the target function.\n-\n- Deleted Parameters\n- ------------------\n- args: : class:`Sequence`\n- Additional arguments to be passed to the target function\n- kwargs: : class:`Mapping`\n- Additional keyword arguments to be passed to the target function\n\"\"\"\nif processes < 1:\nprocesses = _NPROC\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -245,7 +245,6 @@ class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\nheader : dict\nThe file header.\n-\n\"\"\"\ndelimiter = 'BEGIN IONS'\n@@ -253,7 +252,7 @@ class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\ndtype=None, encoding='utf-8', block_size=1000000, _skip_index=False):\n- aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding,\n+ aux.TimeOrderedIndexedReaderMixin.__init__(self, source, self._read, False, (), {}, encoding,\nblock_size, _skip_index=_skip_index)\nMGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Amend IndexedMGF.__init__, fix MGF time indexing |
377,522 | 14.12.2018 00:06:16 | -10,800 | a615604907310c02c5beafc033e294fd62c8154f | Fix and refactoring in ms1 | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -240,11 +240,14 @@ class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\nand 'params' stores a :py:class:`dict` of parameters (keys and values are\n:py:class:`str`, keys corresponding to MGF, lowercased).\n+\nAttributes\n----------\nheader : dict\nThe file header.\n+ time : RTLocator\n+ A property used for accessing spectra by retention time.\n\"\"\"\ndelimiter = 'BEGIN IONS'\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/ms1.py",
"new_path": "pyteomics/ms1.py",
"diff": "@@ -52,66 +52,117 @@ try:\nimport numpy as np\nexcept ImportError:\nnp = None\n+\n+\n+class MS1Base():\n_array_keys = ['m/z array', 'intensity array']\n+ def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None):\n+ if convert_arrays and np is None:\n+ raise aux.PyteomicsError('numpy is required for array conversion')\n+ self._convert_arrays = convert_arrays\n+ self._dtype_dict = dtype if isinstance(dtype, dict) else {k: dtype for k in self._array_keys}\n+ self._use_header = use_header\n+ if use_header:\n+ self._header = self._read_header()\n+ else:\n+ self._header = None\n+ self._source_name = getattr(source, 'name', str(source))\n-@aux._file_reader()\n-def read(source=None, use_header=False, convert_arrays=2, read_charges=True, dtype=None):\n- \"\"\"Read an MS1 file and return entries iteratively.\n+ @property\n+ def header(self):\n+ return self._header\n- Read the specified MS1 file, **yield** spectra one by one.\n- Each 'spectrum' is a :py:class:`dict` with three keys: 'm/z array',\n- 'intensity array', and 'params'. 'm/z array' and\n- 'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n- and 'params' stores a :py:class:`dict` of parameters.\n+ def _read_header_lines(self, lines):\n+ header = {}\n+ for line in lines:\n+ if line[0] != 'H':\n+ break\n+ l = line.split('\\t', 2)\n+ if len(l) < 3:\n+ l = line.split(None, 2)\n+ key = l[1]\n+ val = l[2].strip()\n+ header[key] = val\n+ return header\n- Parameters\n- ----------\n+ def _read_spectrum_lines(self, lines):\n+ reading_spectrum = False\n+ params = {}\n+ masses = []\n+ intensities = []\n+ if self._use_header: params.update(self.header)\n- source : str or file or None, optional\n- A file object (or file name) with data in MS1 format. Default is\n- :py:const:`None`, which means read standard input.\n+ def make_out():\n+ out = {'params': params}\n+ if self._convert_arrays:\n+ data = {'m/z array': masses, 'intensity array': intensities}\n+ for key, values in data.items():\n+ out[key] = np.array(values, dtype=self._dtype_dict.get(key))\n+ else:\n+ out['m/z array'] = masses\n+ out['intensity array'] = intensities\n+ return out\n- use_header : bool, optional\n- Add the info from file header to each dict. Spectrum-specific parameters\n- override those from the header in case of conflict.\n- Default is :py:const:`False`.\n+ for line in lines:\n+ sline = line.strip().split(None, 2)\n+ if not reading_spectrum:\n+ if sline[0] == 'S':\n+ reading_spectrum = True\n+ params['scan'] = tuple(sline[1:])\n+ # otherwise we are not interested; do nothing, just move along\n+ else:\n+ if not sline:\n+ pass\n+ elif sline[0] == 'S':\n+ return make_out()\n- convert_arrays : bool, optional\n- If :py:const:`False`, m/z and intensities will be returned as regular lists.\n- If :py:const:`True` (default), they will be converted to regular :py:class:`numpy.ndarray`'s.\n- Conversion requires :py:mod:`numpy`.\n+ else:\n+ if sline[0] == 'I': # spectrum-specific parameters!\n+ params[sline[1]] = sline[2]\n+ else: # this must be a peak list\n+ try:\n+ masses.append(float(sline[0])) # this may cause\n+ intensities.append(float(sline[1])) # exceptions...\\\n+ except ValueError:\n+ raise aux.PyteomicsError(\n+ 'Error when parsing %s. 
Line: %s' %\n+ (self._source_name, line))\n+ except IndexError:\n+ pass\n- dtype : type or str or dict, optional\n- dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\n- Keys should be 'm/z array' and/or 'intensity array'.\n- Returns\n- -------\n+class MS1(aux.FileReader, MS1Base):\n+ def __init__(self, source=None, use_header=True, convert_arrays=True, dtype=None, encoding=None):\n+ aux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n+ MS1Base.__init__(self, source, use_header, convert_arrays, dtype)\n+ self.encoding = encoding\n- out : FileReader\n- \"\"\"\n- if convert_arrays and np is None:\n- raise aux.PyteomicsError('numpy is required for array conversion')\n- dtype_dict = dtype if isinstance(dtype, dict) else {k: dtype for k in _array_keys}\n- header = read_header(source)\n+ @aux._keepstate_method\n+ def _read_header(self):\n+ return self._read_header_lines(self._source)\n+\n+ def _read_spectrum(self, firstline):\n+ return self._read_spectrum_lines(self._source, firstline)\n+\n+ def _read(self):\nreading_spectrum = False\nparams = {}\nmasses = []\nintensities = []\n- if use_header: params.update(header)\n+ if self._use_header: params.update(self.header)\ndef make_out():\nout = {'params': params}\n- if convert_arrays:\n+ if self._convert_arrays:\ndata = {'m/z array': masses, 'intensity array': intensities}\nfor key, values in data.items():\n- out[key] = np.array(values, dtype=dtype_dict.get(key))\n+ out[key] = np.array(values, dtype=self._dtype_dict.get(key))\nelse:\nout['m/z array'] = masses\nout['intensity array'] = intensities\nreturn out\n- for line in source:\n+ for line in self._source:\nsline = line.strip().split(None, 2)\nif not reading_spectrum:\nif sline[0] == 'S':\n@@ -123,7 +174,8 @@ def read(source=None, use_header=False, convert_arrays=2, read_charges=True, dty\npass\nelif sline[0] == 'S':\nyield make_out()\n- params = dict(header) if use_header else {}\n+ params = dict(self.header) if self._use_header else {}\n+ params['scan'] = tuple(sline[1:])\nmasses = []\nintensities = []\nelse:\n@@ -136,13 +188,13 @@ def read(source=None, use_header=False, convert_arrays=2, read_charges=True, dty\nexcept ValueError:\nraise aux.PyteomicsError(\n'Error when parsing %s. Line: %s' %\n- (source.name, line))\n+ (self._source_name, line))\nexcept IndexError:\npass\nyield make_out()\n-@aux._keepstate\n+\ndef read_header(source):\n\"\"\"\nRead the specified MS1 file, get the parameters specified in the header\n@@ -159,17 +211,46 @@ def read_header(source):\nheader : dict\n\"\"\"\n- with aux._file_obj(source, 'r') as source:\n- header = {}\n- for line in source:\n- if line[0] != 'H':\n- break\n- l = line.split('\\t', 2)\n- if len(l) < 3:\n- l = line.split(None, 2)\n- key = l[1]\n- val = l[2].strip()\n- header[key] = val\n- return header\n+ return read(source, use_header=True).header\n+\n+\n+def read(source=None, use_header=False, convert_arrays=2, dtype=None):\n+ \"\"\"Read an MS1 file and return entries iteratively.\n+\n+ Read the specified MS1 file, **yield** spectra one by one.\n+ Each 'spectrum' is a :py:class:`dict` with three keys: 'm/z array',\n+ 'intensity array', and 'params'. 'm/z array' and\n+ 'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n+ and 'params' stores a :py:class:`dict` of parameters.\n+\n+ Parameters\n+ ----------\n+\n+ source : str or file or None, optional\n+ A file object (or file name) with data in MS1 format. 
Default is\n+ :py:const:`None`, which means read standard input.\n+\n+ use_header : bool, optional\n+ Add the info from file header to each dict. Spectrum-specific parameters\n+ override those from the header in case of conflict.\n+ Default is :py:const:`False`.\n+\n+ convert_arrays : bool, optional\n+ If :py:const:`False`, m/z and intensities will be returned as regular lists.\n+ If :py:const:`True` (default), they will be converted to regular :py:class:`numpy.ndarray`'s.\n+ Conversion requires :py:mod:`numpy`.\n+\n+ dtype : type or str or dict, optional\n+ dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\n+ Keys should be 'm/z array' and/or 'intensity array'.\n+\n+ Returns\n+ -------\n+\n+ out : :py:class:`MS1Base`\n+ An instance of :py:class:`MS1` or :py:class:`IndexedMS1`, depending on `use_index` and `source`.\n+ \"\"\"\n+ return MS1(source, use_header, convert_arrays, dtype)\n+\nchain = aux._make_chain(read, 'read')\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -97,6 +97,7 @@ This module requires :py:mod:`lxml`.\nfrom lxml import etree\nfrom . import xml, auxiliary as aux, _schema_defaults\n+\nclass PepXML(xml.MultiProcessingXML):\n\"\"\"Parser class for pepXML files.\"\"\"\nfile_format = 'pepXML'\n@@ -198,6 +199,7 @@ class PepXML(xml.MultiProcessingXML):\ninfo['search_hit'].sort(key=lambda x: x['hit_rank'])\nreturn info\n+\ndef read(source, read_schema=False, iterative=True, **kwargs):\n\"\"\"Parse `source` and iterate through peptide-spectrum matches.\n@@ -225,6 +227,7 @@ def read(source, read_schema=False, iterative=True, **kwargs):\nreturn PepXML(source, read_schema=read_schema, iterative=iterative)\n+\ndef iterfind(source, path, **kwargs):\n\"\"\"Parse `source` and yield info on elements with specified local\nname or by specified \"XPath\".\n@@ -271,8 +274,10 @@ def iterfind(source, path, **kwargs):\n\"\"\"\nreturn PepXML(source, **kwargs).iterfind(path, **kwargs)\n+\nversion_info = xml._make_version_info(PepXML)\n+\ndef roc_curve(source):\n\"\"\"Parse source and return a ROC curve for peptideprophet analysis.\n@@ -304,8 +309,10 @@ def roc_curve(source):\nreturn sorted(roc_curve, key=lambda x: x['min_prob'])\n+\nchain = aux._make_chain(read, 'read')\n+\ndef _is_decoy_prefix(psm, prefix='DECOY_'):\n\"\"\"Given a PSM dict, return :py:const:`True` if all protein names for\nthe PSM start with ``prefix``, and :py:const:`False` otherwise. This\n@@ -326,10 +333,12 @@ def _is_decoy_prefix(psm, prefix='DECOY_'):\nreturn all(protein['protein'].startswith(prefix)\nfor protein in psm['search_hit'][0]['proteins'])\n+\ndef _is_decoy_suffix(psm, suffix='_DECOY'):\nreturn all(protein['protein'].endswith(suffix)\nfor protein in psm['search_hit'][0]['proteins'])\n+\nis_decoy = _is_decoy_prefix\nfdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)\n@@ -339,6 +348,7 @@ qvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, _key)\nfilter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, _key, qvalues)\nfilter.chain = aux._make_chain(filter, 'filter', True)\n+\ndef DataFrame(*args, **kwargs):\n\"\"\"Read pepXML output files into a :py:class:`pandas.DataFrame`.\n@@ -408,6 +418,7 @@ def DataFrame(*args, **kwargs):\nyield info\nreturn pd.DataFrame(gen_items(), **pd_kwargs)\n+\ndef filter_df(*args, **kwargs):\n\"\"\"Read pepXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.\nPositional arguments can be pepXML files or DataFrames.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1617,7 +1617,8 @@ ms1_spectra = [{'intensity array': makeCA([ 0. , 20.0522 , 29.26406, 30.\n'params': {'BPI': '713524',\n'BPM': '544.2904',\n'RTime': '1.32083',\n- 'TIC': '2694200'}}]\n+ 'TIC': '2694200',\n+ 'scan': ('2', '2')}}]\nms1_spectra_lists = [{'intensity array': [0., 20.0522, 29.26406, 30.04175, 20.19221, 11.58895, 0.],\n'm/z array': [2.51263, 82.51282, 82.51301, 82.51321, 82.5134, 82.51359, 82.51378],\n@@ -1631,7 +1632,8 @@ ms1_spectra_lists = [{'intensity array': [0., 20.0522, 29.26406, 30.04175, 20.19\n'params': {'BPI': '713524',\n'BPM': '544.2904',\n'RTime': '1.32083',\n- 'TIC': '2694200'}}]\n+ 'TIC': '2694200',\n+ 'scan': ('2', '2')}}]\nms1_header = {'CreationDate': 'Sat Jun 03 15:25:10 2017',\n'Extractor version': 'Xcalibur',\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix and refactoring in ms1 |
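A minimal sketch of how the refactored API from this commit could be used; 'test.ms1' is a hypothetical file name, and `use_header=True` merges header parameters into each spectrum's `params` dict::

    from pyteomics import ms1

    # iterate spectra with the new class-based parser (same dicts as before)
    with ms1.MS1('test.ms1', use_header=True) as reader:
        for spectrum in reader:
            print(spectrum['params'].get('scan'), len(spectrum['m/z array']))

    # read_header() now simply instantiates the parser and returns its header dict
    print(ms1.read_header('test.ms1'))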
377,522 | 14.12.2018 01:21:55 | -10,800 | c3715b9584fd60a35e1e009f09f3a74e700fda87 | Provide IndexedMS1 | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/ms1.py",
"new_path": "pyteomics/ms1.py",
"diff": "@@ -93,6 +93,8 @@ class MS1Base():\nif self._use_header: params.update(self.header)\ndef make_out():\n+ if 'RTime' in params:\n+ params['RTime'] = float(params['RTime'])\nout = {'params': params}\nif self._convert_arrays:\ndata = {'m/z array': masses, 'intensity array': intensities}\n@@ -105,6 +107,7 @@ class MS1Base():\nfor line in lines:\nsline = line.strip().split(None, 2)\n+ if not sline: continue\nif not reading_spectrum:\nif sline[0] == 'S':\nreading_spectrum = True\n@@ -129,10 +132,11 @@ class MS1Base():\n(self._source_name, line))\nexcept IndexError:\npass\n+ return make_out()\nclass MS1(aux.FileReader, MS1Base):\n- def __init__(self, source=None, use_header=True, convert_arrays=True, dtype=None, encoding=None):\n+ def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None, encoding=None):\naux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\nMS1Base.__init__(self, source, use_header, convert_arrays, dtype)\nself.encoding = encoding\n@@ -152,6 +156,8 @@ class MS1(aux.FileReader, MS1Base):\nif self._use_header: params.update(self.header)\ndef make_out():\n+ if 'RTime' in params:\n+ params['RTime'] = float(params['RTime'])\nout = {'params': params}\nif self._convert_arrays:\ndata = {'m/z array': masses, 'intensity array': intensities}\n@@ -195,7 +201,86 @@ class MS1(aux.FileReader, MS1Base):\nyield make_out()\n-def read_header(source):\n+class IndexedMS1(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexedTextReader, MS1Base):\n+ \"\"\"\n+ A class representing an MGF file. Supports the `with` syntax and direct iteration for sequential\n+ parsing. Specific spectra can be accessed by title using the indexing syntax in constant time.\n+ If created using a file object, it needs to be opened in binary mode.\n+\n+ When iterated, :py:class:`IndexedMGF` object yields spectra one by one.\n+ Each 'spectrum' is a :py:class:`dict` with four keys: 'm/z array',\n+ 'intensity array', 'charge array' and 'params'. 
'm/z array' and\n+ 'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n+ 'charge array' is a masked array (:py:class:`numpy.ma.MaskedArray`) of ints,\n+ and 'params' stores a :py:class:`dict` of parameters (keys and values are\n+ :py:class:`str`, keys corresponding to MGF, lowercased).\n+\n+\n+ Attributes\n+ ----------\n+\n+ header : dict\n+ The file header.\n+ time : RTLocator\n+ A property used for accessing spectra by retention time.\n+ \"\"\"\n+\n+ delimiter = '\\nS'\n+ label = r'^[\\n]?S\\s+(\\S+)'\n+\n+ def __init__(self, source=None, use_header=False, convert_arrays=True,\n+ dtype=None, encoding='utf-8', block_size=1000000, _skip_index=False):\n+ aux.TimeOrderedIndexedReaderMixin.__init__(self, source, self._read, False, (), {}, encoding,\n+ block_size, _skip_index=_skip_index)\n+ MS1Base.__init__(self, source, use_header, convert_arrays, dtype)\n+\n+ def __reduce_ex__(self, protocol):\n+ return (self.__class__,\n+ (self._source_init, False, self._convert_arrays,\n+ self._read_charges, self._dtype_dict, self.encoding, self.block_size, True),\n+ self.__getstate__())\n+\n+ def __getstate__(self):\n+ state = super(IndexedMS1, self).__getstate__()\n+ state['use_header'] = self._use_header\n+ state['header'] = self._header\n+ return state\n+\n+ def __setstate__(self, state):\n+ super(IndexedMS1, self).__setstate__(state)\n+ self._use_header = state['use_header']\n+ self._header = state['header']\n+\n+ @aux._keepstate_method\n+ def _read_header(self):\n+ try:\n+ first = next(v for v in self._offset_index.values())[0]\n+ except StopIteration: # the index is empty, no spectra in file\n+ first = -1\n+ header_lines = self.read(first).decode(self.encoding).split('\\n')\n+ return self._read_header_lines(header_lines)\n+\n+ def _item_from_offsets(self, offsets):\n+ start, end = offsets\n+ lines = self._read_lines_from_offsets(start, end)\n+ return self._read_spectrum_lines(lines)\n+\n+ def _read(self, **kwargs):\n+ for _, offsets in self._offset_index.items():\n+ spectrum = self._item_from_offsets(offsets)\n+ yield spectrum\n+\n+ def get_spectrum(self, key):\n+ return self.get_by_id(key)\n+\n+ def _get_time(self, spectrum):\n+ try:\n+ return spectrum['params']['RTime']\n+ except KeyError:\n+ raise aux.PyteomicsError('RT information not found.')\n+\n+\n+def read_header(source, *args, **kwargs):\n\"\"\"\nRead the specified MS1 file, get the parameters specified in the header\nas a :py:class:`dict`.\n@@ -211,10 +296,11 @@ def read_header(source):\nheader : dict\n\"\"\"\n- return read(source, use_header=True).header\n+ kwargs['use_header'] = True\n+ return read(source, *args, **kwargs).header\n-def read(source=None, use_header=False, convert_arrays=2, dtype=None):\n+def read(source=None, use_header=False, convert_arrays=True, dtype=None, encoding=None):\n\"\"\"Read an MS1 file and return entries iteratively.\nRead the specified MS1 file, **yield** spectra one by one.\n@@ -250,7 +336,7 @@ def read(source=None, use_header=False, convert_arrays=2, dtype=None):\nout : :py:class:`MS1Base`\nAn instance of :py:class:`MS1` or :py:class:`IndexedMS1`, depending on `use_index` and `source`.\n\"\"\"\n- return MS1(source, use_header, convert_arrays, dtype)\n+ return MS1(source, use_header, convert_arrays, dtype, encoding)\nchain = aux._make_chain(read, 'read')\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1607,7 +1607,7 @@ ms1_spectra = [{'intensity array': makeCA([ 0. , 20.0522 , 29.26406, 30.\n82.51378]),\n'params': {'BPI': '585566',\n'BPM': '544.2904',\n- 'RTime': '0.987225',\n+ 'RTime': 0.987225,\n'TIC': '3728760',\n'scan': ('1', '1')}},\n{'intensity array': makeCA([ 0. , 31.2197 , 37.46051, 44.36585, 49.12939, 44.33195,\n@@ -1616,7 +1616,7 @@ ms1_spectra = [{'intensity array': makeCA([ 0. , 20.0522 , 29.26406, 30.\n82.64466, 82.64485, 82.64504]),\n'params': {'BPI': '713524',\n'BPM': '544.2904',\n- 'RTime': '1.32083',\n+ 'RTime': 1.32083,\n'TIC': '2694200',\n'scan': ('2', '2')}}]\n@@ -1624,14 +1624,14 @@ ms1_spectra_lists = [{'intensity array': [0., 20.0522, 29.26406, 30.04175, 20.19\n'm/z array': [2.51263, 82.51282, 82.51301, 82.51321, 82.5134, 82.51359, 82.51378],\n'params': {'BPI': '585566',\n'BPM': '544.2904',\n- 'RTime': '0.987225',\n+ 'RTime': 0.987225,\n'TIC': '3728760',\n'scan': ('1', '1')}},\n{'intensity array': [0., 31.2197, 37.46051, 44.36585, 49.12939, 44.33195, 35.1637, 33.48032, 0.],\n'm/z array': [82.6435, 82.6437, 82.64389, 82.64408, 82.64427, 82.64447, 82.64466, 82.64485, 82.64504],\n'params': {'BPI': '713524',\n'BPM': '544.2904',\n- 'RTime': '1.32083',\n+ 'RTime': 1.32083,\n'TIC': '2694200',\n'scan': ('2', '2')}}]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_ms1.py",
"new_path": "tests/test_ms1.py",
"diff": "@@ -3,7 +3,7 @@ import numpy as np\nimport pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport unittest\n-from pyteomics.ms1 import read, read_header\n+from pyteomics.ms1 import read, read_header, MS1, IndexedMS1, chain\nimport data\nclass MS1Test(unittest.TestCase):\n@@ -17,7 +17,8 @@ class MS1Test(unittest.TestCase):\ndef test_read(self):\n# http://stackoverflow.com/q/14246983/1258041\nself.assertEqual(data.ms1_spectra, list(read(self.path)))\n- with read(self.path) as reader:\n+ for reader in [read, MS1, IndexedMS1, chain]:\n+ with reader(self.path) as reader:\nself.assertEqual(data.ms1_spectra, list(reader))\ndef test_read_array_conversion(self):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Provide IndexedMS1 |
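A hedged sketch of the new `IndexedMS1` parser introduced here; 'test.ms1' is hypothetical, and the scan ID '1' assumes scan numbering as in the test data. Indexed parsers read files in binary mode, so a path (or a binary file object) should be passed::

    from pyteomics import ms1

    with ms1.IndexedMS1('test.ms1') as reader:
        spectrum = reader.get_spectrum('1')   # constant-time lookup by scan number
        print(spectrum['params']['RTime'])    # 'RTime' is now converted to float
        nearest = reader.time[1.0]            # RT-based lookup via the time locator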
377,522 | 14.12.2018 16:11:21 | -10,800 | 0874cbd090bc4610ec90019316014a9cce88899c | Update ms1.read and docs | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -395,7 +395,7 @@ def read(*args, **kwargs):\n:py:class:`IndexedMGF` is created. This facilitates random access by spectrum titles.\nIf an open file is passed as `source`, it needs to be open in binary mode.\n- If :py:const:`False`, an instance of :py:class:`SequentialMGF` is created. It reads\n+ If :py:const:`False`, an instance of :py:class:`MGF` is created. It reads\n`source` in text mode and is suitable for iterative parsing. Access by spectrum title\nrequires linear search and thus takes linear time.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/ms1.py",
"new_path": "pyteomics/ms1.py",
"diff": "@@ -10,9 +10,11 @@ human-readable format for MS1 data. It allows storing MS1 peak lists and\nexprimental parameters.\nThis module provides minimalistic infrastructure for access to data stored in\n-MS1 files. The most important function is :py:func:`read`, which\n-reads spectra and related information as saves them into human-readable\n-:py:class:`dicts`.\n+MS1 files.\n+Two main classes are :py:class:`MS1`, which provides an iterative, text-mode parser,\n+and :py:class:`IndexedMS1`, which is a binary-mode parser that supports random access using scan IDs\n+and retention times.\n+The function :py:func:`read` helps dispatch between the two classes.\nAlso, common parameters can be read from MS1 file header with\n:py:func:`read_header` function.\n@@ -55,6 +57,7 @@ except ImportError:\nclass MS1Base():\n+ \"\"\"Abstract class representing an MS1 file. Subclasses implement different approaches to parsing.\"\"\"\n_array_keys = ['m/z array', 'intensity array']\ndef __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None):\nif convert_arrays and np is None:\n@@ -136,6 +139,23 @@ class MS1Base():\nclass MS1(aux.FileReader, MS1Base):\n+ \"\"\"\n+ A class representing an MS1 file. Supports the `with` syntax and direct iteration for sequential\n+ parsing.\n+\n+ :py:class:`MGF` object behaves as an iterator, **yielding** spectra one by one.\n+ Each 'spectrum' is a :py:class:`dict` with three keys: 'm/z array',\n+ 'intensity array', and 'params'. 'm/z array' and\n+ 'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n+ and 'params' stores a :py:class:`dict` of parameters.\n+\n+ Attributes\n+ ----------\n+\n+ header : dict\n+ The file header.\n+\n+ \"\"\"\ndef __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None, encoding=None):\naux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\nMS1Base.__init__(self, source, use_header, convert_arrays, dtype)\n@@ -300,7 +320,7 @@ def read_header(source, *args, **kwargs):\nreturn read(source, *args, **kwargs).header\n-def read(source=None, use_header=False, convert_arrays=True, dtype=None, encoding=None):\n+def read(*args, **kwargs):\n\"\"\"Read an MS1 file and return entries iteratively.\nRead the specified MS1 file, **yield** spectra one by one.\n@@ -330,13 +350,36 @@ def read(source=None, use_header=False, convert_arrays=True, dtype=None, encodin\ndtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\nKeys should be 'm/z array' and/or 'intensity array'.\n+ encoding : str, optional\n+ File encoding.\n+\n+ use_index : bool, optional\n+ Determines which parsing method to use. If :py:const:`True` (default), an instance of\n+ :py:class:`IndexedMS1` is created. This facilitates random access by scan titles.\n+ If an open file is passed as `source`, it needs to be open in binary mode.\n+\n+ If :py:const:`False`, an instance of :py:class:`MS1` is created. 
It reads\n+ `source` in text mode and is suitable for iterative parsing.\n+\n+ block_size : int, optinal\n+ Size of the chunk (in bytes) used to parse the file when creating the byte offset index.\n+ (Accepted only for :py:class:`IndexedMGF`.)\n+\nReturns\n-------\nout : :py:class:`MS1Base`\nAn instance of :py:class:`MS1` or :py:class:`IndexedMS1`, depending on `use_index` and `source`.\n\"\"\"\n- return MS1(source, use_header, convert_arrays, dtype, encoding)\n+ if args:\n+ source = args[0]\n+ else:\n+ source = kwargs.get('source')\n+ use_index = kwargs.pop('use_index', None)\n+ use_index = aux._check_use_index(source, use_index, True)\n+ tp = IndexedMS1 if use_index else MS1\n+\n+ return tp(*args, **kwargs)\nchain = aux._make_chain(read, 'read')\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update ms1.read and docs |
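With `read()` now dispatching between the two classes, the choice can be made explicit through `use_index`; a small sketch ('test.ms1' is a hypothetical file)::

    from pyteomics import ms1

    with ms1.read('test.ms1', use_index=True) as reader:
        print(type(reader).__name__)    # IndexedMS1
    with ms1.read('test.ms1', use_index=False) as reader:
        print(type(reader).__name__)    # MS1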
377,522 | 14.12.2018 16:39:55 | -10,800 | f28c5e7204bf989e916d790f9ab7d61a4011cea8 | Add indexing to protxml | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/protxml.py",
"new_path": "pyteomics/protxml.py",
"diff": "@@ -75,13 +75,16 @@ This module requres :py:mod:`lxml`.\nfrom . import xml, auxiliary as aux, _schema_defaults\nimport operator as op\n-class ProtXML(xml.XML):\n+class ProtXML(xml.MultiProcessingXML):\n\"\"\"Parser class for protXML files.\"\"\"\nfile_format = 'protXML'\n_root_element = 'protein_summary'\n_default_schema = _schema_defaults._protxml_schema_defaults\n# _default_version = None\n_default_iter_tag = 'protein_group'\n+ _indexed_tag_keys = {'protein_group': 'group_number'}\n+ _default_id_attr = 'group_number'\n+ _indexed_tags = {'protein_group'}\n_structures_to_flatten = {'annotation'}\n# attributes which contain unconverted values\n_convert_items = {'float': {'pct_spectrum_ids'},\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add indexing to protxml |
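Since `protein_group` elements are now indexed by `group_number`, groups can be fetched directly; a sketch assuming a file 'test.protXML' containing a group numbered '1'::

    from pyteomics import protxml

    with protxml.ProtXML('test.protXML') as reader:
        group = reader.get_by_id('1')    # direct lookup by group_number
        print(sorted(group))             # inspect the keys of the group dict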
377,522 | 21.12.2018 02:52:27 | -10,800 | 7595a839b72e842c3703d8e787a5b7bc012e8b4c | Add traml doc page | [
{
"change_type": "MODIFY",
"old_path": "doc/source/api.rst",
"new_path": "doc/source/api.rst",
"diff": "@@ -26,6 +26,7 @@ Contents:\napi/mzid\napi/featurexml\napi/trafoxml\n+ api/traml\napi/auxiliary\napi/pylab_aux\napi/xml\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "doc/source/api/traml.rst",
"diff": "+.. automodule:: pyteomics.traml\n+\n+ .. autofunction:: chain\n+ .. py:function :: chain.from_iterable(files, **kwargs)\n+\n+ Chain :py:func:`read` for several files.\n+ Keyword arguments are passed to the :py:func:`read` function.\n+\n+ Parameters\n+ ----------\n+ files : iterable\n+ Iterable of file names or file objects.\n+\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add traml doc page |
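The new page documents `traml.chain`; a hedged sketch of the documented call, with hypothetical file names::

    from pyteomics import traml

    # iterate transitions from several TraML files in sequence
    for transition in traml.chain.from_iterable(['run1.TraML', 'run2.TraML']):
        print(transition['id'])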
377,522 | 21.12.2018 03:14:57 | -10,800 | 2b8d500f4a8acb3586cadc37c0469b16d697b1a2 | Fix traml spec URL | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/traml.py",
"new_path": "pyteomics/traml.py",
"diff": "@@ -6,7 +6,7 @@ Summary\n-------\nTraML is a standard rich XML-format for targeted mass spectrometry method definitions.\n-Please refer to `psidev.info <http://www.psidev.info/index.php?q=node/257>`_\n+Please refer to `psidev.info <http://www.psidev.info/traml>`_\nfor the detailed specification of the format and structure of TraML files.\nThis module provides a minimalistic way to extract information from TraML\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix traml spec URL |
377,522 | 21.12.2018 19:38:08 | -10,800 | 90f2aec646c56d678bb375daf7ef17fc7491c34d | Tweak tandem test | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -22,7 +22,7 @@ from .file_helpers import (\nOffsetIndex, HierarchicalOffsetIndex,\n_file_reader, _file_writer,\n_make_chain, _check_use_index, FileReadingProcess, TaskMappingMixin,\n- serializer, ChainBase)\n+ serializer, ChainBase, TableJoiner)\nfrom .math import (\nlinear_regression, linear_regression_perpendicular,\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/target_decoy.py",
"new_path": "pyteomics/auxiliary/target_decoy.py",
"diff": "@@ -9,9 +9,9 @@ except NameError:\nbasestring = (str, bytes)\ntry:\n- from collections import Container, Sized\n-except ImportError:\nfrom collections.abc import Container, Sized\n+except ImportError:\n+ from collections import Container, Sized\nfrom bisect import bisect_right\nfrom contextlib import contextmanager\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_electrochem.py",
"new_path": "tests/test_electrochem.py",
"diff": "@@ -84,7 +84,5 @@ class ElectrochemTest(unittest.TestCase):\n'E': 1, 'F': 1, 'G': 1, 'H': 1, '-OH': 1}, i))\n-\n-\nif __name__ == '__main__':\nunittest.main()\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_tandem.py",
"new_path": "tests/test_tandem.py",
"diff": "@@ -2,26 +2,28 @@ from os import path\nimport pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport unittest\n-from pyteomics.tandem import *\n+from pyteomics import tandem\nfrom data import tandem_spectra\n+\nclass TandemTest(unittest.TestCase):\ndef setUp(self):\nself.maxDiff = None\nself.path = 'test.t.xml'\ndef testReadPSM(self):\n- for func in [TandemXML, read, chain,\n- lambda x, **kw: chain.from_iterable([x], **kw),\n- lambda x, **kw: filter(x, fdr=1, full_output=False),\n- lambda x, **kw: filter.chain(x, fdr=1, full_output=False),\n- lambda x, **kw: filter.chain.from_iterable([x], fdr=1, full_output=False)]:\n+ for func in [tandem.TandemXML, tandem.read, tandem.chain,\n+ lambda x, **kw: tandem.chain.from_iterable([x], **kw),\n+ lambda x, **kw: tandem.filter(x, fdr=1, full_output=False),\n+ lambda x, **kw: tandem.filter.chain(x, fdr=1, full_output=False),\n+ lambda x, **kw: tandem.filter.chain.from_iterable([x], fdr=1, full_output=False)]:\nfor it in range(2):\nwith func(self.path, iterative=it) as r:\nself.assertEqual(list(r), tandem_spectra)\ndef test_df(self):\n- df = DataFrame(self.path)\n+ df = tandem.DataFrame(self.path)\n+ self.assertEqual(df.shape, (1, 29))\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Tweak tandem test |
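The reworked test exercises the module-qualified entry points; roughly the same calls outside the test harness look like this ('test.t.xml' is the file name used by the test)::

    from pyteomics import tandem

    psms = list(tandem.read('test.t.xml'))
    with tandem.filter('test.t.xml', fdr=1, full_output=False) as filtered:
        print(sum(1 for _ in filtered))
    df = tandem.DataFrame('test.t.xml')   # the test expects shape (1, 29)
    print(df.shape)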
377,522 | 25.12.2018 19:40:03 | -10,800 | 466427bc7b4bf64cfb9ffd4b97c914d7559bbd64 | Start documentaion on indexing | [
{
"change_type": "MODIFY",
"old_path": "doc/source/achrom.rst",
"new_path": "doc/source/achrom.rst",
"diff": "@@ -12,7 +12,7 @@ the BioLCCC model of liquid chromatography of polypeptides.\n:py:mod:`pyteomics.biolccc` is not distributed with the main package and has\nto be installed separately. :py:mod:`pyteomics.biolccc` can be downloaded from\nhttp://pypi.python.org/pypi/pyteomics.biolccc, and the project documentation\n-is hosted at http://packages.python.org/pyteomics.biolccc.\n+is hosted at http://theorchromo.ru/docs.\nAdditive model of peptide chromatography\n----------------------------------------\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/data.rst",
"new_path": "doc/source/data.rst",
"diff": "@@ -10,19 +10,23 @@ results and protein databases.\n.. contents:: Document contents\n:backlinks: top\n+ :depth: 2\n.. include :: data/text.rst\n.. include :: data/xml.rst\n+.. include :: data/indexing.rst\n+\nFDR estimation and filtering\n============================\n-Three modules for reading proteomics search engine output (:py:mod:`tandem`,\n-:py:mod:`pepxml` and :py:mod:`mzid`) expose similar functions\n-:py:func:`is_decoy`, :py:func:`fdr` and :py:func:`!filter`. These functions\n-implement the widely used\n+The modules for reading proteomics search engine or post-processing output\n+(:py:mod:`tandem`, :py:mod:`pepxml`, :py:mod:`mzid` and :py:mod:`protxml`)\n+expose similar functions\n+:py:func:`is_decoy`, :py:func:`fdr` and :py:func:`!filter`.\n+These functions implement the widely used\nTarget-Decoy Approach (TDA) to estimation of False Discovery Rate (FDR).\nThe :py:func:`is_decoy` function is supposed to determine if a particular\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "doc/source/data/indexing.rst",
"diff": "+Indexed Parsers\n+===============\n+\n+Most of the parsers implement indexing: MGF, mzML, mzXML, FASTA, PEFF, pepXML, mzIdentML, ms1, TraML, featureXML.\n+Some formats do not have indexing parsers, because there is no unique ID field in the files to identify entries.\n+\n+XML parser classes are called according to the format, e.g. :py:class:`pyteomics.mzml.MzML`. Text format parsers\n+that implement indexing are called with the word \"Indexed\", e.g. `:py:class:`pyteomics.fasta.IndexedFASTA`,\n+as opposed to :py:class:`pyteomics.fasta.FASTA`, which does not implement indexing.\n+This distinction is due to the fact that indexed parsers need to open the files in binary mode.\n+This may affect performance for text-based formats and is not always backwards-compatible\n+(you cannot instantiate an indexed parser class using a previously opened file if it is in text mode).\n+XML files, on the other hand, are always meant to be opened in binary mode.\n+So, there is no duplication of classes for XML formats, but indexing can still be disabled by passing\n+``use_index=False`` to the class constructor or the :py:func:`read` function.\n+\n+Basic usage\n+-----------\n+\n+Indexed parsers can be instantiated using the class name or the :py:func:`read` function::\n+\n+ In [1]: from pyteomics import mgf\n+\n+ In [2]: f = mgf.IndexedMGF('tests/test.mgf')\n+\n+ In [3]: f\n+ Out[3]: <pyteomics.mgf.IndexedMGF at 0x7fc983cbaeb8>\n+\n+ In [4]: f.close()\n+\n+ In [5]: f = mgf.read('tests/test.mgf', use_index=True)\n+\n+ In [6]: f\n+ Out[6]: <pyteomics.mgf.IndexedMGF at 0x7fc980c63898>\n+\n+\n+They support direct assignment and iteration or the `with` syntax, the same way as the older, iterative parsers.\n+\n+Parser objects can be used as dictionaries mapping entry IDs to entries::\n+\n+ In [7]: f['Spectrum 2']\n+ Out[7]:\n+ {'params': {'com': 'Based on http://www.matrixscience.com/help/data_file_help.html',\n+ 'itol': '1',\n+ 'itolu': 'Da',\n+ 'mods': 'Carbamidomethyl (C)',\n+ 'it_mods': 'Oxidation (M)',\n+ 'mass': 'Monoisotopic',\n+ 'username': 'Lou Scene',\n+ 'useremail': 'leu@altered-state.edu',\n+ 'charge': [2, 3],\n+ 'title': 'Spectrum 2',\n+ 'pepmass': (1084.9, 1234.0),\n+ 'scans': '3',\n+ 'rtinseconds': 25.0 second},\n+ 'm/z array': array([ 345.1, 370.2, 460.2, 1673.3, 1674. , 1675.3]),\n+ 'intensity array': array([ 237., 128., 108., 1007., 974., 79.]),\n+ 'charge array': masked_array(data=[3, 2, 1, 1, 1, 1],\n+ mask=False,\n+ fill_value=0)}\n+\n+Rich Indexing\n+-------------\n+\n+Indexed parsers also support positional indexing, slices of IDs and integers. ID-based slices include both\n+endpoints; integer-based slices exclude the right edge of the interval. With integer indexing, __step__\n+is also supported. Here is a self-explanatory demo of indexing functionality using a test file of two spectra::\n+\n+ In [9]: len(f['Spectrum 1':'Spectrum 2'])\n+ Out[9]: 2\n+\n+ In [10]: len(f['Spectrum 2':'Spectrum 1'])\n+ Out[10]: 2\n+\n+ In [11]: len(f[:])\n+ Out[11]: 2\n+\n+ In [12]: len(f[:1])\n+ Out[12]: 1\n+\n+ In [13]: len(f[1:0])\n+ Out[13]: 0\n+\n+ In [14]: len(f[1:0:-1])\n+ Out[14]: 1\n+\n+ In [15]: len(f[::2])\n+ Out[15]: 1\n+\n+RT-based indexing\n+.................\n+\n+In MGF, mzML and mzXML the spectra are usually time-ordered. The corresponding indexed parsers allow accessing the\n+spectra by retention time, including slices::\n+\n+Multiprocessing\n+---------------\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/data/notes.rst",
"new_path": "doc/source/data/notes.rst",
"diff": "@@ -23,6 +23,11 @@ General Notes\n>>> for spectrum in reader:\n>>> ...\n+- Additionally, most modules provide one or several classes which implement different\n+ parsing modes, e.g. :py:class:`pyteomics.mgf.MGF` and :py:class:`pyteomics.mgf.IndexedMGF`.\n+ Indexed parsers build an index of file entries and thus allow **random access** in addition\n+ to **iteration**. See `Indexed Parsers`_ for a detailed description and examples.\n+\n- Apart from :py:func:`read`, which reads just one file, all modules described\nhere have functions for reading multiple files: :py:func:`chain` and\n:py:func:`chain.from_iterable`.\n@@ -34,4 +39,5 @@ General Notes\n- Throughout this section we use\n:py:func:`pyteomics.auxiliary.print_tree` to display the structure of the\n- data returned by various parsers.\n\\ No newline at end of file\n+ data returned by various parsers. Replace this call with the actual processsing\n+ that you need to perform on your files.\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/data/text.rst",
"new_path": "doc/source/data/text.rst",
"diff": "@@ -15,7 +15,7 @@ Reading\n:py:func:`pyteomics.mgf.read` function allows iterating through spectrum entries.\nSpectra are represented as :py:class:`dicts`. By default, MS/MS peak lists are stored\n-as :py:class:`numpy.ndarray` objects `mass array` and `intensity array`.\n+as :py:class:`numpy.ndarray` objects `m/z array` and `intensity array`.\nFragment charges will be stored in a masked array under the `charge array` key.\nParameters are stored as a :py:class:`dict` under `params` key.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Start documentaion on indexing |
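The new page ends with a stub after "including slices::"; judging by the parsers' time locator, the missing example would look roughly like this (tests/test.mzXML ships with pyteomics)::

    from pyteomics import mzxml

    with mzxml.MzXML('tests/test.mzXML') as f:
        spec = f.time[5.5]         # spectrum closest to RT 5.5
        chunk = f.time[5.5:6.0]    # spectra whose RT falls within the range
        print(len(chunk))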
377,522 | 26.12.2018 01:49:38 | -10,800 | 78d4c2ebaabd8bcb0360463fe6b9e35372f2cdae | Update indexing doc | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- :py:class:`pyteomics.fasta.FASTABase` - common ancestor, suitable for type checking;\n- - :py:class:`pyteomics.fasta.FASTA` - text-mode, sequential parser. Does\n- what the old :py:func:`fasta.read` was doing;\n+ - :py:class:`pyteomics.fasta.FASTA` - text-mode, sequential parser; does\n+ what the old :py:func:`fasta.read` was doing. Additionally, the following subclasses perform\n+ format-specific parsing of FASTA headers:\n- :py:class:`pyteomics.fasta.UniProt`;\n- :py:class:`pyteomics.fasta.UniParc`;\nSupports direct indexing by header string;\n- :py:class:`pyteomics.fasta.TwoLayerIndexedFASTA` - additionally supports\n- indexing by extracted header fields;\n+ indexing by extracted header fields. Format-specific second indexes are available in\n+ subclasses:\n- :py:class:`pyteomics.fasta.IndexedUniProt`;\n- :py:class:`pyteomics.fasta.IndexedUniParc`;\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/data.rst",
"new_path": "doc/source/data.rst",
"diff": "@@ -10,7 +10,7 @@ results and protein databases.\n.. contents:: Document contents\n:backlinks: top\n- :depth: 2\n+ :depth: 3\n.. include :: data/text.rst\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/data/indexing.rst",
"new_path": "doc/source/data/indexing.rst",
"diff": "@@ -5,7 +5,7 @@ Most of the parsers implement indexing: MGF, mzML, mzXML, FASTA, PEFF, pepXML, m\nSome formats do not have indexing parsers, because there is no unique ID field in the files to identify entries.\nXML parser classes are called according to the format, e.g. :py:class:`pyteomics.mzml.MzML`. Text format parsers\n-that implement indexing are called with the word \"Indexed\", e.g. `:py:class:`pyteomics.fasta.IndexedFASTA`,\n+that implement indexing are called with the word \"Indexed\", e.g. :py:class:`pyteomics.fasta.IndexedFASTA`,\nas opposed to :py:class:`pyteomics.fasta.FASTA`, which does not implement indexing.\nThis distinction is due to the fact that indexed parsers need to open the files in binary mode.\nThis may affect performance for text-based formats and is not always backwards-compatible\n@@ -36,7 +36,7 @@ Indexed parsers can be instantiated using the class name or the :py:func:`read`\nThey support direct assignment and iteration or the `with` syntax, the same way as the older, iterative parsers.\n-Parser objects can be used as dictionaries mapping entry IDs to entries::\n+Parser objects can be used as dictionaries mapping entry IDs to entries, or as lists::\nIn [7]: f['Spectrum 2']\nOut[7]:\n@@ -59,11 +59,14 @@ Parser objects can be used as dictionaries mapping entry IDs to entries::\nmask=False,\nfill_value=0)}\n+ In [8]: f[1]['params']['title'] # positional indexing\n+ Out[8]: 'Spectrum 2'\n+\nRich Indexing\n-------------\nIndexed parsers also support positional indexing, slices of IDs and integers. ID-based slices include both\n-endpoints; integer-based slices exclude the right edge of the interval. With integer indexing, __step__\n+endpoints; integer-based slices exclude the right edge of the interval. With integer indexing, *step*\nis also supported. Here is a self-explanatory demo of indexing functionality using a test file of two spectra::\nIn [9]: len(f['Spectrum 1':'Spectrum 2'])\n@@ -93,5 +96,61 @@ RT-based indexing\nIn MGF, mzML and mzXML the spectra are usually time-ordered. The corresponding indexed parsers allow accessing the\nspectra by retention time, including slices::\n+ In [16]: f = mzxml.MzXML('tests/test.mzXML')\n+\n+ In [17]: spec = f.time[5.5] # get the spectrum closest to this retention time\n+\n+ In [18]: len(f.time[5.5:6.0]) # get spectra from a range\n+ Out[18]: 2\n+\n+\n+RT lookup is performed using binary search.\n+When retrieving ranges, the closest spectra to the start and end of the range\n+are used as endpoints, so it is possible that they are slightly outside the range.\n+\nMultiprocessing\n---------------\n+\n+Indexed parsers provide a unified interface for multiprocessing: :py:meth:`map`.\n+The method applies a user-defined function to entries from the file, calling it in different processes.\n+If the function is not provided, the parsing itself is parallelized. Depending on the format,\n+this may speed up or slow down the parsing overall.\n+:py:meth:`map` is a generator and yields items as they become available, not preserving the original order::\n+\n+ In [1]: from pyteomics import mzml\n+\n+ In [2]: f = mzml.MzML('tests/test.mzML')\n+\n+ In [3]: for spec in f.map():\n+ ...: print(spec['id'])\n+ ...:\n+ controllerType=0 controllerNumber=1 scan=2\n+ controllerType=0 controllerNumber=1 scan=1\n+\n+ In [4]: for item in f.map(lambda spec: spec['id']):\n+ ...: print(item)\n+ ...:\n+ controllerType=0 controllerNumber=1 scan=1\n+ controllerType=0 controllerNumber=1 scan=2\n+\n+\n+.. 
note ::\n+ To use :py:meth:`map` with lambda functions (and in some other corner cases, like\n+ parsers instantiated with pre-opened file objects), the :py:mod:`dill` package is required.\n+ This is because the target callable and the parser itself need to be pickled for multiprocessing to work.\n+\n+Apart from parser objects, :py:meth:`map` is available on objects returned by :py:func:`chain` functions\n+and :py:meth:`iterfind`::\n+\n+ In [5]: for c in f.iterfind('chromatogram').map():\n+ ...: print(c['id'])\n+ ...:\n+ TIC\n+\n+ In [6]: for spec in mzml.chain('tests/test.mzML', 'tests/test.mzML').map():\n+ ...: print(spec['id'])\n+ ...:\n+ controllerType=0 controllerNumber=1 scan=1\n+ controllerType=0 controllerNumber=1 scan=2\n+ controllerType=0 controllerNumber=1 scan=1\n+ controllerType=0 controllerNumber=1 scan=2\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update indexing doc |
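As the updated doc notes, :py:mod:`dill` is only needed for lambdas and similar corner cases; with a plain module-level function, `map()` should work with standard pickling. A hedged sketch using the bundled tests/test.mzML::

    from pyteomics import mzml

    def spectrum_id(spectrum):
        return spectrum['id']

    with mzml.MzML('tests/test.mzML') as f:
        for sid in f.map(spectrum_id):   # order of results is not guaranteed
            print(sid)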
377,522 | 26.12.2018 16:33:35 | -10,800 | ed93bad4b31eb2aa15c747d86dfc8bb4d1a57c63 | Update xml and indexing docs | [
{
"change_type": "MODIFY",
"old_path": "doc/source/data/indexing.rst",
"new_path": "doc/source/data/indexing.rst",
"diff": "@@ -62,6 +62,14 @@ Parser objects can be used as dictionaries mapping entry IDs to entries, or as l\nIn [8]: f[1]['params']['title'] # positional indexing\nOut[8]: 'Spectrum 2'\n+Like dictionaries, indexed parsers support membership testing and :py:func:`len`::\n+\n+ In [9]: 'Spectrum 1' in f\n+ Out[9]: True\n+\n+ In [10]: len(f)\n+ Out[10]: 2\n+\nRich Indexing\n-------------\n@@ -69,26 +77,26 @@ Indexed parsers also support positional indexing, slices of IDs and integers. ID\nendpoints; integer-based slices exclude the right edge of the interval. With integer indexing, *step*\nis also supported. Here is a self-explanatory demo of indexing functionality using a test file of two spectra::\n- In [9]: len(f['Spectrum 1':'Spectrum 2'])\n- Out[9]: 2\n-\n- In [10]: len(f['Spectrum 2':'Spectrum 1'])\n- Out[10]: 2\n-\n- In [11]: len(f[:])\n+ In [11]: len(f['Spectrum 1':'Spectrum 2'])\nOut[11]: 2\n- In [12]: len(f[:1])\n- Out[12]: 1\n+ In [12]: len(f['Spectrum 2':'Spectrum 1'])\n+ Out[12]: 2\n- In [13]: len(f[1:0])\n- Out[13]: 0\n+ In [13]: len(f[:])\n+ Out[13]: 2\n- In [14]: len(f[1:0:-1])\n+ In [14]: len(f[:1])\nOut[14]: 1\n- In [15]: len(f[::2])\n- Out[15]: 1\n+ In [15]: len(f[1:0])\n+ Out[15]: 0\n+\n+ In [16]: len(f[1:0:-1])\n+ Out[16]: 1\n+\n+ In [17]: len(f[::2])\n+ Out[17]: 1\nRT-based indexing\n.................\n@@ -96,12 +104,12 @@ RT-based indexing\nIn MGF, mzML and mzXML the spectra are usually time-ordered. The corresponding indexed parsers allow accessing the\nspectra by retention time, including slices::\n- In [16]: f = mzxml.MzXML('tests/test.mzXML')\n+ In [18]: f = mzxml.MzXML('tests/test.mzXML')\n- In [17]: spec = f.time[5.5] # get the spectrum closest to this retention time\n+ In [19]: spec = f.time[5.5] # get the spectrum closest to this retention time\n- In [18]: len(f.time[5.5:6.0]) # get spectra from a range\n- Out[18]: 2\n+ In [20]: len(f.time[5.5:6.0]) # get spectra from a range\n+ Out[20]: 2\nRT lookup is performed using binary search.\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/data/xml.rst",
"new_path": "doc/source/data/xml.rst",
"diff": "@@ -60,15 +60,7 @@ Here is an example of the output:\n-> no combination\nAdditionally, :py:class:`pyteomics.mzml.MzML` objects support direct indexing\n-with spectrum IDs:\n-\n-.. code-block:: python\n-\n- >>> from pyteomics import mzml\n- >>> with mzml.MzML('tests/test.mzML') as reader:\n- >>> spectrum = reader[\"controllerType=0 controllerNumber=1 scan=1\"]\n- >>> mz = spectrum['m/z array'] # do something with \"spectrum\"\n- >>> ...\n+with spectrum IDs and all other features of `Indexed Parsers`_.\n:py:class:`pyteomics.mzml.PreIndexedMzML` offers the same functionality,\nbut it uses byte offset information found at the end of the file.\n@@ -138,7 +130,7 @@ can use the :py:class:`pyteomics.pepxml.PepXML` interface.\nReading into a pandas.DataFrame\n...............................\n-If you like working with tabular data using :py:mod:`pandas`, you can load data from pepXML files\n+If you like working with tabular data using :py:mod:`pandas`, you can load pepXML files\ndirectly into :py:class:`pandas.DataFrames`\nusing the :py:func:`pyteomics.pepxml.DataFrame` function. It can read multiple files\nat once (using :py:func:`pyteomics.pepxml.chain`) and return a combined table with\n@@ -348,7 +340,8 @@ FeatureXML\n:py:mod:`pyteomics.openms.featurexml` implements a simple parser for **.featureXML** files\nused in the `OpenMS <http://open-ms.sourceforge.net/about/>`_ framework. The usage\nis identical to other XML parsing modules. Since **featureXML** has feature IDs,\n-:py:class:`FeatureXML` objects also support direct indexing as well as iteration::\n+:py:class:`FeatureXML` objects also support direct indexing as well as iteration, among\n+the many features of `Indexed Parsers`_::\n>>> from pyteomics.openms import featurexml\n@@ -374,7 +367,7 @@ TrafoXML\n**.trafoXML** is another OpenMS format based on XML. It describes a\ntranformation produced by an RT alignment algorithm. The file basically contains a series\n-of `(from; to)` pairs corresponding to original and transformed retention times:\n+of `(from; to)` pairs corresponding to original and transformed retention times::\n>>> from pyteomics.openms import trafoxml\n>>> from_rt, to_rt = [], []\n@@ -389,6 +382,7 @@ of `(from; to)` pairs corresponding to original and transformed retention times:\nAs always, :py:func:`pyteomics.openms.trafoxml.read`\nand :py:class:`pyteomics.openms.trafoxml.TrafoXML` are interchangeable.\n+TrafoXML parsers do not support indexing because there are no IDs for specific data points in this format.\nControlled Vocabularies\n=======================\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update xml and indexing docs |
377,522 | 26.12.2018 17:46:49 | -10,800 | 49f613fb9268d8b03dc025978d9a671c4fb324e0 | Fall back to regular indexing in PreIndexedMzML | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -393,8 +393,7 @@ def iterfind(source, path, **kwargs):\nversion_info = xml._make_version_info(MzML)\n-chain = aux._make_chain(read, 'read')\n-\n+# chain = aux._make_chain(read, 'read')\nchain = aux.ChainBase._make_chain(MzML)\n@@ -408,7 +407,12 @@ class PreIndexedMzML(MzML):\nBuild up a `dict` of `dict` of offsets for elements. Calls :meth:`_find_index_list`\nand assigns the return value to :attr:`_offset_index`\n\"\"\"\n- self._offset_index = self._find_index_list()\n+ index = self._find_index_list()\n+ if index:\n+ self._offset_index = index\n+ else:\n+ warnings.warn('Could not extract the embedded offset index. Falling back to default indexing procedure.')\n+ super(PreIndexedMzML, self)._build_index()\n@xml._keepstate\ndef _iterparse_index_list(self, offset):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fall back to regular indexing in PreIndexedMzML |
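With the new fallback, `PreIndexedMzML` degrades gracefully on files without a usable embedded index; a sketch that surfaces the warning if it fires (whether tests/test.mzML triggers it depends on the file)::

    import warnings
    from pyteomics import mzml

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        f = mzml.PreIndexedMzML('tests/test.mzML')   # index is built in __init__
    if caught:
        print(caught[-1].message)   # fell back to the default indexing procedure
    print(next(f)['id'])
    f.close()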
377,522 | 26.12.2018 18:37:25 | -10,800 | 95aca3e39e12b255625f0a90660bb12d7b294425 | Add retrieve_refs for traml; make get_by_id raise KeyError | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -139,8 +139,9 @@ class MzIdentML(xml.IndexSavingXML, xml.MultiProcessingXML):\nends in _ref. Removes the id attribute from `info`\"\"\"\nfor k, v in dict(info).items():\nif k.endswith('_ref'):\n+ try:\nby_id = self.get_by_id(v, retrieve_refs=True)\n- if by_id is None:\n+ except KeyError:\nwarnings.warn('Ignoring unresolved reference: ' + v)\nelse:\ninfo.update(by_id)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/traml.py",
"new_path": "pyteomics/traml.py",
"diff": "\"\"\"\ntraml - reader for targeted mass spectrometry transition data in TraML format\n-=======================================================\n+=============================================================================\nSummary\n-------\n@@ -59,6 +59,7 @@ This module requires :py:mod:`lxml`\n# limitations under the License.\n+import warnings\nfrom . import xml, _schema_defaults, auxiliary as aux\n@@ -79,6 +80,10 @@ class TraML(xml.MultiProcessingXML, xml.IndexSavingXML):\n'Compound',\n}\n+ def __init__(self, *args, **kwargs):\n+ kwargs.setdefault('retrieve_refs', True)\n+ super(TraML, self).__init__(*args, **kwargs)\n+\ndef _get_info_smart(self, element, **kw):\nkwargs = dict(kw)\nrec = kwargs.pop('recursive', None)\n@@ -88,6 +93,26 @@ class TraML(xml.MultiProcessingXML, xml.IndexSavingXML):\n**kwargs)\nreturn info\n+ def _retrieve_refs(self, info, **kwargs):\n+ \"\"\"Retrieves and embeds the data for each attribute in `info` that\n+ ends in `Ref`. Removes the id attribute from `info`\"\"\"\n+ for k, v in dict(info).items():\n+ if k[-3:] in {'Ref', 'ref'}:\n+ if isinstance(v, str):\n+ key = v\n+ elif isinstance(v, dict):\n+ key = v['ref']\n+ else:\n+ continue\n+ try:\n+ by_id = self.get_by_id(key, retrieve_refs=True)\n+ except KeyError:\n+ warnings.warn('Ignoring unresolved reference: ' + key)\n+ else:\n+ info.update(by_id)\n+ del info[k]\n+ info.pop('id', None)\n+\ndef read(source, read_schema=False, iterative=True, use_index=False, huge_tree=False):\n\"\"\"Parse `source` and iterate through transitions.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -598,12 +598,10 @@ class XML(FileReader):\n-------\nout : :py:class:`dict` or :py:const:`None`\n\"\"\"\n- elem = None\nif not self._id_dict:\nelem = self._find_by_id_no_reset(elem_id)\n- elif elem_id in self._id_dict:\n+ else:\nelem = self._id_dict[elem_id]\n- if elem is not None:\nreturn self._get_info_smart(elem, **kwargs)\n# XPath emulator tools\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add retrieve_refs for traml; make get_by_id raise KeyError |
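Since `get_by_id` now raises :py:exc:`KeyError` for missing IDs instead of returning :py:const:`None`, calling code should catch the exception; a sketch against a hypothetical TraML file and transition ID::

    from pyteomics import traml

    with traml.TraML('targets.TraML') as f:   # retrieve_refs is now True by default
        try:
            transition = f.get_by_id('ADTHFLLNIYDQLR-M1-T1')
        except KeyError:
            transition = None
        print(transition is not None)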
377,522 | 26.12.2018 19:05:48 | -10,800 | bbdbedd1ed1930e5ba8b19425b05a95d1688fb66 | Tweak traml behavior | [
{
"change_type": "MODIFY",
"old_path": "doc/source/data/xml.rst",
"new_path": "doc/source/data/xml.rst",
"diff": "@@ -333,6 +333,13 @@ Reading into a pandas.DataFrame\nthat reads one or several files into a single Pandas :py:class:`DataFrame`.\nThis function requires :py:mod:`pandas`.\n+TraML\n+-----\n+\n+`TraML <http://www.psidev.info/traml>`_ is also a PSI format. It stores a lot of information on SRM experiments.\n+The parser, :py:class:`pyteomics.traml.TraML`, iterates over `<Transition>` elements by default.\n+Like `MzIdentML`_, it has a `retrieve_refs` parameter that helps pull in the information from other parts of the file.\n+:py:class:`TraML` is one of the `Indexed Parsers`_.\nFeatureXML\n----------\n@@ -389,8 +396,8 @@ Controlled Vocabularies\n`Controlled Vocabularies <http://www.psidev.info/controlled-vocabularies>`_\nare the universal annotation system used in the PSI formats, including\n-**mzML** and **mzIdentML**. :py:class:`pyteomics.mzml.MzML` and :py:class:`pyteomics.mzid.MzIdentML`\n-retain the annotation information. It can be accessed using the helper function, :py:func:`pyteomics.auxiliary.cvquery`:\n+**mzML** and **mzIdentML**. :py:class:`pyteomics.mzml.MzML`, :py:class:`pyteomics.traml.TraML` and :py:class:`pyteomics.mzid.MzIdentML`\n+retain the annotation information. It can be accessed using the helper function, :py:func:`pyteomics.auxiliary.cvquery`::\n>>> from pyteomics import auxiliary as aux, mzid, mzml\n>>> f = mzid.MzIdentML('tests/test.mzid')\n@@ -400,12 +407,3 @@ retain the annotation information. It can be accessed using the helper function,\n>>> aux.cvquery(s)\n{'MS:1001506': 7.59488518903425, 'MS:1001505': 0.3919545603809718}\n>>> f.close()\n-\n- >>> f = mzml.MzML('tests/test.mzML')\n- >>> s = next(f)\n- >>> s\n- {'defaultArrayLength': 19914, 'intensity array': array([ 0., 0., 0., ..., 0., 0., 0.], dtype=float32), 'base peak m/z': 810.415283203125, 'highest observed m/z': 2000.0099466203771, 'index': 0, 'total ion current': 15245068.0, 'id': 'controllerType=0 controllerNumber=1 scan=1', 'count': 2, 'm/z array': array([ 200.00018817, 200.00043034, 200.00067252, ..., 1999.96151259,\n- 1999.98572931, 2000.00994662]), 'ms level': 1, 'base peak intensity': 1471973.875, 'lowest observed m/z': 200.00018816645022, 'MSn spectrum': '', 'positive scan': '', 'scanList': {'count': 1, 'scan': [{'preset scan configuration': 1.0, 'scanWindowList': {'count': 1, 'scanWindow': [{'scan window lower limit': 200.0, 'scan window upper limit': 2000.0}]}, 'instrumentConfigurationRef': 'IC1', 'filter string': 'FTMS + p ESI Full ms [200.00-2000.00]', 'scan start time': 0.004935, '[Thermo Trailer Extra]Monoisotopic M/Z:': 810.4152221679688}], 'no combination': ''}, 'profile spectrum': ''}\n- >>> aux.cvquery(s, 'MS:1000285')\n- 15245068.0\n- >>> f.close()\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/traml.py",
"new_path": "pyteomics/traml.py",
"diff": "@@ -103,15 +103,21 @@ class TraML(xml.MultiProcessingXML, xml.IndexSavingXML):\nelif isinstance(v, dict):\nkey = v['ref']\nelse:\n+ if k != 'ref':\n+ info[k[:-3]] = info.pop(k)\ncontinue\ntry:\nby_id = self.get_by_id(key, retrieve_refs=True)\nexcept KeyError:\nwarnings.warn('Ignoring unresolved reference: ' + key)\nelse:\n+ if k == 'ref':\ninfo.update(by_id)\n+ else:\n+ # by_id.pop('id', None)\n+ info[k[:-3]] = by_id\ndel info[k]\n- info.pop('id', None)\n+\ndef read(source, read_schema=False, iterative=True, use_index=False, huge_tree=False):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Tweak traml behavior |
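After this tweak, a resolved reference such as `peptideRef` is stored under the trimmed key (`peptide`) instead of being merged into the transition; a quick way to observe that ('tests/ToyExample1.TraML' is the example file from the test suite)::

    from pyteomics import traml

    with traml.TraML('tests/ToyExample1.TraML') as f:
        transition = next(f)
        # the reference key is consumed and replaced by the resolved entry
        print('peptide' in transition, 'peptideRef' in transition)  # True False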
377,522 | 03.01.2019 19:38:04 | -10,800 | f054448a8b9733cd51aae50668c3c408c18ae226 | Add more handlers in traml, add test | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/traml.py",
"new_path": "pyteomics/traml.py",
"diff": "@@ -82,7 +82,9 @@ class TraML(xml.MultiProcessingXML, xml.IndexSavingXML):\n_element_handlers = xml.XML._element_handlers.copy()\n_element_handlers.update({\n- \"Modification\": xml.XML._promote_empty_parameter_to_name,\n+ 'Modification': xml.XML._promote_empty_parameter_to_name,\n+ 'Interpretation': xml.XML._promote_empty_parameter_to_name,\n+ 'Software': xml.XML._promote_empty_parameter_to_name,\n})\ndef __init__(self, *args, **kwargs):\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/ToyExample1.TraML",
"diff": "+<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n+<TraML version=\"1.0.0\" xmlns=\"http://psi.hupo.org/ms/traml\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://psi.hupo.org/ms/traml TraML1.0.0.xsd\">\n+ <cvList>\n+ <cv id=\"MS\" fullName=\"Proteomics Standards Initiative Mass Spectrometry Ontology\" version=\"2.31.0\" URI=\"http://psidev.cvs.sourceforge.net/*checkout*/psidev/psi/psi-ms/mzML/controlledVocabulary/psi-ms.obo\"/>\n+ <cv id=\"UO\" fullName=\"Unit Ontology\" version=\"unknown\" URI=\"http://obo.cvs.sourceforge.net/obo/obo/ontology/phenotype/unit.obo\"/>\n+ <cv id=\"UNIMOD\" fullName=\"UNIMOD CV for modifications\" version=\"unknown\" URI=\"http://www.unimod.org/obo/unimod.obo\"/>\n+ </cvList>\n+\n+ <SourceFileList>\n+ <SourceFile id=\"sf1\" name=\"OneTransition.tsv\" location=\"file:///F:/data/Exp01\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000914\" name=\"tab delimited text file\" value=\"\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000569\" name=\"SHA-1\" value=\"71be39fb2700ab2f3c8b2234b91274968b6899b1\"/>\n+ </SourceFile>\n+ </SourceFileList>\n+\n+ <ContactList>\n+ <Contact id=\"CS\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000586\" name=\"contact name\" value=\"Eric Deutsch\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000590\" name=\"contact organization\" value=\"Institute for Systems Biology\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000587\" name=\"contact address\" value=\"1441 NE 34th St, Seattle WA 98103, USA\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000588\" name=\"contact URL\" value=\"http://www.systemsbiology.org/\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000589\" name=\"contact email\" value=\"example@systemsbiology.org\"/>\n+ </Contact>\n+ </ContactList>\n+\n+ <PublicationList>\n+ <Publication id=\"PMID12748199\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000879\" name=\"PubMed identifier\" value=\"12748199\"/>\n+ </Publication>\n+ </PublicationList>\n+\n+ <InstrumentList>\n+ <Instrument id=\"LCQ_Deca\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000554\" name=\"LCQ Deca\"/>\n+ </Instrument>\n+ <Instrument id=\"QTRAP\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000870\" name=\"4000 QTRAP\"/>\n+ </Instrument>\n+ </InstrumentList>\n+\n+ <SoftwareList>\n+ <Software id=\"MaRiMba\" version=\"1.0\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000872\" name=\"MaRiMba\"/>\n+ </Software>\n+ <Software id=\"SSRCalc3.0\" version=\"3.0\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000874\" name=\"SSRCalc\"/>\n+ </Software>\n+ <Software id=\"Skyline0.5\" version=\"0.5\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000922\" name=\"Skyline\"/>\n+ </Software>\n+ </SoftwareList>\n+\n+ <!-- Note that the protein names, peptide sequences, modification and transition values are not all fully internally consistent, but are intended merely as examples here -->\n+ <ProteinList>\n+ <Protein id=\"Q12149\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000885\" name=\"protein accession\" value=\"Q00613\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000883\" name=\"protein short name\" value=\"HSF 1\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000886\" name=\"protein name\" value=\"Heat shock factor protein 1\"/>\n+ <Sequence>MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKE</Sequence>\n+ </Protein>\n+ <Protein id=\"ENSP00000332698\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000885\" name=\"protein accession\" value=\"ENSP00000332698\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000883\" name=\"protein short name\" value=\"HSF 
1\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000886\" name=\"protein name\" value=\"Heat shock factor protein 1\"/>\n+ <Sequence>MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKEELISNSSDALDKI</Sequence>\n+ </Protein>\n+ </ProteinList>\n+\n+ <CompoundList>\n+ <Peptide id=\"ADTHFLLNIYDQLR-M1\" sequence=\"ADTHFLLNIYDQLR\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000891\" name=\"heavy labeled peptide\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000893\" name=\"peptide group label\" value=\"G1\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000863\" name=\"predicted isoelectric point\" value=\"5.22\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1001117\" name=\"theoretical mass\" value=\"1189.22\" unitCvRef=\"UO\" unitAccession=\"UO:0000221\" unitName=\"dalton\"/>\n+ <userParam name=\"isomerization potential\" value=\"0.583\" type=\"xsd:float\"/> <!-- Additional information may be added as a userParams if it is not possible and not appropriate to encode the information as a cvParam -->\n+ <ProteinRef ref=\"Q12149\"/>\n+ <ProteinRef ref=\"ENSP00000332698\"/>\n+ <Modification location=\"0\" monoisotopicMassDelta=\"127.063324\">\n+ <cvParam cvRef=\"UNIMOD\" accession=\"UNIMOD:29\" name=\"SMA\"/>\n+ </Modification>\n+ <Modification location=\"1\" monoisotopicMassDelta=\"15.994919\">\n+ <cvParam cvRef=\"UNIMOD\" accession=\"UNIMOD:35\" name=\"Oxidation\"/>\n+ </Modification>\n+ <RetentionTimeList>\n+ <RetentionTime softwareRef=\"SSRCalc3.0\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000897\" name=\"predicted retention time\" value=\"44.07\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ </RetentionTime>\n+ <RetentionTime>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000896\" name=\"normalized retention time\" value=\"38.43\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000902\" name=\"H-PINS retention time normalization standard\"/>\n+ </RetentionTime>\n+ </RetentionTimeList>\n+ <Evidence>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1001100\" name=\"confident peptide\" value=\"6\"/>\n+ </Evidence>\n+ </Peptide>\n+ <Peptide id=\"PEPTIDEC\" sequence=\"PEPTIDEC\"/>\n+ <Peptide id=\"PEPTIDEM\" sequence=\"PEPTIDEM\"/>\n+ <Compound id=\"glyoxylate\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1001117\" name=\"theoretical mass\" value=\"423.39\" unitCvRef=\"UO\" unitAccession=\"UO:0000221\" unitName=\"dalton\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000866\" name=\"molecular formula\" value=\"C2HO3\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000868\" name=\"SMILES string\" value=\"[CH](=[O])[C](=[O])[O-]\"/>\n+ <RetentionTimeList>\n+ <RetentionTime>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000896\" name=\"normalized retention time\" value=\"22.34\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000902\" name=\"H-PINS retention time normalization standard\"/>\n+ </RetentionTime>\n+ </RetentionTimeList>\n+ </Compound>\n+ </CompoundList>\n+\n+ <TransitionList>\n+ <Transition id=\"ADTHFLLNIYDQLR-M1-T1\" peptideRef=\"ADTHFLLNIYDQLR-M1\">\n+ <Precursor>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000827\" name=\"isolation window target m/z\" value=\"862.9467\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000041\" name=\"charge state\" value=\"2\"/>\n+ </Precursor>\n+ <Product>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000827\" name=\"isolation window target m/z\" value=\"1040.57\" unitCvRef=\"MS\" 
unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000041\" name=\"charge state\" value=\"1\"/>\n+ <InterpretationList>\n+ <Interpretation>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000926\" name=\"product interpretation rank\" value=\"1\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1001220\" name=\"frag: y ion\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000903\" name=\"product ion series ordinal\" value=\"8\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000904\" name=\"product ion m/z delta\" value=\"0.03\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ </Interpretation>\n+ <Interpretation>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000926\" name=\"product interpretation rank\" value=\"2\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1001222\" name=\"frag: b ion - H2O\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000903\" name=\"product ion series ordinal\" value=\"9\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000904\" name=\"product ion m/z delta\" value=\"-0.43\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ </Interpretation>\n+ </InterpretationList>\n+ <ConfigurationList>\n+ <Configuration instrumentRef=\"QTRAP\" contactRef=\"CS\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000502\" name=\"dwell time\" value=\"0.12\" unitCvRef=\"UO\" unitAccession=\"UO:0000010\" unitName=\"second\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000045\" name=\"collision energy\" value=\"26\" unitCvRef=\"UO\" unitAccession=\"UO:0000266\" unitName=\"electronvolt\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000875\" name=\"declustering potential\" value=\"64\" unitCvRef=\"UO\" unitAccession=\"UO:0000218\" unitName=\"volt\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000419\" name=\"collision gas\" value=\"argon\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000869\" name=\"collision gas pressure\" value=\"12\" unitCvRef=\"UO\" unitAccession=\"UO:0000110\" unitName=\"pascal\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000876\" name=\"cone voltage\" value=\"1200\" unitCvRef=\"UO\" unitAccession=\"UO:0000218\" unitName=\"volt\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000880\" name=\"interchannel delay\" value=\"0.01\" unitCvRef=\"UO\" unitAccession=\"UO:0000010\" unitName=\"second\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000877\" name=\"tube lens voltage\" value=\"23\" unitCvRef=\"UO\" unitAccession=\"UO:0000218\" unitName=\"volt\"/>\n+\n+ <ValidationStatus>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000910\" name=\"transition optimized on specified instrument\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000139\" name=\"4000 QTRAP\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000042\" name=\"peak intensity\" value=\"4072\" unitCvRef=\"MS\" unitAccession=\"MS:1000905\" unitName=\"percent of base peak times 100\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000906\" name=\"peak intensity rank\" value=\"2\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000907\" name=\"peak targeting suitability rank\" value=\"1\"/>\n+ </ValidationStatus>\n+ </Configuration>\n+ </ConfigurationList>\n+ </Product>\n+ <RetentionTime softwareRef=\"Skyline0.5\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000895\" name=\"local retention time\" value=\"40.02\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000916\" name=\"retention time window lower offset\" value=\"3.0\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000917\" name=\"retention time window 
upper offset\" value=\"3.0\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ </RetentionTime>\n+ <Prediction softwareRef=\"MaRiMba\" contactRef=\"CS\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000912\" name=\"transition purported from an MS/MS spectrum on a different, specified instrument\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000291\" name=\"linear ion trap\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000042\" name=\"peak intensity\" value=\"10000\" unitCvRef=\"MS\" unitAccession=\"MS:1000905\" unitName=\"percent of base peak times 100\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000906\" name=\"peak intensity rank\" value=\"1\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000907\" name=\"peak targeting suitability rank\" value=\"1\"/>\n+ </Prediction>\n+ </Transition>\n+\n+ <Transition id=\"ADTHFLLNIYDQLR-M1-T2\" peptideRef=\"ADTHFLLNIYDQLR-M1\">\n+ <Precursor>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000827\" name=\"isolation window target m/z\" value=\"862.9467\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000828\" name=\"isolation window lower offset\" value=\"1.0\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000829\" name=\"isolation window upper offset\" value=\"1.0\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000041\" name=\"charge state\" value=\"2\"/>\n+ </Precursor>\n+ <IntermediateProduct>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000827\" name=\"isolation window target m/z\" value=\"1040.57\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000041\" name=\"charge state\" value=\"1\"/>\n+ <InterpretationList>\n+ <Interpretation>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000926\" name=\"product interpretation rank\" value=\"1\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1001220\" name=\"frag: y ion\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000903\" name=\"product ion series ordinal\" value=\"8\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000904\" name=\"product ion m/z delta\" value=\"0.03\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ </Interpretation>\n+ </InterpretationList>\n+ <ConfigurationList>\n+ <Configuration instrumentRef=\"QTRAP\" contactRef=\"CS\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000045\" name=\"collision energy\" value=\"26\" unitCvRef=\"UO\" unitAccession=\"UO:0000266\" unitName=\"electronvolt\"/>\n+ </Configuration>\n+ </ConfigurationList>\n+ </IntermediateProduct>\n+ <Product>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000827\" name=\"isolation window target m/z\" value=\"543.2\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000041\" name=\"charge state\" value=\"1\"/>\n+ <InterpretationList>\n+ <Interpretation>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000926\" name=\"product interpretation rank\" value=\"1\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1001220\" name=\"frag: y ion\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000903\" name=\"product ion series ordinal\" value=\"4\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000904\" name=\"product ion m/z delta\" value=\"0.03\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ </Interpretation>\n+ </InterpretationList>\n+ <ConfigurationList>\n+ <Configuration instrumentRef=\"QTRAP\" contactRef=\"CS\">\n+ <cvParam cvRef=\"MS\" 
accession=\"MS:1000502\" name=\"dwell time\" value=\"0.12\" unitCvRef=\"UO\" unitAccession=\"UO:0000010\" unitName=\"second\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000045\" name=\"collision energy\" value=\"20.4\" unitCvRef=\"UO\" unitAccession=\"UO:0000266\" unitName=\"electronvolt\"/>\n+ </Configuration>\n+ </ConfigurationList>\n+ </Product>\n+ <RetentionTime softwareRef=\"Skyline0.5\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000895\" name=\"local retention time\" value=\"40.02\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000916\" name=\"retention time window lower offset\" value=\"3.0\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000917\" name=\"retention time window upper offset\" value=\"3.0\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ </RetentionTime>\n+ </Transition>\n+\n+ </TransitionList>\n+\n+ <TargetList>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000920\" name=\"includes supersede excludes\"/>\n+ <TargetIncludeList>\n+ <Target id=\"PEPTIDEC2+\" peptideRef=\"PEPTIDEC\">\n+ <Precursor>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000827\" name=\"isolation window target m/z\" value=\"862.9467\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000041\" name=\"charge state\" value=\"2\"/>\n+ </Precursor>\n+ <RetentionTime softwareRef=\"Skyline0.5\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000895\" name=\"local retention time\" value=\"27.44\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000916\" name=\"retention time window lower offset\" value=\"4.0\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000917\" name=\"retention time window upper offset\" value=\"4.0\" unitCvRef=\"UO\" unitAccession=\"UO:0000031\" unitName=\"minute\"/>\n+ </RetentionTime>\n+ <ConfigurationList>\n+ <Configuration instrumentRef=\"LCQ_Deca\" contactRef=\"CS\">\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000045\" name=\"collision energy\" value=\"26\" unitCvRef=\"UO\" unitAccession=\"UO:0000266\" unitName=\"electronvolt\"/>\n+ </Configuration>\n+ </ConfigurationList>\n+ </Target>\n+ </TargetIncludeList>\n+ <TargetExcludeList>\n+ <Target id=\"PEPTIDEM3+\" peptideRef=\"PEPTIDEM\">\n+ <Precursor>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000827\" name=\"isolation window target m/z\" value=\"698.3443\" unitCvRef=\"MS\" unitAccession=\"MS:1000040\" unitName=\"m/z\"/>\n+ <cvParam cvRef=\"MS\" accession=\"MS:1000041\" name=\"charge state\" value=\"3\"/>\n+ </Precursor>\n+ </Target>\n+ </TargetExcludeList>\n+ </TargetList>\n+</TraML>\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1601,7 +1601,8 @@ mzxml_spectra = [\n{'polarity': '+', 'collisionEnergy': 35.0, 'id': '20', 'basePeakIntensity': 301045.0, 'highMz': 905.0, 'msLevel': 2, 'totIonCurrent': 764637.0, 'peaksCount': 43, 'precursorMz': [{'precursorMz': 445.35, 'precursorIntensity': 120053.0}], 'num': '20', 'basePeakMz': 428.905, 'lowMz': 110.0, 'intensity array': makeCA([3071.0, 1259.0, 564.0, 2371.0, 1646.0, 1546.0, 1093.0, 1498.0, 1110.0, 2013.0, 1535.0, 1973.0, 28317.0, 4071.0, 792.0, 2456.0, 3167.0, 1673.0, 216505.0, 30083.0, 2.0, 1192.0, 1273.0, 2070.0, 3120.0, 11655.0, 2124.0, 821.0, 825.0, 4734.0, 3214.0, 1235.0, 6617.0, 4802.0, 3320.0, 301045.0, 101500.0, 666.0, 1892.0, 1301.0, 1923.0, 683.0, 1880.0]), 'm/z array': makeCA([223.08883666992188, 244.08282470703125, 270.891845703125, 277.880859375, 281.1331787109375, 293.664794921875, 311.64837646484375, 312.763916015625, 329.0174560546875, 333.06805419921875, 336.62493896484375, 338.9378662109375, 340.9237060546875, 341.9869384765625, 348.98486328125, 351.067138671875, 354.82891845703125, 357.0274658203125, 358.66326904296875, 359.61871337890625, 360.2332763671875, 370.48370361328125, 382.07147216796875, 383.66082763671875, 385.33001708984375, 386.373291015625, 388.41363525390625, 398.84710693359375, 400.7999267578125, 401.9385986328125, 410.0867919921875, 420.408447265625, 426.13665771484375, 426.94586181640625, 428.072509765625, 428.90478515625, 429.922607421875, 430.8460693359375, 438.67962646484375, 443.957275390625, 444.7640380859375, 446.65692138671875, 531.078369140625]), 'retentionTime': 5.9446666666666665}\n]\n-ms1_spectra = [{'intensity array': makeCA([ 0. , 20.0522 , 29.26406, 30.04175, 20.19221, 11.58895,\n+ms1_spectra = [\n+ {'intensity array': makeCA([ 0. , 20.0522 , 29.26406, 30.04175, 20.19221, 11.58895,\n0. 
]),\n'm/z array': makeCA([ 2.51263, 82.51282, 82.51301, 82.51321, 82.5134 , 82.51359,\n82.51378]),\n@@ -2169,3 +2170,148 @@ protxml_results =[{'group_number': 1,\n'total_number_distinct_peptides': 29,\n'total_number_peptides': 29,\n'unique_stripped_peptides': ['AEIATEEFIPVTPIIEGFVIIR', 'AEIATEEFIPVTPIIEGFVIIRK', 'APVQPQQSPAAAPGGTDEKPSGK', 'AVPIAIAIISVSNPR', 'CAIGVFR', 'DKAPVQPQQSPAAAPGGTDEKPSGK', 'EIDIMEPK', 'EPIITIVK', 'EWQEIDDAEKVQREPIITIVK', 'FGGSGSQVDSAR', 'GTITICPYHSDR', 'INIIDTISK', 'IVGSQEEIASWGHEYVR', 'MIVTFDEEIRPIPVSVR', 'MNIASSFVNGFVNAAFGQDK', 'SGAIIACGIVNSGVR', 'TITGFQTHTTPVIIAHGER', 'VGQAVDVVGQAGKPK', 'VPDDIYKTHIENNR', 'VQREPIITIVK', 'YGEPTIR', 'YIYSSEDYIK']}]}]\n+\n+transitions = [\n+{'Precursor': {'charge state': 2.0,\n+ 'isolation window target m/z': 862.9467},\n+ 'Prediction': {'contact': {'contact URL': 'http://www.systemsbiology.org/',\n+ 'contact address': '1441 NE 34th St, Seattle WA 98103, USA',\n+ 'contact email': 'example@systemsbiology.org',\n+ 'contact name': 'Eric Deutsch',\n+ 'contact organization': 'Institute for Systems Biology',\n+ 'id': 'CS'},\n+ 'linear ion trap': '',\n+ 'peak intensity': 10000.0,\n+ 'peak intensity rank': 1.0,\n+ 'peak targeting suitability rank': 1.0,\n+ 'software': {'name': 'MaRiMba', 'id': 'MaRiMba', 'version': '1.0'},\n+ 'transition purported from an MS/MS spectrum on a different, specified instrument': ''},\n+ 'Product': {'ConfigurationList': {'Configuration': [{'ValidationStatus': [{'4000 QTRAP': '',\n+ 'peak intensity': 4072.0,\n+ 'peak intensity rank': 2.0,\n+ 'peak targeting suitability rank': 1.0,\n+ 'transition optimized on specified instrument': ''}],\n+ 'collision energy': 26.0,\n+ 'collision gas': 'argon',\n+ 'collision gas pressure': 12.0,\n+ 'cone voltage': 1200.0,\n+ 'contact': {'contact URL': 'http://www.systemsbiology.org/',\n+ 'contact address': '1441 NE 34th St, Seattle WA 98103, USA',\n+ 'contact email': 'example@systemsbiology.org',\n+ 'contact name': 'Eric Deutsch',\n+ 'contact organization': 'Institute for Systems Biology',\n+ 'id': 'CS'},\n+ 'declustering potential': 64.0,\n+ 'dwell time': 0.12,\n+ 'instrument': {'4000 QTRAP': '', 'id': 'QTRAP'},\n+ 'interchannel delay': 0.01,\n+ 'tube lens voltage': 23.0}]},\n+ 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion',\n+ 'product interpretation rank': 1.0,\n+ 'product ion m/z delta': 0.03,\n+ 'product ion series ordinal': 8.0},\n+ {'name': 'frag: b ion - H2O',\n+ 'product interpretation rank': 2.0,\n+ 'product ion m/z delta': -0.43,\n+ 'product ion series ordinal': 9.0}]},\n+ 'charge state': 1.0,\n+ 'isolation window target m/z': 1040.57},\n+ 'RetentionTime': [{'local retention time': 40.02,\n+ 'retention time window lower offset': 3.0,\n+ 'retention time window upper offset': 3.0,\n+ 'software': {'name': 'Skyline', 'id': 'Skyline0.5', 'version': '0.5'}}],\n+ 'id': 'ADTHFLLNIYDQLR-M1-T1',\n+ 'peptide': {'Evidence': {'confident peptide': 6.0},\n+ 'Modification': [{'location': 0,\n+ 'monoisotopicMassDelta': 127.063324,\n+ 'name': 'SMA'},\n+ {'location': 1, 'monoisotopicMassDelta': 15.994919, 'name': 'Oxidation'}],\n+ 'Protein': [{'Sequence': 'MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKE',\n+ 'id': 'Q12149',\n+ 'protein accession': 'Q00613',\n+ 'protein name': 'Heat shock factor protein 1',\n+ 'protein short name': 'HSF 1',\n+ 'ref': 'Q12149'},\n+ {'Sequence': 'MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKEELISNSSDALDKI',\n+ 'id': 'ENSP00000332698',\n+ 'protein accession': 'ENSP00000332698',\n+ 'protein name': 
'Heat shock factor protein 1',\n+ 'protein short name': 'HSF 1',\n+ 'ref': 'ENSP00000332698'}],\n+ 'RetentionTimeList': [{'RetentionTime': [{'predicted retention time': 44.07,\n+ 'software': {'name': 'SSRCalc', 'id': 'SSRCalc3.0', 'version': '3.0'}},\n+ {'H-PINS retention time normalization standard': '',\n+ 'normalized retention time': 38.43}]}],\n+ 'heavy labeled peptide': '',\n+ 'id': 'ADTHFLLNIYDQLR-M1',\n+ 'isomerization potential': 0.583,\n+ 'peptide group label': 'G1',\n+ 'predicted isoelectric point': 5.22,\n+ 'sequence': 'ADTHFLLNIYDQLR',\n+ 'theoretical mass': 1189.22}},\n+ {'IntermediateProduct': [{'ConfigurationList': {'Configuration': [{'collision energy': 26.0,\n+ 'contact': {'contact URL': 'http://www.systemsbiology.org/',\n+ 'contact address': '1441 NE 34th St, Seattle WA 98103, USA',\n+ 'contact email': 'example@systemsbiology.org',\n+ 'contact name': 'Eric Deutsch',\n+ 'contact organization': 'Institute for Systems Biology',\n+ 'id': 'CS'},\n+ 'instrument': {'4000 QTRAP': '', 'id': 'QTRAP'}}]},\n+ 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion',\n+ 'product interpretation rank': 1.0,\n+ 'product ion m/z delta': 0.03,\n+ 'product ion series ordinal': 8.0}]},\n+ 'charge state': 1.0,\n+ 'isolation window target m/z': 1040.57}],\n+ 'Precursor': {'charge state': 2.0,\n+ 'isolation window lower offset': 1.0,\n+ 'isolation window target m/z': 862.9467,\n+ 'isolation window upper offset': 1.0},\n+ 'Product': {'ConfigurationList': {'Configuration': [{'collision energy': 20.4,\n+ 'contact': {'contact URL': 'http://www.systemsbiology.org/',\n+ 'contact address': '1441 NE 34th St, Seattle WA 98103, USA',\n+ 'contact email': 'example@systemsbiology.org',\n+ 'contact name': 'Eric Deutsch',\n+ 'contact organization': 'Institute for Systems Biology',\n+ 'id': 'CS'},\n+ 'dwell time': 0.12,\n+ 'instrument': {'4000 QTRAP': '', 'id': 'QTRAP'}}]},\n+ 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion',\n+ 'product interpretation rank': 1.0,\n+ 'product ion m/z delta': 0.03,\n+ 'product ion series ordinal': 4.0}]},\n+ 'charge state': 1.0,\n+ 'isolation window target m/z': 543.2},\n+ 'RetentionTime': [{'local retention time': 40.02,\n+ 'retention time window lower offset': 3.0,\n+ 'retention time window upper offset': 3.0,\n+ 'software': {'name': 'Skyline', 'id': 'Skyline0.5', 'version': '0.5'}}],\n+ 'id': 'ADTHFLLNIYDQLR-M1-T2',\n+ 'peptide': {'Evidence': {'confident peptide': 6.0},\n+ 'Modification': [{'location': 0,\n+ 'monoisotopicMassDelta': 127.063324,\n+ 'name': 'SMA'},\n+ {'location': 1, 'monoisotopicMassDelta': 15.994919, 'name': 'Oxidation'}],\n+ 'Protein': [{'Sequence': 'MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKE',\n+ 'id': 'Q12149',\n+ 'protein accession': 'Q00613',\n+ 'protein name': 'Heat shock factor protein 1',\n+ 'protein short name': 'HSF 1',\n+ 'ref': 'Q12149'},\n+ {'Sequence': 'MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKEELISNSSDALDKI',\n+ 'id': 'ENSP00000332698',\n+ 'protein accession': 'ENSP00000332698',\n+ 'protein name': 'Heat shock factor protein 1',\n+ 'protein short name': 'HSF 1',\n+ 'ref': 'ENSP00000332698'}],\n+ 'RetentionTimeList': [{'RetentionTime': [{'predicted retention time': 44.07,\n+ 'software': {'name': 'SSRCalc', 'id': 'SSRCalc3.0', 'version': '3.0'}},\n+ {'H-PINS retention time normalization standard': '',\n+ 'normalized retention time': 38.43}]}],\n+ 'heavy labeled peptide': '',\n+ 'id': 'ADTHFLLNIYDQLR-M1',\n+ 'isomerization potential': 0.583,\n+ 'peptide group label': 
'G1',\n+ 'predicted isoelectric point': 5.22,\n+ 'sequence': 'ADTHFLLNIYDQLR',\n+ 'theoretical mass': 1189.22}}]\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_featurexml.py",
"new_path": "tests/test_featurexml.py",
"diff": "@@ -5,7 +5,7 @@ pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir\nimport unittest\nfrom itertools import product\nfrom data import features\n-from pyteomics.openms.featurexml import *\n+from pyteomics.openms.featurexml import FeatureXML, read, chain\nclass FeatureXMLTest(unittest.TestCase):\nmaxDiff = None\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/test_traml.py",
"diff": "+from os import path\n+import pyteomics\n+pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\n+\n+import unittest\n+from itertools import product\n+from data import transitions\n+from pyteomics.traml import TraML, read, chain\n+\n+class FeatureXMLTest(unittest.TestCase):\n+ maxDiff = None\n+ path = 'ToyExample1.TraML'\n+ def testRead(self):\n+ for rs, it, ui in product([True, False], repeat=3):\n+ for func in [TraML, read, chain,\n+ lambda x, **kw: chain.from_iterable([x], **kw)]:\n+ with func(self.path, read_schema=rs, iterative=it, use_index=ui) as r:\n+ self.assertEqual(transitions, list(r))\n+\n+if __name__ == '__main__':\n+ unittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add more handlers in traml, add test |
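A minimal usage sketch for the new TraML parser, mirroring the test added in this commit; 'ToyExample1.TraML' is the test asset added here, and the dictionary keys come from the expected `transitions` data above:

    from pyteomics import traml
    # Iterate over <Transition> elements; each is flattened into a dict.
    with traml.read('ToyExample1.TraML') as reader:
        for transition in reader:
            print(transition['id'], transition['Precursor']['isolation window target m/z'])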
377,522 | 03.01.2019 20:13:47 | -10,800 | a1c0de9e655112b5fb70701ce6a80c7e3dd2fada | Add retrieve_refs=False to traml test | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/traml.py",
"new_path": "pyteomics/traml.py",
"diff": "@@ -127,7 +127,7 @@ class TraML(xml.MultiProcessingXML, xml.IndexSavingXML):\n-def read(source, read_schema=False, iterative=True, use_index=False, huge_tree=False):\n+def read(source, retrieve_refs=True, read_schema=False, iterative=True, use_index=False, huge_tree=False):\n\"\"\"Parse `source` and iterate through transitions.\nParameters\n@@ -135,6 +135,11 @@ def read(source, read_schema=False, iterative=True, use_index=False, huge_tree=F\nsource : str or file\nA path to a target TraML file or the file object itself.\n+ retrieve_refs : bool, optional\n+ If :py:const:`True`, additional information from references will be\n+ automatically added to the results. The file processing time will\n+ increase. Default is :py:const:`True`.\n+\nread_schema : bool, optional\nIf :py:const:`True`, attempt to extract information from the XML schema\nmentioned in the TraML header. Otherwise, use default parameters.\n@@ -159,11 +164,11 @@ def read(source, read_schema=False, iterative=True, use_index=False, huge_tree=F\nReturns\n-------\n- out : iterator\n- An iterator over the dicts with spectrum properties.\n+ out : TraML\n+ A :py:class:`TraML` object, suitable for iteration and possibly random access.\n\"\"\"\n- return TraML(source, read_schema=read_schema, iterative=iterative,\n+ return TraML(source, retrieve_refs=retrieve_refs, read_schema=read_schema, iterative=iterative,\nuse_index=use_index, huge_tree=huge_tree)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -2172,6 +2172,76 @@ protxml_results =[{'group_number': 1,\n'unique_stripped_peptides': ['AEIATEEFIPVTPIIEGFVIIR', 'AEIATEEFIPVTPIIEGFVIIRK', 'APVQPQQSPAAAPGGTDEKPSGK', 'AVPIAIAIISVSNPR', 'CAIGVFR', 'DKAPVQPQQSPAAAPGGTDEKPSGK', 'EIDIMEPK', 'EPIITIVK', 'EWQEIDDAEKVQREPIITIVK', 'FGGSGSQVDSAR', 'GTITICPYHSDR', 'INIIDTISK', 'IVGSQEEIASWGHEYVR', 'MIVTFDEEIRPIPVSVR', 'MNIASSFVNGFVNAAFGQDK', 'SGAIIACGIVNSGVR', 'TITGFQTHTTPVIIAHGER', 'VGQAVDVVGQAGKPK', 'VPDDIYKTHIENNR', 'VQREPIITIVK', 'YGEPTIR', 'YIYSSEDYIK']}]}]\ntransitions = [\n+[{'Precursor': {'charge state': 2.0,\n+ 'isolation window target m/z': 862.9467},\n+ 'Prediction': {'contactRef': 'CS',\n+ 'linear ion trap': '',\n+ 'peak intensity': 10000.0,\n+ 'peak intensity rank': 1.0,\n+ 'peak targeting suitability rank': 1.0,\n+ 'softwareRef': 'MaRiMba',\n+ 'transition purported from an MS/MS spectrum on a different, specified instrument': ''},\n+ 'Product': {'ConfigurationList': {'Configuration': [{'ValidationStatus': [{'4000 QTRAP': '',\n+ 'peak intensity': 4072.0,\n+ 'peak intensity rank': 2.0,\n+ 'peak targeting suitability rank': 1.0,\n+ 'transition optimized on specified instrument': ''}],\n+ 'collision energy': 26.0,\n+ 'collision gas': 'argon',\n+ 'collision gas pressure': 12.0,\n+ 'cone voltage': 1200.0,\n+ 'contactRef': 'CS',\n+ 'declustering potential': 64.0,\n+ 'dwell time': 0.12,\n+ 'instrumentRef': 'QTRAP',\n+ 'interchannel delay': 0.01,\n+ 'tube lens voltage': 23.0}]},\n+ 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion',\n+ 'product interpretation rank': 1.0,\n+ 'product ion m/z delta': 0.03,\n+ 'product ion series ordinal': 8.0},\n+ {'name': 'frag: b ion - H2O',\n+ 'product interpretation rank': 2.0,\n+ 'product ion m/z delta': -0.43,\n+ 'product ion series ordinal': 9.0}]},\n+ 'charge state': 1.0,\n+ 'isolation window target m/z': 1040.57},\n+ 'RetentionTime': [{'local retention time': 40.02,\n+ 'retention time window lower offset': 3.0,\n+ 'retention time window upper offset': 3.0,\n+ 'softwareRef': 'Skyline0.5'}],\n+ 'id': 'ADTHFLLNIYDQLR-M1-T1',\n+ 'peptideRef': 'ADTHFLLNIYDQLR-M1'},\n+ {'IntermediateProduct': [{'ConfigurationList': {'Configuration': [{'collision energy': 26.0,\n+ 'contactRef': 'CS',\n+ 'instrumentRef': 'QTRAP'}]},\n+ 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion',\n+ 'product interpretation rank': 1.0,\n+ 'product ion m/z delta': 0.03,\n+ 'product ion series ordinal': 8.0}]},\n+ 'charge state': 1.0,\n+ 'isolation window target m/z': 1040.57}],\n+ 'Precursor': {'charge state': 2.0,\n+ 'isolation window lower offset': 1.0,\n+ 'isolation window target m/z': 862.9467,\n+ 'isolation window upper offset': 1.0},\n+ 'Product': {'ConfigurationList': {'Configuration': [{'collision energy': 20.4,\n+ 'contactRef': 'CS',\n+ 'dwell time': 0.12,\n+ 'instrumentRef': 'QTRAP'}]},\n+ 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion',\n+ 'product interpretation rank': 1.0,\n+ 'product ion m/z delta': 0.03,\n+ 'product ion series ordinal': 4.0}]},\n+ 'charge state': 1.0,\n+ 'isolation window target m/z': 543.2},\n+ 'RetentionTime': [{'local retention time': 40.02,\n+ 'retention time window lower offset': 3.0,\n+ 'retention time window upper offset': 3.0,\n+ 'softwareRef': 'Skyline0.5'}],\n+ 'id': 'ADTHFLLNIYDQLR-M1-T2',\n+ 'peptideRef': 'ADTHFLLNIYDQLR-M1'}],\n+[\n{'Precursor': {'charge state': 2.0,\n'isolation window target m/z': 862.9467},\n'Prediction': {'contact': {'contact URL': 'http://www.systemsbiology.org/',\n@@ -2315,3 +2385,4 @@ transitions = [\n'predicted 
isoelectric point': 5.22,\n'sequence': 'ADTHFLLNIYDQLR',\n'theoretical mass': 1189.22}}]\n+]\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzid.py",
"new_path": "tests/test_mzid.py",
"diff": "@@ -19,10 +19,6 @@ class MzidTest(unittest.TestCase):\npsms = list(reader)\nself.assertEqual(psms, mzid_spectra[(rec, refs)])\n- def test_skip_empty_values(self):\n- with MzIdentML(self.path, skip_empty_cvparam_values=True, recursive=True, retrieve_refs=True) as f:\n- self.assertEqual(list(f), mzid_spectra[(True, True)])\n-\ndef test_unit_info(self):\nwith MzIdentML(self.path) as handle:\nfor protocol in handle.iterfind(\"SpectrumIdentificationProtocol\"):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_traml.py",
"new_path": "tests/test_traml.py",
"diff": "@@ -11,11 +11,11 @@ class FeatureXMLTest(unittest.TestCase):\nmaxDiff = None\npath = 'ToyExample1.TraML'\ndef testRead(self):\n- for rs, it, ui in product([True, False], repeat=3):\n+ for rs, it, ui, rr in product([True, False], repeat=4):\nfor func in [TraML, read, chain,\nlambda x, **kw: chain.from_iterable([x], **kw)]:\n- with func(self.path, read_schema=rs, iterative=it, use_index=ui) as r:\n- self.assertEqual(transitions, list(r))\n+ with func(self.path, read_schema=rs, iterative=it, use_index=ui, retrieve_refs=rr) as r:\n+ self.assertEqual(transitions[rr], list(r))\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add retrieve_refs=False to traml test |
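A sketch of what the new `retrieve_refs` switch changes, with values taken from the two expected-data variants above (reader behavior inferred from the test matrix):

    from pyteomics import traml
    # With retrieve_refs=False, references stay raw ('peptideRef');
    # with the default retrieve_refs=True they are resolved into full dicts.
    with traml.read('ToyExample1.TraML', retrieve_refs=False) as reader:
        print(next(reader)['peptideRef'])            # 'ADTHFLLNIYDQLR-M1'
    with traml.read('ToyExample1.TraML') as reader:
        print(next(reader)['peptide']['sequence'])   # 'ADTHFLLNIYDQLR'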
377,522 | 03.01.2019 20:45:02 | -10,800 | 3b33cbbdbb33649883a5d42fa07f09d21b125a09 | Fix multiprocessing for mzid and featurexml | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -108,7 +108,7 @@ class MzIdentML(xml.IndexSavingXML, xml.MultiProcessingXML):\n_default_version = '1.1.0'\n_default_iter_tag = 'SpectrumIdentificationResult'\n_structures_to_flatten = {'Fragmentation'}\n- _indexed_tags = {\n+ _indexed_tags = {'SpectrumIdentificationResult',\n'PeptideEvidence', 'SpectrumIdentificationItem', 'SearchDatabase',\n'DBSequence', 'SpectraData', 'Peptide'}\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/openms/featurexml.py",
"new_path": "pyteomics/openms/featurexml.py",
"diff": "@@ -39,7 +39,7 @@ This module requres :py:mod:`lxml`.\nfrom .. import xml, auxiliary as aux, _schema_defaults\n-class FeatureXML(xml.IndexedXML):\n+class FeatureXML(xml.MultiProcessingXML):\n\"\"\"Parser class for featureXML files.\"\"\"\nfile_format = 'featureXML'\n_root_element = 'featureMap'\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_featurexml.py",
"new_path": "tests/test_featurexml.py",
"diff": "@@ -3,18 +3,24 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport unittest\n+import operator as op\nfrom itertools import product\nfrom data import features\nfrom pyteomics.openms.featurexml import FeatureXML, read, chain\nclass FeatureXMLTest(unittest.TestCase):\nmaxDiff = None\n+ path = 'test.featureXML'\ndef testRead(self):\nfor rs, it, ui in product([True, False], repeat=3):\nfor func in [FeatureXML, read, chain,\nlambda x, **kw: chain.from_iterable([x], **kw)]:\n- with func('test.featureXML', read_schema=rs, iterative=it, use_index=ui) as r:\n+ with func(self.path, read_schema=rs, iterative=it, use_index=ui) as r:\nself.assertEqual(features, list(r))\n+ def test_map(self):\n+ self.assertEqual(sorted(features, key=op.itemgetter('id')),\n+ sorted(FeatureXML(self.path).map(), key=op.itemgetter('id')))\n+\nif __name__ == '__main__':\nunittest.main()\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzid.py",
"new_path": "tests/test_mzid.py",
"diff": "@@ -6,6 +6,7 @@ from pyteomics.mzid import MzIdentML, read, chain\nfrom pyteomics import auxiliary as aux\nfrom data import mzid_spectra\nfrom itertools import product\n+import operator as op\nclass MzidTest(unittest.TestCase):\nmaxDiff = None\n@@ -36,6 +37,10 @@ class MzidTest(unittest.TestCase):\nindex = aux.cvquery(datum)\nassert index['MS:1000774'] == 'multiple peak list nativeID format'\n+ def test_map(self):\n+ self.assertEqual(len(mzid_spectra[(1, 1)]),\n+ sum(1 for _ in MzIdentML(self.path).map()))\n+\nif __name__ == '__main__':\nunittest.main()\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_traml.py",
"new_path": "tests/test_traml.py",
"diff": "@@ -4,6 +4,7 @@ pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir\nimport unittest\nfrom itertools import product\n+import operator as op\nfrom data import transitions\nfrom pyteomics.traml import TraML, read, chain\n@@ -17,5 +18,10 @@ class FeatureXMLTest(unittest.TestCase):\nwith func(self.path, read_schema=rs, iterative=it, use_index=ui, retrieve_refs=rr) as r:\nself.assertEqual(transitions[rr], list(r))\n+ def test_map(self):\n+ self.assertEqual(sorted(transitions[1], key=op.itemgetter('id')),\n+ sorted(TraML(self.path).map(), key=op.itemgetter('id')))\n+\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix multiprocessing for mzid and featurexml |
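The new `test_map` cases exercise the `map()` method that `MultiProcessingXML` provides. A hedged sketch ('example.mzid' is a hypothetical path):

    from pyteomics import mzid
    # map() parses entries in worker processes; result order is not guaranteed,
    # which is why the tests above sort by 'id' or just count the results.
    with mzid.MzIdentML('example.mzid') as reader:
        n_results = sum(1 for _ in reader.map())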
377,522 | 04.01.2019 02:32:37 | -10,800 | 44816e5d87422b24bc440d25d4901b6c4d9d68bf | Minor test brush-ups | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mzid.py",
"new_path": "tests/test_mzid.py",
"diff": "@@ -6,7 +6,6 @@ from pyteomics.mzid import MzIdentML, read, chain\nfrom pyteomics import auxiliary as aux\nfrom data import mzid_spectra\nfrom itertools import product\n-import operator as op\nclass MzidTest(unittest.TestCase):\nmaxDiff = None\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzxml.py",
"new_path": "tests/test_mzxml.py",
"diff": "import os\n-from os import path\nimport pyteomics\n-pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\n+pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]\nfrom itertools import product\nimport unittest\n-from pyteomics.mzxml import *\n+from pyteomics.mzxml import MzXML, read, chain\n+from pyteomics import xml\nfrom data import mzxml_spectra\nimport tempfile\nimport shutil\n@@ -40,25 +40,25 @@ class MzXMLTest(unittest.TestCase):\ndef test_prebuild_index(self):\ntest_dir = tempfile.mkdtemp()\n- work_path = path.join(test_dir, self.path)\n+ work_path = os.path.join(test_dir, self.path)\nwith open(work_path, 'w') as dest, open(self.path) as source:\ndest.write(source.read())\nassert dest.closed\nwith MzXML(work_path, use_index=True) as inst:\n- offsets_exist = path.exists(inst._byte_offset_filename)\n+ offsets_exist = os.path.exists(inst._byte_offset_filename)\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.HierarchicalOffsetIndex))\nself.assertTrue(inst._source.closed)\nMzXML.prebuild_byte_offset_file(work_path)\nwith MzXML(work_path, use_index=True) as inst:\n- offsets_exist = path.exists(inst._byte_offset_filename)\n+ offsets_exist = os.path.exists(inst._byte_offset_filename)\nself.assertTrue(offsets_exist)\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.HierarchicalOffsetIndex))\nself.assertTrue(inst._source.closed)\nos.remove(inst._byte_offset_filename)\nwith MzXML(work_path, use_index=True) as inst:\n- offsets_exist = path.exists(inst._byte_offset_filename)\n+ offsets_exist = os.path.exists(inst._byte_offset_filename)\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.HierarchicalOffsetIndex))\nself.assertTrue(inst._source.closed)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Minor test brush-ups |
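The reworked test revolves around byte-offset index persistence. A short sketch of the workflow it verifies (the file name is a placeholder, and the key '20' is the spectrum 'num' from the test data):

    from pyteomics import mzxml
    # Build the sidecar offset file once; subsequent indexed readers load it
    # instead of re-scanning the whole XML document.
    mzxml.MzXML.prebuild_byte_offset_file('spectra.mzXML')
    with mzxml.MzXML('spectra.mzXML', use_index=True) as reader:
        spectrum = reader['20']   # random access by the spectrum's 'num' attribute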
377,522 | 05.01.2019 20:03:53 | -10,800 | 5cbdfddf3f5a179b320bec590e7850bb7d634f85 | Fix in mzml test | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -44,11 +44,11 @@ class MzmlTest(unittest.TestCase):\nspectrum = next(reader)\nrecord = spectrum['m/z array']\nself.assertEqual(record.compression, \"no compression\")\n- self.assertEqual(record.dtype, \"d\")\n+ self.assertEqual(record.dtype, np.float64)\narray = record.decode()\nself.assertTrue(np.allclose(validation, array))\nrecord = spectrum['intensity array']\n- self.assertEqual(record.dtype, \"f\")\n+ self.assertEqual(record.dtype, np.float32)\nself.assertEqual(record.compression, \"no compression\")\nspectrum = next(reader)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix in mzml test |
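The assertion change is cosmetic: the single-character type codes and the NumPy scalar types name the same dtypes, and the latter read better in a failing test. A quick check:

    import numpy as np
    # 'd' and 'f' are the C double/float type codes.
    assert np.dtype('d') == np.float64
    assert np.dtype('f') == np.float32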
377,522 | 05.01.2019 20:11:36 | -10,800 | 0220c6509b9da5e024ff9c14fa7ae8be69a0b17a | Get rid of warnings patching | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -5,9 +5,6 @@ except NameError:\nfrom . import patch as __patch\n-import warnings\n-warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\n-\nfrom .structures import (\nPyteomicsError, Charge, ChargeList,\n_parse_charge, BasicComposition,\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -8,7 +8,6 @@ import json\nimport multiprocessing as mp\nimport threading\nimport warnings\n-# warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\ntry:\nbasestring\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -49,7 +49,6 @@ try: # Python 2.7\nexcept ImportError: # Python 3.x\nfrom urllib.request import urlopen, URLError\n-# warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\ndef _local_name(element):\n\"\"\"Strip namespace from the XML element's name\"\"\"\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Get rid of warnings patching |
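For reference, the removed pattern is shown below; assigning to `warnings.formatwarning` mutates process-global state and reformats warnings for every library in the interpreter, which is presumably why it was dropped from the package import path:

    import warnings
    # The pattern removed by this commit -- affects all warnings process-wide.
    warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\n'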
377,522 | 07.01.2019 15:23:31 | -10,800 | 8435e7d23b465dbcc5d5fa036067dd6d5b5bf7a1 | Patch release 4.0.1 | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "+4.0.1\n+-----\n+\n+Fix issue `#35 <https://bitbucket.org/levitsky/pyteomics/issues/35/ordereddict-may-be-in-reversed-order-on>`_\n+(incorrect order of deserialized offset indexes on older Python versions).\n+\n4.0\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.0\n\\ No newline at end of file\n+4.0.1\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Patch release 4.0.1 |
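Issue #35 is about offset indexes deserializing in the wrong order on Python versions without ordered dicts. The snippet below illustrates the general class of fix (preserving key order with `object_pairs_hook`); it is not necessarily the exact patch shipped in 4.0.1:

    import json
    from collections import OrderedDict
    # Keys come back in document order regardless of the Python version.
    index = json.loads('{"scan=1": 0, "scan=2": 512}', object_pairs_hook=OrderedDict)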
377,522 | 30.01.2019 18:29:23 | -10,800 | b90cc511c9c44c17e85c9aec956205153bf78dff | Add plot_spectrum function | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-4.1\n----\n+4.1.dev\n+-------\n- New module :py:mod:`pyteomics.mztab` provides a parser for `mzTab files <http://www.psidev.info/mztab>`_.\n- New module :py:mod:`pyteomics.ms2` provides a parser for **ms2** files.\nThis is in fact an alias to :py:mod:`ms1`, which handles both formats.\n+- New helper function :py:func:`pyteomics.pylab_aux.plot_spectrum`.\n+\n4.0.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1a0\n\\ No newline at end of file\n+4.1a1\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -17,6 +17,11 @@ Generic plotting\n:py:func:`plot_function_contour` - plot a contour graph of a function of\ntwo variables.\n+Spectrum visualization\n+----------------------\n+\n+ :py:func:`plot_spectrum` - plot a single spectrum (m/z vs intensity).\n+\nFDR control\n-----------\n@@ -294,3 +299,34 @@ def plot_qvalue_curve(qvalues, *args, **kwargs):\npylab.ylabel(kwargs.pop('ylabel', '# of PSMs'))\npylab.title(kwargs.pop('title', ''))\nreturn pylab.plot(qvalues, 1+np.arange(qvalues.size), *args, **kwargs)\n+\n+def plot_spectrum(spectrum, centroided=False, *args, **kwargs):\n+ \"\"\"\n+ Plot a spectrum, assuming it is a dictionary containing \"m/z array\" and \"intensity array\".\n+\n+ Parameters\n+ ----------\n+ spectrum : dict\n+ A dictionary, as returned by MGF, mzML or mzXML parsers.\n+ Must contain \"m/z array\" and \"intensity array\" keys with decoded arrays.\n+ centroided : bool, optional\n+ If :py:const:`True`, peaks of the spectrum are plotted using :py:func:`pylab.bar`.\n+ If :py:const:`False` (default), the arrays are simply plotted using :py:func:`pylab.plot`.\n+ xlabel : str, optional\n+ Label for the X axis. Default is \"m/z\".\n+ ylabel : str, optional\n+ Label for the Y axis. Default is \"intensity\".\n+ title : str, optional\n+ The title. Empty by default.\n+ *args, **kwargs : will be given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).\n+ \"\"\"\n+ pylab.xlabel(kwargs.pop('xlabel', 'm/z'))\n+ pylab.ylabel(kwargs.pop('ylabel', 'intensity'))\n+ pylab.title(kwargs.pop('title', ''))\n+ if centroided:\n+ kwargs.setdefault('align', 'center')\n+ kwargs.setdefault('width', 0)\n+ kwargs.setdefault('linewidth', 1)\n+ kwargs.setdefault('edgecolor', 'k')\n+ return pylab.bar(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+ return pylab.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add plot_spectrum function |
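A minimal usage sketch for the new function ('spectra.mgf' is a placeholder; any parser output with decoded 'm/z array' and 'intensity array' keys works):

    import pylab
    from pyteomics import mgf, pylab_aux
    with mgf.read('spectra.mgf') as reader:
        spectrum = next(reader)
    pylab_aux.plot_spectrum(spectrum, centroided=True, title='First spectrum')
    pylab.show()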
377,522 | 31.01.2019 17:30:16 | -10,800 | f7151a1eda23f591e82257cdf6e26e277c01120d | Add RefSeq parser to fasta | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -549,6 +549,24 @@ class IndexedNCBI(NCBIMixin, TwoLayerIndexedFASTA):\npass\n+class RefSeqMixin(FlavoredMixin):\n+ header_pattern = r'^ref\\|([^|]+)\\|\\s*([^\\[]*\\S)\\s*\\[(.*)\\]'\n+\n+ def parser(self, header):\n+ ID, description, organism = re.match(self.header_pattern, header).groups()\n+ return {'id': ID, 'description': description, 'taxon': organism}\n+\n+\n+@_add_init\n+class RefSeq(RefSeqMixin, FASTA):\n+ pass\n+\n+\n+@_add_init\n+class IndexedRefSeq(RefSeqMixin, TwoLayerIndexedFASTA):\n+ pass\n+\n+\ndef read(source=None, use_index=None, flavor=None, **kwargs):\n\"\"\"Parse a FASTA file. This function serves as a dispatcher between\ndifferent parsers available in this module.\n@@ -854,6 +872,7 @@ def _intify(d, keys):\nstd_parsers = {'uniprot': (UniProt, IndexedUniProt), 'uniref': (UniRef, IndexedUniRef),\n'uniparc': (UniParc, IndexedUniParc), 'unimes': (UniMes, IndexedUniMes),\n'spd': (SPD, IndexedSPD), 'ncbi': (NCBI, IndexedNCBI),\n+ 'refseq': (RefSeq, IndexedRefSeq),\nNone: (FASTA, IndexedFASTA)}\n\"\"\"A dictionary with parsers for known FASTA header formats. For now, supported\nformats are those described at\n@@ -861,7 +880,7 @@ formats are those described at\n_std_mixins = {'uniprot': UniProtMixin, 'uniref': UniRefMixin,\n'uniparc': UniParcMixin, 'unimes': UniMesMixin, 'spd': SPDMixin,\n- 'ncbi': NCBIMixin}\n+ 'ncbi': NCBIMixin, 'refseq': RefSeqMixin}\ndef parse(header, flavor='auto', parsers=None):\n\"\"\"Parse the FASTA header and return a nice dictionary.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add RefSeq parser to fasta |
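A sketch of parsing a RefSeq-style header with the new flavor; the accession and description below are made up:

    from pyteomics import fasta
    header = 'ref|NP_000001.1|alpha-1 antitrypsin precursor [Homo sapiens]'
    print(fasta.parse(header, flavor='refseq'))
    # {'id': 'NP_000001.1', 'description': 'alpha-1 antitrypsin precursor',
    #  'taxon': 'Homo sapiens'}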
377,522 | 04.02.2019 19:11:10 | -10,800 | 82d532235230c694df636fe3bfe34b2bbea08c5d | Draft annotate_spectrum function | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -61,6 +61,7 @@ This module requires :py:mod:`matplotlib`.\nimport pylab\nimport numpy as np\nfrom .auxiliary import linear_regression, PyteomicsError\n+from . import parser, mass\ndef plot_line(a, b, xlim=None, *args, **kwargs):\n\"\"\"Plot a line y = a * x + b.\n@@ -300,7 +301,7 @@ def plot_qvalue_curve(qvalues, *args, **kwargs):\npylab.title(kwargs.pop('title', ''))\nreturn pylab.plot(qvalues, 1+np.arange(qvalues.size), *args, **kwargs)\n-def plot_spectrum(spectrum, centroided=False, *args, **kwargs):\n+def plot_spectrum(spectrum, centroided=True, *args, **kwargs):\n\"\"\"\nPlot a spectrum, assuming it is a dictionary containing \"m/z array\" and \"intensity array\".\n@@ -310,8 +311,8 @@ def plot_spectrum(spectrum, centroided=False, *args, **kwargs):\nA dictionary, as returned by MGF, mzML or mzXML parsers.\nMust contain \"m/z array\" and \"intensity array\" keys with decoded arrays.\ncentroided : bool, optional\n- If :py:const:`True`, peaks of the spectrum are plotted using :py:func:`pylab.bar`.\n- If :py:const:`False` (default), the arrays are simply plotted using :py:func:`pylab.plot`.\n+ If :py:const:`True` (default), peaks of the spectrum are plotted using :py:func:`pylab.bar`.\n+ If :py:const:`False`, the arrays are simply plotted using :py:func:`pylab.plot`.\nxlabel : str, optional\nLabel for the X axis. Default is \"m/z\".\nylabel : str, optional\n@@ -330,3 +331,38 @@ def plot_spectrum(spectrum, centroided=False, *args, **kwargs):\nkwargs.setdefault('edgecolor', 'k')\nreturn pylab.bar(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\nreturn pylab.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)\n+\n+\n+def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\n+ types = kwargs.pop('types', ('b', 'y'))\n+ maxcharge = kwargs.pop('maxcharge', 1)\n+ aa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)\n+ std_colors = {i: 'red' for i in 'xyz'}\n+ std_colors.update({i: 'blue' for i in 'abc'})\n+ colors = kwargs.pop('colors', std_colors)\n+ tol = kwargs.pop('accuracy', 1e-5)\n+ parsed = parser.parse(peptide, True)\n+ n = len(parsed)\n+ mz, names = {}, {}\n+ for ion in types:\n+ for charge in range(1, maxcharge+1):\n+ if ion in 'abc':\n+ for i in range(2, n):\n+ mz.setdefault(ion, []).append(mass.fast_mass2(parsed[1:i], aa_mass=aa_mass, charge=charge, ion_type=ion))\n+ names.setdefault(ion, []).append(ion + str(i-1))\n+ else:\n+ for i in range(1, n-2):\n+ mz.setdefault(ion, []).append(mass.fast_mass2(parsed[n-(i+1):-1], aa_mass=aa_mass, charge=charge, ion_type=ion))\n+ names.setdefault(ion, []).append(ion + str(i))\n+\n+ plot_spectrum(spectrum, centroided, *args, **kwargs)\n+ for ion in types:\n+ c = colors.get(ion, 'blue')\n+ match = np.where(np.abs(spectrum['m/z array'] - np.array(mz[ion]).reshape(-1, 1)) / spectrum['m/z array'] < tol)\n+ pseudo_spec = {'m/z array': spectrum['m/z array'][match[1]], 'intensity array': spectrum['intensity array'][match[1]]}\n+ plot_spectrum(pseudo_spec, centroided=True, edgecolor=c)\n+ for j, i in zip(*match):\n+ x = spectrum['m/z array'][i]\n+ y = spectrum['intensity array'][i]\n+ name = names[ion][j]\n+ pylab.text(x, y, name, color=c, ha='center', clip_on=True)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Draft annotate_spectrum function |
377,522 | 04.02.2019 19:39:34 | -10,800 | c99c608c9a6c428973dbd56f43e7d32610dba9f0 | Allow empty index keys in XML | [
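A hedged sketch of calling the drafted function; `spectrum` stands for a dict with decoded arrays, 'PEPTIDE' for a modX sequence, and at this stage the matching tolerance keyword is `accuracy`:

    from pyteomics import pylab_aux
    pylab_aux.annotate_spectrum(spectrum, 'PEPTIDE', types=('b', 'y'),
                                maxcharge=2, accuracy=1e-5)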
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -815,7 +815,7 @@ class ByteCountingXMLScanner(_file_obj):\ni = 0\npacked = b\"|\".join(self.indexed_tags)\npattern = re.compile((r\"^\\s*<(%s)\\s\" % packed.decode()).encode())\n- attrs = re.compile(br\"(\\S+)=[\\\"']([^\\\"']+)[\\\"']\")\n+ attrs = re.compile(br\"(\\S+)=[\\\"']([^\\\"']*)[\\\"']\")\nfor line in self._chunk_iterator():\nmatch = pattern.match(line)\nif match:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Allow empty index keys in XML |
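The one-character change (`+` to `*` in the value group) lets the byte-offset scanner index elements whose attributes have empty values. A quick demonstration of the new pattern:

    import re
    attrs = re.compile(br"(\S+)=[\"']([^\"']*)[\"']")
    print(attrs.findall(b'<spectrum id="" index="7">'))
    # [(b'id', b''), (b'index', b'7')] -- with '+' the empty id was skipped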
377,522 | 05.02.2019 17:40:29 | -10,800 | 30f43c507d843d506b1229f50cebb392f734ddce | More permissive title matching in mgf | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -251,7 +251,7 @@ class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\n\"\"\"\ndelimiter = 'BEGIN IONS'\n- label = r'TITLE=([^\\n]*\\w)\\s*'\n+ label = r'TITLE=([^\\n]*\\S)\\s*'\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\ndtype=None, encoding='utf-8', block_size=1000000, _skip_index=False):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | More permissive title matching in mgf |
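The old pattern required the captured title to end in a word character, which silently truncated titles ending in punctuation; `\S` only requires a non-space character. A demonstration:

    import re
    old = re.compile(r'TITLE=([^\n]*\w)\s*')
    new = re.compile(r'TITLE=([^\n]*\S)\s*')
    line = 'TITLE=Spectrum 1 (scan=42)\n'
    print(old.match(line).group(1))   # 'Spectrum 1 (scan=42' -- ')' lost
    print(new.match(line).group(1))   # 'Spectrum 1 (scan=42)'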
377,522 | 06.02.2019 15:44:15 | -10,800 | c43d75ade257d966b708a7e3410afca1c7e2ce44 | Adjust annotate_spectrum | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -343,26 +343,27 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\ntol = kwargs.pop('accuracy', 1e-5)\nparsed = parser.parse(peptide, True)\nn = len(parsed)\n+ maxpeak = spectrum['intensity array'].max()\nmz, names = {}, {}\nfor ion in types:\nfor charge in range(1, maxcharge+1):\nif ion in 'abc':\nfor i in range(2, n):\nmz.setdefault(ion, []).append(mass.fast_mass2(parsed[1:i], aa_mass=aa_mass, charge=charge, ion_type=ion))\n- names.setdefault(ion, []).append(ion + str(i-1))\n+ names.setdefault(ion, []).append(ion[0] + str(i-1) + ion[1:])\nelse:\nfor i in range(1, n-2):\nmz.setdefault(ion, []).append(mass.fast_mass2(parsed[n-(i+1):-1], aa_mass=aa_mass, charge=charge, ion_type=ion))\n- names.setdefault(ion, []).append(ion + str(i))\n+ names.setdefault(ion, []).append(ion[0] + str(i) + ion[1:])\nplot_spectrum(spectrum, centroided, *args, **kwargs)\nfor ion in types:\n- c = colors.get(ion, 'blue')\n+ c = colors.get(ion, colors.get(ion[0], 'blue'))\nmatch = np.where(np.abs(spectrum['m/z array'] - np.array(mz[ion]).reshape(-1, 1)) / spectrum['m/z array'] < tol)\npseudo_spec = {'m/z array': spectrum['m/z array'][match[1]], 'intensity array': spectrum['intensity array'][match[1]]}\nplot_spectrum(pseudo_spec, centroided=True, edgecolor=c)\nfor j, i in zip(*match):\nx = spectrum['m/z array'][i]\n- y = spectrum['intensity array'][i]\n+ y = spectrum['intensity array'][i] + maxpeak * 0.02\nname = names[ion][j]\npylab.text(x, y, name, color=c, ha='center', clip_on=True)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Adjust annotate_spectrum |
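The label change splices the fragment ordinal after the first character of the ion type, so multi-character types stay readable:

    # e.g. a y3 ion with water loss:
    ion, i = 'y-H2O', 3
    print(ion[0] + str(i) + ion[1:])   # 'y3-H2O'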
377,522 | 06.02.2019 19:05:40 | -10,800 | a1e623e4fa105e317b800488d5e3a2a38dcbdae7 | Add adjustment for text | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -340,7 +340,20 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nstd_colors = {i: 'red' for i in 'xyz'}\nstd_colors.update({i: 'blue' for i in 'abc'})\ncolors = kwargs.pop('colors', std_colors)\n- tol = kwargs.pop('accuracy', 1e-5)\n+ ftol = kwargs.pop('ftol', None)\n+ if ftol is None:\n+ rtol = kwargs.pop('rtol', 1e-5)\n+ adjust = kwargs.pop('adjust_text', None)\n+ if adjust or adjust is None:\n+ try:\n+ from adjustText import adjust_text\n+ except ImportError:\n+ if adjust:\n+ raise PyteomicsError('Install adjustText for text adjustment')\n+ adjust = False\n+ else:\n+ if adjust is None:\n+ adjust = True\nparsed = parser.parse(peptide, True)\nn = len(parsed)\nmaxpeak = spectrum['intensity array'].max()\n@@ -357,13 +370,20 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nnames.setdefault(ion, []).append(ion[0] + str(i) + ion[1:])\nplot_spectrum(spectrum, centroided, *args, **kwargs)\n+ texts = []\nfor ion in types:\nc = colors.get(ion, colors.get(ion[0], 'blue'))\n- match = np.where(np.abs(spectrum['m/z array'] - np.array(mz[ion]).reshape(-1, 1)) / spectrum['m/z array'] < tol)\n+ matrix = np.abs(spectrum['m/z array'] - np.array(mz[ion]).reshape(-1, 1))\n+ if ftol is not None:\n+ match = np.where(matrix < ftol)\n+ else:\n+ match = np.where(matrix / spectrum['m/z array'] < rtol)\npseudo_spec = {'m/z array': spectrum['m/z array'][match[1]], 'intensity array': spectrum['intensity array'][match[1]]}\nplot_spectrum(pseudo_spec, centroided=True, edgecolor=c)\nfor j, i in zip(*match):\nx = spectrum['m/z array'][i]\ny = spectrum['intensity array'][i] + maxpeak * 0.02\nname = names[ion][j]\n- pylab.text(x, y, name, color=c, ha='center', clip_on=True)\n+ texts.append(pylab.text(x, y, name, color=c, ha='center', clip_on=True, backgroundcolor='#ffffff99'))\n+ if adjust:\n+ adjust_text(texts, only_move={'text': 'y', 'points': 'y', 'objects': 'y'}, autoalign=False, force_text=(1, 1))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add adjustment for text |
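How the new toggle behaves, per the logic above (`spectrum` and `peptide` are placeholders): with `adjust_text=None` (the default), adjustment is used only if the optional adjustText package imports; `True` without it installed raises `PyteomicsError`; `False` skips adjustment entirely.

    from pyteomics import pylab_aux
    pylab_aux.annotate_spectrum(spectrum, peptide, adjust_text=False)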
377,522 | 08.02.2019 18:50:25 | -10,800 | 5646a76ee9cdb92e544484e26e2a87cc1eb8fade | Add doc for annotate_spectrum | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- New module :py:mod:`pyteomics.ms2` provides a parser for **ms2** files.\nThis is in fact an alias to :py:mod:`ms1`, which handles both formats.\n-- New helper function :py:func:`pyteomics.pylab_aux.plot_spectrum`.\n+- New helper functions :py:func:`pyteomics.pylab_aux.plot_spectrum` and :py:func:`pyteomics.pylab_aux.annotate_spectrum`.\n+\n+- Fixes.\n+\n4.0.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1a1\n\\ No newline at end of file\n+4.1a2\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -22,6 +22,8 @@ Spectrum visualization\n:py:func:`plot_spectrum` - plot a single spectrum (m/z vs intensity).\n+ :py:func:`annotate_spectrum` - plot and annotate peaks in MS/MS spectrum.\n+\nFDR control\n-----------\n@@ -334,15 +336,48 @@ def plot_spectrum(spectrum, centroided=True, *args, **kwargs):\ndef annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\n+ \"\"\"Plot a spectrum and annotate matching fragment peaks.\n+\n+ Parameters\n+ ----------\n+ spectrum : dict\n+ A spectrum as returned by Pyteomics parsers. Needs to have 'm/z array' and 'intensity array' keys.\n+ peptide : str\n+ A modX sequence.\n+ centroided : bool, optional\n+ Passed to :py:func:`plot_spectrum`.\n+ types : Container, optional\n+ Ion types to be considered for annotation. Default is `('b', 'y')`.\n+ colors : dict, optional\n+ Keys are ion types, values are colors to plot the annotated peaks with. Defaults to a red-blue scheme.\n+ ftol : float, optional\n+ A fixed m/z tolerance value for peak matching. Alternative to `rtol`.\n+ rtol : float, optional\n+ A relative m/z error for peak matching. Default is 10 ppm.\n+ adjust_text : bool, optional\n+ Adjust the overlapping text annotations using :py:mod:`adjustText`.\n+ text_kw : dict, optional\n+ Keyword arguments for :py:func:`pylab.text`.\n+ ion_comp : dict, optional\n+ A dictionary defining definitions of ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.\n+ mass_data : dict, optional\n+ A dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.\n+ aa_mass : dict, optional\n+ A dictionary of amino acid residue masses.\n+ *args, **kwargs : passed to :py:func:`plot_spectrum`.\n+ \"\"\"\ntypes = kwargs.pop('types', ('b', 'y'))\nmaxcharge = kwargs.pop('maxcharge', 1)\naa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)\n+ mass_data = kwargs.pop('mass_data', mass.nist_mass)\n+ ion_comp = kwargs.pop('ion_comp', mass.std_ion_comp)\nstd_colors = {i: 'red' for i in 'xyz'}\nstd_colors.update({i: 'blue' for i in 'abc'})\ncolors = kwargs.pop('colors', std_colors)\nftol = kwargs.pop('ftol', None)\nif ftol is None:\nrtol = kwargs.pop('rtol', 1e-5)\n+ text_kw = kwargs.pop('text_kw', dict(ha='center', clip_on=True, backgroundcolor='#ffffff99'))\nadjust = kwargs.pop('adjust_text', None)\nif adjust or adjust is None:\ntry:\n@@ -362,11 +397,13 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nfor charge in range(1, maxcharge+1):\nif ion in 'abc':\nfor i in range(2, n):\n- mz.setdefault(ion, []).append(mass.fast_mass2(parsed[1:i], aa_mass=aa_mass, charge=charge, ion_type=ion))\n+ mz.setdefault(ion, []).append(mass.fast_mass2(parsed[1:i],\n+ aa_mass=aa_mass, charge=charge, ion_type=ion, mass_data=mass_data, ion_comp=ion_comp))\nnames.setdefault(ion, []).append(ion[0] + str(i-1) + ion[1:])\nelse:\nfor i in range(1, n-2):\n- mz.setdefault(ion, []).append(mass.fast_mass2(parsed[n-(i+1):-1], aa_mass=aa_mass, charge=charge, ion_type=ion))\n+ mz.setdefault(ion, []).append(mass.fast_mass2(parsed[n-(i+1):-1],\n+ aa_mass=aa_mass, charge=charge, ion_type=ion, mass_data=mass_data, ion_comp=ion_comp))\nnames.setdefault(ion, []).append(ion[0] + str(i) + ion[1:])\nplot_spectrum(spectrum, centroided, *args, **kwargs)\n@@ -384,6 +421,6 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nx = spectrum['m/z array'][i]\ny = spectrum['intensity array'][i] + maxpeak * 0.02\nname = names[ion][j]\n- texts.append(pylab.text(x, y, name, color=c, ha='center', clip_on=True, 
backgroundcolor='#ffffff99'))\n+ texts.append(pylab.text(x, y, name, color=c, **text_kw))\nif adjust:\nadjust_text(texts, only_move={'text': 'y', 'points': 'y', 'objects': 'y'}, autoalign=False, force_text=(1, 1))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add doc for annotate_spectrum |
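The tolerance parameters documented here are alternatives, and `ftol`, when given, takes precedence over `rtol` (default 1e-5, i.e. 10 ppm). A sketch with placeholder inputs:

    from pyteomics import pylab_aux
    pylab_aux.annotate_spectrum(spectrum, peptide, ftol=0.02)   # absolute, 0.02 m/z
    pylab_aux.annotate_spectrum(spectrum, peptide, rtol=2e-5)   # relative, 20 ppm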
377,522 | 11.02.2019 16:58:51 | -10,800 | 5e0e2369e1ac9b96ab2aa08f50287d2487353135 | Bugfix in annotate_spectrum mass calculation | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -358,6 +358,8 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nAdjust the overlapping text annotations using :py:mod:`adjustText`.\ntext_kw : dict, optional\nKeyword arguments for :py:func:`pylab.text`.\n+ adjust_kw : dict, optional\n+ Keyword argyuments for `:py:func:`adjust_text`.\nion_comp : dict, optional\nA dictionary defining definitions of ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.\nmass_data : dict, optional\n@@ -382,6 +384,8 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nif adjust or adjust is None:\ntry:\nfrom adjustText import adjust_text\n+ adjust_kw = kwargs.pop('adjust_kw', dict(\n+ only_move={'text': 'y', 'points': 'y', 'objects': 'y'}, autoalign=False, force_text=(1, 1)))\nexcept ImportError:\nif adjust:\nraise PyteomicsError('Install adjustText for text adjustment')\n@@ -397,12 +401,12 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nfor charge in range(1, maxcharge+1):\nif ion in 'abc':\nfor i in range(2, n):\n- mz.setdefault(ion, []).append(mass.fast_mass2(parsed[1:i],\n+ mz.setdefault(ion, []).append(mass.fast_mass2(parsed[:i] + [parsed[-1]],\naa_mass=aa_mass, charge=charge, ion_type=ion, mass_data=mass_data, ion_comp=ion_comp))\nnames.setdefault(ion, []).append(ion[0] + str(i-1) + ion[1:])\nelse:\nfor i in range(1, n-2):\n- mz.setdefault(ion, []).append(mass.fast_mass2(parsed[n-(i+1):-1],\n+ mz.setdefault(ion, []).append(mass.fast_mass2([parsed[0]] + parsed[n-(i+1):],\naa_mass=aa_mass, charge=charge, ion_type=ion, mass_data=mass_data, ion_comp=ion_comp))\nnames.setdefault(ion, []).append(ion[0] + str(i) + ion[1:])\n@@ -423,4 +427,4 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nname = names[ion][j]\ntexts.append(pylab.text(x, y, name, color=c, **text_kw))\nif adjust:\n- adjust_text(texts, only_move={'text': 'y', 'points': 'y', 'objects': 'y'}, autoalign=False, force_text=(1, 1))\n+ adjust_text(texts, **adjust_kw)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Bugfix in annotate_spectrum mass calculation |
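The fix keeps the terminal groups in the slices passed to `fast_mass2`: with `show_unmodified_termini=True` the parsed list carries 'H-' and '-OH', and both must be present for the fragment mass to come out right. A sketch mirroring the corrected b-ion slicing:

    from pyteomics import parser, mass
    parsed = parser.parse('PEPTIDE', True)   # ['H-', 'P', 'E', ..., '-OH']
    # b2 ion: first two residues plus both terminal groups, as in the fixed code.
    b2_mz = mass.fast_mass2(parsed[:3] + [parsed[-1]], ion_type='b', charge=1)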
377,522 | 13.02.2019 15:02:20 | -10,800 | 47827631c4f0f5e2f72351798d4545c94028e6a8 | Bugfix in annotate_spectrum | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1a2\n\\ No newline at end of file\n+4.1a3\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -399,7 +399,7 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nmz, names = {}, {}\nfor ion in types:\nfor charge in range(1, maxcharge+1):\n- if ion in 'abc':\n+ if ion[0] in 'abc':\nfor i in range(2, n):\nmz.setdefault(ion, []).append(mass.fast_mass2(parsed[:i] + [parsed[-1]],\naa_mass=aa_mass, charge=charge, ion_type=ion, mass_data=mass_data, ion_comp=ion_comp))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Bugfix in annotate_spectrum |
377,522 | 17.02.2019 01:58:33 | -10,800 | 896996c7c70b9bdfaea095d94b2465b25376e84d | Allow naming expasy rules in cleave | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- New helper functions :py:func:`pyteomics.pylab_aux.plot_spectrum` and :py:func:`pyteomics.pylab_aux.annotate_spectrum`.\n+- The `rule` and `exception` arguments in :py:func:`pyteomics.parser.cleave` can be keys from :py:const:`expasy_rules`.\n+\n- Fixes.\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1a3\n\\ No newline at end of file\n+4.1a4\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/parser.py",
"new_path": "pyteomics/parser.py",
"diff": "@@ -207,10 +207,7 @@ _modX_sequence = re.compile(r'^([^-]+-)?((?:[^A-Z-]*[A-Z])+)(-[^-]+)?$')\n_modX_group = re.compile(r'[^A-Z-]*[A-Z]')\n_modX_split = re.compile(r'([^A-Z-]*)([A-Z])')\n-def parse(sequence,\n- show_unmodified_termini=False, split=False,\n- allow_unknown_modifications=False,\n- **kwargs):\n+def parse(sequence, show_unmodified_termini=False, split=False, allow_unknown_modifications=False, **kwargs):\n\"\"\"Parse a sequence string written in modX notation into a list of\nlabels or (if `split` argument is :py:const:`True`) into a list of\ntuples representing amino acid residues and their modifications.\n@@ -417,11 +414,7 @@ def tostring(parsed_sequence, show_unmodified_termini=True):\nlabels.append(''.join(cterm[:-1]))\nreturn ''.join(labels)\n-def amino_acid_composition(sequence,\n- show_unmodified_termini=False,\n- term_aa=False,\n- allow_unknown_modifications=False,\n- **kwargs):\n+def amino_acid_composition(sequence, show_unmodified_termini=False, term_aa=False, allow_unknown_modifications=False, **kwargs):\n\"\"\"Calculate amino acid composition of a polypeptide.\nParameters\n@@ -496,8 +489,7 @@ def amino_acid_composition(sequence,\nreturn aa_dict\n@memoize()\n-def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False,\n- exception=None):\n+def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exception=None):\n\"\"\"Cleaves a polypeptide sequence using a given rule.\nParameters\n@@ -511,7 +503,8 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False,\nwill not work as expected.\nrule : str or compiled regex\n- A `regular expression <https://docs.python.org/library/re.html#regular-expression-syntax>`_\n+ A key present in :py:const:`expasy_rules` or a\n+ `regular expression <https://docs.python.org/library/re.html#regular-expression-syntax>`_\ndescribing the site of cleavage. It is recommended\nto design the regex so that it matches only the residue whose C-terminal\nbond is to be cleaved. All additional requirements should be specified\n@@ -534,8 +527,8 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False,\nThis effectively cuts every peptide at every position and adds results to the output.\nexception : str or compiled RE or None, optional\n- Exceptions to the cleavage rule. If specified, should be a regular expression.\n- Cleavage sites matching `rule` will be checked against `exception` and omitted\n+ Exceptions to the cleavage rule. If specified, should be a key present in :py:const:`expasy_rules`\n+ or regular expression. Cleavage sites matching `rule` will be checked against `exception` and omitted\nif they match.\nReturns\n@@ -547,6 +540,8 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False,\n--------\n>>> cleave('AKAKBK', expasy_rules['trypsin'], 0) == {'AK', 'BK'}\nTrue\n+ >>> cleave('AKAKBK', 'trypsin', 0) == {'AK', 'BK'}\n+ True\n>>> cleave('GKGKYKCK', expasy_rules['trypsin'], 2) == \\\n{'CK', 'GKYK', 'YKCK', 'GKGK', 'GKYKCK', 'GK', 'GKGKYK', 'YK'}\nTrue\n@@ -558,6 +553,8 @@ def _cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exc\n\"\"\"Like :py:func:`cleave`, but the result is a list. Refer to\n:py:func:`cleave` for explanation of parameters.\n\"\"\"\n+ rule = expasy_rules.get(rule, rule)\n+ exception = expasy_rules.get(exception, exception)\npeptides = []\nml = missed_cleavages+2\ntrange = range(ml)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_parser.py",
"new_path": "tests/test_parser.py",
"diff": "@@ -62,9 +62,10 @@ class ParserTest(unittest.TestCase):\ndef test_cleave(self):\nself.assertEqual(parser._cleave('PEPTIDEKS', parser.expasy_rules['trypsin']), ['PEPTIDEK', 'S'])\n+ self.assertEqual(parser._cleave('PEPTIDEKS', 'trypsin'), ['PEPTIDEK', 'S'])\nfor seq in self.simple_sequences:\nfor elem in parser.cleave(\n- seq, parser.expasy_rules['trypsin'], int(random.uniform(1, 10))):\n+ seq, 'trypsin', int(random.uniform(1, 10))):\nself.assertIn(elem, seq)\nself.assertTrue(any(elem == seq\nfor elem in parser.cleave(seq, parser.expasy_rules['trypsin'], len(seq))))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Allow naming expasy rules in cleave |
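A minimal usage sketch of the change above, taken from the doctest added in the diff (requires a pyteomics version containing this commit):

    from pyteomics import parser

    # The cleavage rule (and the exception) may now be given by name,
    # i.e. as a key of parser.expasy_rules, instead of a regex.
    assert parser.cleave('AKAKBK', 'trypsin', 0) == {'AK', 'BK'}
    assert parser.cleave('AKAKBK', parser.expasy_rules['trypsin'], 0) == {'AK', 'BK'}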
377,522 | 18.02.2019 17:40:07 | -10,800 | 3358544005937aa4c14cd96927df0f0817b2827d | Add index saving to pepxml | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -97,7 +97,7 @@ This module requires :py:mod:`lxml`.\nfrom lxml import etree\nfrom . import xml, auxiliary as aux, _schema_defaults\n-class PepXML(xml.MultiProcessingXML):\n+class PepXML(xml.MultiProcessingXML, xml.IndexSavingXML):\n\"\"\"Parser class for pepXML files.\"\"\"\nfile_format = 'pepXML'\n_root_element = 'msms_pipeline_analysis'\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add index saving to pepxml |
377,522 | 22.02.2019 19:31:56 | -10,800 | 8e71ab58df089502ddaf8d22532bae7fe00cd9ba | Fix title in annotate_spectrum | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -377,6 +377,7 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nstd_colors.update({i: 'blue' for i in 'abc'})\ncolors = kwargs.pop('colors', std_colors)\nftol = kwargs.pop('ftol', None)\n+ title = kwargs.pop('title', '')\nif ftol is None:\nrtol = kwargs.pop('rtol', 1e-5)\ntext_kw = kwargs.pop('text_kw', dict(ha='center', clip_on=True, backgroundcolor='#ffffff99'))\n@@ -428,3 +429,4 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\ntexts.append(pylab.text(x, y, name, color=c, **text_kw))\nif adjust:\nadjust_text(texts, **adjust_kw)\n+ pylab.title(title)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix title in annotate_spectrum |
377,522 | 24.02.2019 23:11:39 | -10,800 | 0aeb8fa792ae2e5b3d24c1cf150feacdb95a26ee | A more proper fix | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -377,7 +377,6 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nstd_colors.update({i: 'blue' for i in 'abc'})\ncolors = kwargs.pop('colors', std_colors)\nftol = kwargs.pop('ftol', None)\n- title = kwargs.pop('title', '')\nif ftol is None:\nrtol = kwargs.pop('rtol', 1e-5)\ntext_kw = kwargs.pop('text_kw', dict(ha='center', clip_on=True, backgroundcolor='#ffffff99'))\n@@ -411,7 +410,6 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\naa_mass=aa_mass, charge=charge, ion_type=ion, mass_data=mass_data, ion_comp=ion_comp))\nnames.setdefault(ion, []).append(ion[0] + str(i) + ion[1:])\n- plot_spectrum(spectrum, centroided, *args, **kwargs)\ntexts = []\nfor ion in types:\nc = colors.get(ion, colors.get(ion[0], 'blue'))\n@@ -429,4 +427,5 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\ntexts.append(pylab.text(x, y, name, color=c, **text_kw))\nif adjust:\nadjust_text(texts, **adjust_kw)\n- pylab.title(title)\n+ kwargs.setdefault('zorder', -1)\n+ plot_spectrum(spectrum, centroided, *args, **kwargs)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | A more proper fix |
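After this change, keyword arguments such as title are simply forwarded to plot_spectrum, which is now called last with a low zorder so that annotation labels stay on top. A sketch with hypothetical input (any MGF file with a spectrum matching the peptide):

    import pylab
    from pyteomics import mgf, pylab_aux

    with mgf.read('spectra.mgf') as reader:   # hypothetical file name
        spectrum = next(reader)
    pylab.figure()
    pylab_aux.annotate_spectrum(spectrum, 'PEPTIDE', title='annotated spectrum',
                                maxcharge=2)
    pylab.show()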
377,522 | 25.02.2019 00:40:21 | -10,800 | dfd7858a76724fab57fc129bedb371c3e7fc2619 | Extend pd_kwargs support | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -439,7 +439,7 @@ def filter_df(*args, **kwargs):\nelse:\ndf = args[0]\nelse:\n- read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep'] if k in kwargs}\n+ read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep', 'pd_kwargs'] if k in kwargs}\ndf = DataFrame(*args, **read_kw)\nif 'is_decoy' not in kwargs:\nif sep is not None:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/protxml.py",
"new_path": "pyteomics/protxml.py",
"diff": "@@ -218,8 +218,6 @@ def DataFrame(*args, **kwargs):\nParameters\n----------\n- *args, **kwargs : passed to :py:func:`chain`\n-\nsep : str or None, optional\nSome values related to protein groups are variable-length lists.\nIf `sep` is a :py:class:`str`, they will be packed into single string using\n@@ -229,6 +227,8 @@ def DataFrame(*args, **kwargs):\npd_kwargs : dict, optional\nKeyword arguments passed to the :py:class:`pandas.DataFrame` constructor.\n+ *args, **kwargs : passed to :py:func:`chain`.\n+\nReturns\n-------\nout : pandas.DataFrame\n@@ -292,7 +292,7 @@ def filter_df(*args, **kwargs):\nelse:\ndf = args[0]\nelse:\n- read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep'] if k in kwargs}\n+ read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep', 'pd_kwargs'] if k in kwargs}\ndf = DataFrame(*args, **read_kw)\nif 'is_decoy' not in kwargs:\nif 'decoy_suffix' in kwargs:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -268,7 +268,6 @@ def DataFrame(*args, **kwargs):\nParameters\n----------\n- *args, **kwargs : passed to :py:func:`chain`\nsep : str or None, optional\nSome values related to PSMs (such as protein information) are variable-length\n@@ -276,6 +275,11 @@ def DataFrame(*args, **kwargs):\nthis delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is\n:py:const:`None`.\n+ pd_kwargs : dict, optional\n+ Keyword arguments passed to the :py:class:`pandas.DataFrame` constructor.\n+\n+ *args, **kwargs : passed to :py:func:`chain`.\n+\nReturns\n-------\nout : pandas.DataFrame\n@@ -285,6 +289,7 @@ def DataFrame(*args, **kwargs):\nprot_keys = ['id', 'uid', 'label', 'expect']\npep_keys = ['id', 'pre', 'post', 'start', 'end']\nsep = kwargs.pop('sep', None)\n+ pd_kwargs = kwargs.pop('pd_kwargs', {})\nwith chain(*args, **kwargs) as f:\nfor item in f:\ninfo = {}\n@@ -312,7 +317,7 @@ def DataFrame(*args, **kwargs):\ninfo.update(protein['peptide'])\ninfo['scan'] = item['support']['fragment ion mass spectrum']['note']\ndata.append(info)\n- return pd.DataFrame(data)\n+ return pd.DataFrame(data, **pd_kwargs)\ndef filter_df(*args, **kwargs):\n\"\"\"Read X!Tandem output files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.\n@@ -341,7 +346,7 @@ def filter_df(*args, **kwargs):\nelse:\ndf = args[0]\nelse:\n- read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep'] if k in kwargs}\n+ read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep', 'pd_kwargs'] if k in kwargs}\ndf = DataFrame(*args, **read_kw)\nif 'is_decoy' not in kwargs:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Extend pd_kwargs support |
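pd_kwargs is handed to the pandas.DataFrame constructor; after this change it also survives the trip through filter_df in pepxml, protxml and tandem. A sketch with a hypothetical file name:

    from pyteomics import pepxml

    # Any pandas.DataFrame keyword can be passed through, e.g. a dtype override.
    df = pepxml.DataFrame('results.pep.xml', pd_kwargs={'dtype': object})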
377,522 | 26.02.2019 17:23:26 | -10,800 | 8da4324688b5d2a3d1fe8de9024a79003c7af5ef | Another fix in annotate_spectrum | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -393,7 +393,7 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nelse:\nif adjust is None:\nadjust = True\n- parsed = parser.parse(peptide, True)\n+ parsed = parser.parse(peptide, True, labels=list(aa_mass) + [parser.std_cterm, parser.std_nterm])\nn = len(parsed)\nmaxpeak = spectrum['intensity array'].max()\nmz, names = {}, {}\n@@ -401,15 +401,14 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nfor charge in range(1, maxcharge+1):\nif ion[0] in 'abc':\nfor i in range(2, n):\n- mz.setdefault(ion, []).append(mass.fast_mass2(parsed[:i] + [parsed[-1]],\n+ mz.setdefault(ion, []).append(mass.fast_mass2(parsed[:i] + [parser.std_cterm],\naa_mass=aa_mass, charge=charge, ion_type=ion, mass_data=mass_data, ion_comp=ion_comp))\nnames.setdefault(ion, []).append(ion[0] + str(i-1) + ion[1:])\nelse:\n- for i in range(1, n-2):\n- mz.setdefault(ion, []).append(mass.fast_mass2([parsed[0]] + parsed[n-(i+1):],\n+ for i in range(1, n-1):\n+ mz.setdefault(ion, []).append(mass.fast_mass2([parser.std_nterm] + parsed[n-(i+1):],\naa_mass=aa_mass, charge=charge, ion_type=ion, mass_data=mass_data, ion_comp=ion_comp))\nnames.setdefault(ion, []).append(ion[0] + str(i) + ion[1:])\n-\ntexts = []\nfor ion in types:\nc = colors.get(ion, colors.get(ion[0], 'blue'))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Another fix in annotate_spectrum |
377,522 | 28.02.2019 12:22:56 | -10,800 | b75bcbb4ef509cc8a2b05bbaf9b00e9b3168a60f | Update is_modX and is_term_mod; fix issue | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1a4\n\\ No newline at end of file\n+4.1a5\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/parser.py",
"new_path": "pyteomics/parser.py",
"diff": "@@ -116,6 +116,9 @@ std_cterm = '-OH'\nstd_labels = std_amino_acids + [std_nterm, std_cterm]\n\"\"\"modX labels for the standard amino acids and unmodified termini.\"\"\"\n+_nterm_mod = r'[^-]+-$'\n+_cterm_mod = r'-[^-]+$'\n+\ndef is_term_mod(label):\n\"\"\"Check if `label` corresponds to a terminal modification.\n@@ -126,8 +129,21 @@ def is_term_mod(label):\nReturns\n-------\nout : bool\n+\n+ Examples\n+ --------\n+ >>> is_term_mod('A')\n+ False\n+ >>> is_term_mod('Ac-')\n+ True\n+ >>> is_term_mod('-customGroup')\n+ True\n+ >>> is_term_mod('this-group-')\n+ False\n+ >>> is_term_mod('-')\n+ False\n\"\"\"\n- return label[0] == '-' or label[-1] == '-'\n+ return (re.match(_nterm_mod, label) or re.match(_cterm_mod, label)) is not None\ndef match_modX(label):\n\"\"\"Check if `label` is a valid 'modX' label.\n@@ -140,7 +156,7 @@ def match_modX(label):\n-------\nout : re.match or None\n\"\"\"\n- return re.match(_modX_split, label)\n+ return re.match(_modX_single, label)\ndef is_modX(label):\n\"\"\"Check if `label` is a valid 'modX' label.\n@@ -152,6 +168,17 @@ def is_modX(label):\nReturns\n-------\nout : bool\n+\n+ Examples\n+ --------\n+ >>> is_modX('M')\n+ True\n+ >>> is_modX('oxM')\n+ True\n+ >>> is_modX('oxMet')\n+ False\n+ >>> is_modX('160C')\n+ True\n\"\"\"\nreturn bool(match_modX(label))\n@@ -167,6 +194,10 @@ def length(sequence, **kwargs):\nlabels : list, optional\nA list of allowed labels for amino acids and terminal modifications.\n+ Returns\n+ -------\n+ out : int\n+\nExamples\n--------\n>>> length('PEPTIDE')\n@@ -206,6 +237,7 @@ def _split_label(label):\n_modX_sequence = re.compile(r'^([^-]+-)?((?:[^A-Z-]*[A-Z])+)(-[^-]+)?$')\n_modX_group = re.compile(r'[^A-Z-]*[A-Z]')\n_modX_split = re.compile(r'([^A-Z-]*)([A-Z])')\n+_modX_single = re.compile(r'^([^A-Z-]*)([A-Z])$')\ndef parse(sequence, show_unmodified_termini=False, split=False, allow_unknown_modifications=False, **kwargs):\n\"\"\"Parse a sequence string written in modX notation into a list of\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update is_modX and is_term_mod; fix issue #36 |
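Condensed from the doctests added above, the tightened checks behave as follows:

    from pyteomics import parser

    assert parser.is_term_mod('Ac-')            # N-terminal modification
    assert parser.is_term_mod('-customGroup')   # C-terminal modification
    assert not parser.is_term_mod('-')          # a lone dash is not a terminal mod
    assert parser.is_modX('oxM')                # one modified residue
    assert not parser.is_modX('oxMet')          # more than one uppercase letter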
377,522 | 28.02.2019 19:26:25 | -10,800 | 443af840a27e416532c97cc4e05217e19e0de89b | Add primitive index saving for text readers | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- New module :py:mod:`pyteomics.ms2` provides a parser for **ms2** files.\nThis is in fact an alias to :py:mod:`ms1`, which handles both formats.\n+- Added index saving functionality for :py:class:`pyteomics.mgf.IndexedMGF`.\n+\n- New helper functions :py:func:`pyteomics.pylab_aux.plot_spectrum` and :py:func:`pyteomics.pylab_aux.annotate_spectrum`.\n- The `rule` and `exception` arguments in :py:func:`pyteomics.parser.cleave` can be keys from :py:const:`expasy_rules`.\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1a5\n\\ No newline at end of file\n+4.1b1\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -16,7 +16,7 @@ from .constants import _nist_mass\nfrom .file_helpers import (\n_file_obj, _keepstate, _keepstate_method, IteratorContextManager,\nFileReader, IndexedTextReader, IndexedReaderMixin, TimeOrderedIndexedReaderMixin,\n- OffsetIndex, HierarchicalOffsetIndex,\n+ IndexSavingMixin, OffsetIndex, HierarchicalOffsetIndex, IndexSavingTextReader,\n_file_reader, _file_writer,\n_make_chain, _check_use_index, FileReadingProcess, TaskMappingMixin,\nserializer, ChainBase, TableJoiner)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -8,6 +8,7 @@ import json\nimport multiprocessing as mp\nimport threading\nimport warnings\n+import os\ntry:\nbasestring\n@@ -474,6 +475,72 @@ class IndexedTextReader(IndexedReaderMixin, FileReader):\nreturn lines\n+class IndexSavingMixin(object):\n+ \"\"\"Common interface for :py:class:`IndexSavingXML` and :py:class:`IndexSavingTextReader`.\"\"\"\n+ _index_class = NotImplemented\n+\n+ @property\n+ def _byte_offset_filename(self):\n+ try:\n+ path = self._source.name\n+ except AttributeError:\n+ return None\n+ byte_offset_filename = os.path.splitext(path)[0] + '-byte-offsets.json'\n+ return byte_offset_filename\n+\n+ def _check_has_byte_offset_file(self):\n+ \"\"\"Check if the file at :attr:`_byte_offset_filename` exists\n+\n+ Returns\n+ -------\n+ bool\n+ Whether the file exists\n+ \"\"\"\n+ path = self._byte_offset_filename\n+ if path is None:\n+ return False\n+ return os.path.exists(path)\n+\n+ @classmethod\n+ def prebuild_byte_offset_file(cls, path):\n+ \"\"\"Construct a new XML reader, build its byte offset index and\n+ write it to file\n+\n+ Parameters\n+ ----------\n+ path : str\n+ The path to the file to parse\n+ \"\"\"\n+ with cls(path) as inst:\n+ inst.write_byte_offsets()\n+\n+ def write_byte_offsets(self):\n+ \"\"\"Write the byte offsets in :attr:`_offset_index` to the file\n+ at :attr:`_byte_offset_filename`\n+ \"\"\"\n+ with open(self._byte_offset_filename, 'w') as f:\n+ self._offset_index.save(f)\n+\n+ @_keepstate_method\n+ def _build_index(self):\n+ \"\"\"Build the byte offset index by either reading these offsets\n+ from the file at :attr:`_byte_offset_filename`, or falling back\n+ to the method used by :class:`IndexedXML` if this operation fails\n+ due to an IOError\n+ \"\"\"\n+ try:\n+ self._read_byte_offsets()\n+ except (IOError, AttributeError, TypeError):\n+ super(IndexSavingMixin, self)._build_index()\n+\n+ def _read_byte_offsets(self):\n+ \"\"\"Read the byte offset index JSON file at :attr:`_byte_offset_filename`\n+ and populate :attr:`_offset_index`\n+ \"\"\"\n+ with open(self._byte_offset_filename, 'r') as f:\n+ index = self._index_class.load(f)\n+ self._offset_index = index\n+\ndef _file_reader(_mode='r'):\n# a lot of the code below is borrowed from\n@@ -635,6 +702,18 @@ class OffsetIndex(OrderedDict):\nself[key] = value\nreturn self\n+ def save(self, fp):\n+ json.dump(self, fp)\n+\n+ @classmethod\n+ def load(cls, fp):\n+ index = json.load(fp, object_hook=OrderedDict)\n+ return cls(index)\n+\n+\n+class IndexSavingTextReader(IndexSavingMixin, IndexedTextReader):\n+ _index_class = OffsetIndex\n+\nclass HierarchicalOffsetIndex(object):\nschema_version = (1, 0, 0)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -226,7 +226,7 @@ class MGFBase():\nreturn self.get_spectrum(key)\n-class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexedTextReader, MGFBase):\n+class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexSavingTextReader, MGFBase):\n\"\"\"\nA class representing an MGF file. Supports the `with` syntax and direct iteration for sequential\nparsing. Specific spectra can be accessed by title using the indexing syntax in constant time.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -32,7 +32,6 @@ import socket\nfrom traceback import format_exc\nimport operator as op\nimport ast\n-import os\nimport warnings\nfrom collections import OrderedDict, namedtuple\nfrom lxml import etree\n@@ -42,7 +41,7 @@ from .auxiliary import FileReader, PyteomicsError, basestring, _file_obj, Hierar\nfrom .auxiliary import unitint, unitfloat, unitstr, cvstr\nfrom .auxiliary import _keepstate_method as _keepstate\nfrom .auxiliary import BinaryDataArrayTransformer\n-from .auxiliary import TaskMappingMixin, IndexedReaderMixin\n+from .auxiliary import TaskMappingMixin, IndexedReaderMixin, IndexSavingMixin\ntry: # Python 2.7\nfrom urllib2 import urlopen, URLError\n@@ -1112,75 +1111,23 @@ class MultiProcessingXML(IndexedXML, TaskMappingMixin):\nreturn iter(self._offset_index[self._default_iter_tag])\n-class IndexSavingXML(IndexedXML):\n+class IndexSavingXML(IndexSavingMixin, IndexedXML):\n\"\"\"An extension to the IndexedXML type which\nadds facilities to read and write the byte offset\nindex externally.\n\"\"\"\n-\n- @property\n- def _byte_offset_filename(self):\n- try:\n- path = self._source.name\n- except AttributeError:\n- return None\n- byte_offset_filename = os.path.splitext(path)[0] + '-byte-offsets.json'\n- return byte_offset_filename\n-\n- def _check_has_byte_offset_file(self):\n- \"\"\"Check if the file at :attr:`_byte_offset_filename` exists\n-\n- Returns\n- -------\n- bool\n- Whether the file exists\n- \"\"\"\n- path = self._byte_offset_filename\n- if path is None:\n- return False\n- return os.path.exists(path)\n+ _index_class = HierarchicalOffsetIndex\ndef _read_byte_offsets(self):\n\"\"\"Read the byte offset index JSON file at :attr:`_byte_offset_filename`\nand populate :attr:`_offset_index`\n\"\"\"\nwith open(self._byte_offset_filename, 'r') as f:\n- index = HierarchicalOffsetIndex.load(f)\n+ index = self._index_class.load(f)\nif index.schema_version is None:\nraise TypeError(\"Legacy Offset Index!\")\nself._offset_index = index\n- def write_byte_offsets(self):\n- \"\"\"Write the byte offsets in :attr:`_offset_index` to the file\n- at :attr:`_byte_offset_filename`\n- \"\"\"\n- with open(self._byte_offset_filename, 'w') as f:\n- self._offset_index.save(f)\n-\n- @_keepstate\n- def _build_index(self):\n- \"\"\"Build the byte offset index by either reading these offsets\n- from the file at :attr:`_byte_offset_filename`, or falling back\n- to the method used by :class:`IndexedXML` if this operation fails\n- due to an IOError\n- \"\"\"\n- try:\n- self._read_byte_offsets()\n- except (IOError, AttributeError, TypeError):\n- super(IndexSavingXML, self)._build_index()\n-\n- @classmethod\n- def prebuild_byte_offset_file(cls, path):\n- \"\"\"Construct a new XML reader, build its byte offset index and\n- write it to file\n-\n- Parameters\n- ----------\n- path : str\n- The path to the file to parse\n- \"\"\"\n- with cls(path, use_index=True) as inst:\n- inst.write_byte_offsets()\nclass ArrayConversionMixin(BinaryDataArrayTransformer):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "-from os import path\n+import os\nimport numpy as np\nimport pyteomics\n-pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\n+pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]\nimport tempfile\nimport unittest\nimport pickle\n-from pyteomics import mgf\n+import shutil\n+from pyteomics import mgf, auxiliary as aux\nimport data\nclass MGFTest(unittest.TestCase):\n@@ -128,5 +129,31 @@ class MGFTest(unittest.TestCase):\nspectra = sorted(list(reader.map()), key=lambda s: s['params']['title'])\nself.assertEqual(data.mgf_spectra_long, spectra)\n+ def test_prebuild_index(self):\n+ test_dir = tempfile.mkdtemp()\n+ work_path = os.path.join(test_dir, self.path)\n+ with open(work_path, 'w') as dest, open(self.path) as source:\n+ dest.write(source.read())\n+ assert dest.closed\n+ with mgf.IndexedMGF(work_path) as inst:\n+ offsets_exist = os.path.exists(inst._byte_offset_filename)\n+ self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\n+ self.assertTrue(isinstance(inst._offset_index, aux.OffsetIndex))\n+ self.assertTrue(inst._source.closed)\n+ mgf.IndexedMGF.prebuild_byte_offset_file(work_path)\n+ with mgf.IndexedMGF(work_path) as inst:\n+ offsets_exist = os.path.exists(inst._byte_offset_filename)\n+ self.assertTrue(offsets_exist)\n+ self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\n+ self.assertTrue(isinstance(inst._offset_index, aux.OffsetIndex))\n+ self.assertTrue(inst._source.closed)\n+ os.remove(inst._byte_offset_filename)\n+ with mgf.IndexedMGF(work_path) as inst:\n+ offsets_exist = os.path.exists(inst._byte_offset_filename)\n+ self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\n+ self.assertTrue(isinstance(inst._offset_index, aux.OffsetIndex))\n+ self.assertTrue(inst._source.closed)\n+ shutil.rmtree(test_dir, True)\n+\nif __name__ == \"__main__\":\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add primitive index saving for text readers |
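IndexedMGF now derives from IndexSavingTextReader, so the title index can be persisted and reloaded, as the new test exercises. A sketch with hypothetical file and spectrum names:

    from pyteomics import mgf

    # Writes the offsets to a JSON file next to the source; subsequent
    # IndexedMGF instances read it back instead of re-scanning the file.
    mgf.IndexedMGF.prebuild_byte_offset_file('spectra.mgf')
    with mgf.IndexedMGF('spectra.mgf') as reader:
        spectrum = reader['Spectrum 1']   # random access by title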
377,522 | 04.03.2019 00:22:13 | -10,800 | d546984df0826d52761ee5205e3aed47bca2335b | Add schema versioning for index saving text readers, refactor stuff | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1b1\n\\ No newline at end of file\n+4.1b2\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -485,7 +485,8 @@ class IndexSavingMixin(object):\npath = self._source.name\nexcept AttributeError:\nreturn None\n- byte_offset_filename = os.path.splitext(path)[0] + '-byte-offsets.json'\n+ name, ext = os.path.splitext(path)\n+ byte_offset_filename = '{}-{}-byte-offsets.json'.format(name, ext[1:])\nreturn byte_offset_filename\ndef _check_has_byte_offset_file(self):\n@@ -580,11 +581,42 @@ def _file_writer(_mode='a'):\nreturn decorator\n-class OffsetIndex(OrderedDict):\n+class WritableIndex(object):\n+ schema_version = (1, 0, 0)\n+ _schema_version_tag_key = \"@pyteomics_schema_version\"\n+\n+ def _serializable_container(self):\n+ container = {'index': list(self.items())}\n+ return container\n+\n+ def save(self, fp):\n+ container = self._serializable_container()\n+ container[self._schema_version_tag_key] = self.schema_version\n+ json.dump(container, fp)\n+\n+ @classmethod\n+ def load(cls, fp):\n+ container = json.load(fp, object_hook=OrderedDict)\n+ version_tag = container.get(cls._schema_version_tag_key)\n+ if version_tag is None:\n+ # The legacy case, no special processing yet\n+ inst = cls({}, None)\n+ inst.schema_version = None\n+ return inst\n+ version_tag = tuple(version_tag)\n+ index = container.get(\"index\")\n+ if version_tag < cls.schema_version:\n+ # schema upgrade case, no special processing yet\n+ return cls(index, version_tag)\n+ # no need to upgrade\n+ return cls(index, version_tag)\n+\n+\n+class OffsetIndex(OrderedDict, WritableIndex):\n'''An augmented OrderedDict that formally wraps getting items by index\n'''\ndef __init__(self, *args, **kwargs):\n- OrderedDict.__init__(self, *args, **kwargs)\n+ super(OffsetIndex, self).__init__(*args, **kwargs)\nself._index_sequence = None\ndef _invalidate(self):\n@@ -702,29 +734,15 @@ class OffsetIndex(OrderedDict):\nself[key] = value\nreturn self\n- def save(self, fp):\n- json.dump(self, fp)\n-\n- @classmethod\n- def load(cls, fp):\n- index = json.load(fp, object_hook=OrderedDict)\n- return cls(index)\n-\nclass IndexSavingTextReader(IndexSavingMixin, IndexedTextReader):\n_index_class = OffsetIndex\n-class HierarchicalOffsetIndex(object):\n- schema_version = (1, 0, 0)\n-\n- _schema_version_tag_key = \"@pyteomics_schema_version\"\n+class HierarchicalOffsetIndex(WritableIndex):\n_inner_type = OffsetIndex\n- def __init__(self, base=None, schema_version=None):\n- if schema_version is None:\n- schema_version = self.schema_version\n- self.schema_version = schema_version\n+ def __init__(self, base=None):\nself.mapping = defaultdict(self._inner_type)\nfor key, value in (base or {}).items():\nself.mapping[key] = self._inner_type(value)\n@@ -781,34 +799,15 @@ class HierarchicalOffsetIndex(object):\ndef items(self):\nreturn self.mapping.items()\n- def save(self, fp):\n- encoded_index = dict()\n- keys = list(self.keys())\n+ def _serializable_container(self):\n+ encoded_index = {}\ncontainer = {\n- self._schema_version_tag_key: self.schema_version,\n- \"keys\": keys\n+ 'keys': list(self.keys())\n}\nfor key, offset in self.items():\nencoded_index[key] = list(offset.items())\ncontainer['index'] = encoded_index\n- json.dump(container, fp)\n-\n- @classmethod\n- def load(cls, fp):\n- container = json.load(fp, object_hook=OrderedDict)\n- version_tag = container.get(cls._schema_version_tag_key)\n- if version_tag is None:\n- # The legacy case, no special processing yet\n- inst = cls({}, None)\n- inst.schema_version = None\n- return inst\n- version_tag = tuple(version_tag)\n- index = container.get(\"index\")\n- if version_tag < 
cls.schema_version:\n- # schema upgrade case, no special processing yet\n- return cls(index, version_tag)\n- # no need to upgrade\n- return cls(index, version_tag)\n+ return container\ndef _make_chain(reader, readername, full_output=False):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -6,6 +6,8 @@ import tempfile\nimport unittest\nimport pickle\nimport shutil\n+import json\n+from collections import OrderedDict\nfrom pyteomics import mgf, auxiliary as aux\nimport data\n@@ -155,5 +157,22 @@ class MGFTest(unittest.TestCase):\nself.assertTrue(inst._source.closed)\nshutil.rmtree(test_dir, True)\n+ def test_write_index_keys(self):\n+ test_dir = tempfile.mkdtemp()\n+ work_path = os.path.join(test_dir, self.path)\n+ with open(work_path, 'w') as dest, open(self.path) as source:\n+ dest.write(source.read())\n+ assert dest.closed\n+ mgf.IndexedMGF.prebuild_byte_offset_file(work_path)\n+ with mgf.IndexedMGF(work_path) as inst:\n+ ipath = inst._byte_offset_filename\n+ with open(ipath) as ifp:\n+ container = json.load(ifp, object_hook=OrderedDict)\n+ tag_key = mgf.IndexedMGF._index_class._schema_version_tag_key\n+ self.assertEqual(set(container.keys()), {tag_key, 'index'})\n+ self.assertEqual(tuple(container[tag_key]), mgf.IndexedMGF._index_class.schema_version)\n+ self.assertEqual(container['index'], [('Spectrum 1', (217, 343)), ('Spectrum 2', (343, 506))])\n+\n+\nif __name__ == \"__main__\":\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add schema versioning for index saving text readers, refactor stuff |
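The saved index file now carries a schema version tag next to the offsets, as asserted by the new test. A sketch of inspecting it (hypothetical MGF file; the JSON name follows the new '{name}-{ext}-byte-offsets.json' scheme):

    import json
    from pyteomics import mgf

    mgf.IndexedMGF.prebuild_byte_offset_file('spectra.mgf')
    with open('spectra-mgf-byte-offsets.json') as f:
        container = json.load(f)
    print(container['@pyteomics_schema_version'])  # [1, 0, 0]
    print(container['index'])                      # list of [title, [start, end]] pairs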
377,522 | 20.03.2019 17:31:13 | -10,800 | 01ce5a8521c44e262bffdf011cc9e32869561f84 | Update pepxml test | [
{
"change_type": "MODIFY",
"old_path": "tests/test_pepxml.py",
"new_path": "tests/test_pepxml.py",
"diff": "@@ -8,11 +8,14 @@ from data import pepxml_results\nclass PepxmlTest(unittest.TestCase):\nmaxDiff = None\n+ path = 'test.pep.xml'\n+\n_kw = {'full_output': False, 'fdr': 1,\n'key': lambda x: min(\nsh['search_score'].get('expect', 1)\nfor sh in x['search_hit'])\n}\n+\ndef testReadPSM(self):\nfor rs, it in product([True, False], repeat=2):\nfor func in [PepXML, read, chain,\n@@ -20,8 +23,15 @@ class PepxmlTest(unittest.TestCase):\nlambda x, **kw: filter(x, **PepxmlTest._kw),\nlambda x, **kw: filter.chain(x, **PepxmlTest._kw),\nlambda x, **kw: filter.chain.from_iterable([x], **PepxmlTest._kw)]:\n- with func('test.pep.xml', read_schema=rs, iterative=it) as r:\n+ with func(self.path, read_schema=rs, iterative=it) as r:\nself.assertEqual(list(r), pepxml_results)\n+ def test_index(self):\n+ with PepXML(self.path) as reader:\n+ self.assertEqual(list(reader.index), ['spectrum_query'])\n+ specs = [item['spectrum'] for item in reader]\n+ self.assertEqual(list(reader.index['spectrum_query']), specs)\n+ self.assertEqual(reader[specs[-1]], pepxml_results[-1])\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update pepxml test |
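The updated test exercises random access on the indexed reader; the equivalent access pattern, using the repository's test file, looks like this:

    from pyteomics import pepxml

    with pepxml.PepXML('test.pep.xml') as reader:
        print(list(reader.index))                  # ['spectrum_query']
        spectra = [psm['spectrum'] for psm in reader]
        last_psm = reader[spectra[-1]]             # lookup by spectrum label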
377,522 | 02.04.2019 03:05:00 | -10,800 | 02e04b45e17a0e49b26a4a94496d266911b92c02 | Add proof-of-principle numpress support (issue | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/utils.py",
"new_path": "pyteomics/auxiliary/utils.py",
"diff": "@@ -2,7 +2,7 @@ from __future__ import print_function\nimport base64\nimport zlib\n-\n+import sys\nfrom functools import wraps\nfrom collections import namedtuple\n@@ -17,6 +17,10 @@ try:\nexcept ImportError:\nnp = None\n+try:\n+ import PyMSNumpress\n+except ImportError:\n+ PyMSNumpress = None\ndef print_tree(d, indent_str=' -> ', indent_count=1):\n\"\"\"Read a nested dict (with strings as keys) and print its structure.\n@@ -83,6 +87,29 @@ def _decode_base64_data_array(source, dtype, is_compressed):\nreturn output\n+_default_compression_map = {\n+ 'no compression': lambda x: x,\n+ 'zlib compression': zlib.decompress,\n+ }\n+\n+def _numpressDecompress(decoder):\n+ def decode(data):\n+ result = []\n+ if sys.version_info.major < 3:\n+ decoder([ord(b) for b in data], result)\n+ else:\n+ decoder(data, result)\n+ return result\n+ return decode\n+\n+if PyMSNumpress:\n+ _default_compression_map.update(\n+ {\n+ 'MS-Numpress short logged float compression': _numpressDecompress(PyMSNumpress.decodeSlof),\n+ 'MS-Numpress positive integer compression': _numpressDecompress(PyMSNumpress.decodePic),\n+ 'MS-Numpress linear prediction compression': _numpressDecompress(PyMSNumpress.decodeLinear),\n+ })\n+\nclass BinaryDataArrayTransformer(object):\n\"\"\"A base class that provides methods for reading\nbase64-encoded binary arrays.\n@@ -93,10 +120,7 @@ class BinaryDataArrayTransformer(object):\nMaps compressor type name to decompression function\n\"\"\"\n- compression_type_map = {\n- 'no compression': lambda x: x,\n- 'zlib compression': zlib.decompress,\n- }\n+ compression_type_map = _default_compression_map\nclass binary_array_record(namedtuple(\n\"binary_array_record\", (\"data\", \"compression\", \"dtype\", \"source\", \"key\"))):\n@@ -161,5 +185,7 @@ class BinaryDataArrayTransformer(object):\n\"\"\"\nbinary = self._base64_decode(source)\nbinary = self._decompress(binary, compression_type)\n+ if isinstance(binary, list):\n+ return np.array(binary, dtype)\narray = self._transform_buffer(binary, dtype)\nreturn array\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add proof-of-principle numpress support (issue #37) |
377,522 | 02.04.2019 18:15:52 | -10,800 | 76f190246c256cf61751b52f75fb81a4fbd4850b | Use pynumpress instead of PyMSNumpress | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/utils.py",
"new_path": "pyteomics/auxiliary/utils.py",
"diff": "@@ -18,9 +18,9 @@ except ImportError:\nnp = None\ntry:\n- import PyMSNumpress\n+ import pynumpress\nexcept ImportError:\n- PyMSNumpress = None\n+ pynumpress = None\ndef print_tree(d, indent_str=' -> ', indent_count=1):\n\"\"\"Read a nested dict (with strings as keys) and print its structure.\n@@ -92,22 +92,17 @@ _default_compression_map = {\n'zlib compression': zlib.decompress,\n}\n-def _numpressDecompress(decoder):\n+def _pynumpressDecompress(decoder):\ndef decode(data):\n- result = []\n- if sys.version_info.major < 3:\n- decoder([ord(b) for b in data], result)\n- else:\n- decoder(data, result)\n- return result\n+ return decoder(np.frombuffer(data, dtype=np.uint8))\nreturn decode\n-if PyMSNumpress:\n+if pynumpress:\n_default_compression_map.update(\n{\n- 'MS-Numpress short logged float compression': _numpressDecompress(PyMSNumpress.decodeSlof),\n- 'MS-Numpress positive integer compression': _numpressDecompress(PyMSNumpress.decodePic),\n- 'MS-Numpress linear prediction compression': _numpressDecompress(PyMSNumpress.decodeLinear),\n+ 'MS-Numpress short logged float compression': _pynumpressDecompress(pynumpress.decode_slof),\n+ 'MS-Numpress positive integer compression': _pynumpressDecompress(pynumpress.decode_pic),\n+ 'MS-Numpress linear prediction compression': _pynumpressDecompress(pynumpress.decode_linear),\n})\nclass BinaryDataArrayTransformer(object):\n@@ -185,7 +180,5 @@ class BinaryDataArrayTransformer(object):\n\"\"\"\nbinary = self._base64_decode(source)\nbinary = self._decompress(binary, compression_type)\n- if isinstance(binary, list):\n- return np.array(binary, dtype)\narray = self._transform_buffer(binary, dtype)\nreturn array\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Use pynumpress instead of PyMSNumpress |
377,522 | 04.04.2019 01:56:35 | -10,800 | cf98348d2c195be404133a81a81cde61543477d2 | Add support for layered compression | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/utils.py",
"new_path": "pyteomics/auxiliary/utils.py",
"diff": "@@ -97,12 +97,20 @@ def _pynumpressDecompress(decoder):\nreturn decoder(np.frombuffer(data, dtype=np.uint8))\nreturn decode\n+def _zlibNumpress(decoder):\n+ def decode(data):\n+ return decoder(np.frombuffer(zlib.decompress(data), dtype=np.uint8))\n+ return decode\n+\nif pynumpress:\n_default_compression_map.update(\n{\n'MS-Numpress short logged float compression': _pynumpressDecompress(pynumpress.decode_slof),\n'MS-Numpress positive integer compression': _pynumpressDecompress(pynumpress.decode_pic),\n'MS-Numpress linear prediction compression': _pynumpressDecompress(pynumpress.decode_linear),\n+ 'MS-Numpress short logged float compression followed by zlib compression': _zlibNumpress(pynumpress.decode_slof),\n+ 'MS-Numpress positive integer compression followed by zlib compression': _zlibNumpress(pynumpress.decode_pic),\n+ 'MS-Numpress linear prediction compression followed by zlib compression': _zlibNumpress(pynumpress.decode_linear),\n})\nclass BinaryDataArrayTransformer(object):\n@@ -155,8 +163,9 @@ class BinaryDataArrayTransformer(object):\nreturn decompressed_source\ndef _transform_buffer(self, binary, dtype):\n- output = np.frombuffer(binary, dtype=dtype)\n- return output\n+ if isinstance(binary, np.ndarray):\n+ return binary.astype(dtype)\n+ return np.frombuffer(binary, dtype=dtype)\ndef decode_data_array(self, source, compression_type=None, dtype=np.float64):\n\"\"\"Decode a base64-encoded, compressed bytestring into a numerical\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add support for layered compression |
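A round-trip sketch of the layered scheme: a numpress-encoded payload wrapped in zlib, decoded the way the new '... followed by zlib compression' handlers do (requires numpy and pynumpress):

    import zlib
    import numpy as np
    import pynumpress

    data = np.array([345.1, 370.2, 460.2])
    fp = pynumpress.optimal_linear_fixed_point(data)
    payload = zlib.compress(pynumpress.encode_linear(data, fp).tobytes())
    # Mirrors _zlibNumpress: zlib is undone first, then the numpress decoder runs.
    restored = pynumpress.decode_linear(np.frombuffer(zlib.decompress(payload), dtype=np.uint8))
    assert np.allclose(data, restored, rtol=0.001)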
377,522 | 08.04.2019 19:23:42 | -10,800 | c9e2e1498ca71ac8dcd55acdf99e982602b73261 | Add tests for numpress in mzml | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "+dev\n+---\n+\n+ - Add `numpress <https://dx.doi.org/10.1074%2Fmcp.O114.037879>`_ support for mzML and mzXML files.\n+ To read files compressed with Numpress, install `pynumpress`\n+ (`PyPI <https://pypi.org/project/pynumpress/>`_, `GitHub <https://github.com/mobiusklein/pynumpress>`_).\n+\n4.1\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1\n\\ No newline at end of file\n+4.1.1a1\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/utils.py",
"new_path": "pyteomics/auxiliary/utils.py",
"diff": "@@ -164,7 +164,7 @@ class BinaryDataArrayTransformer(object):\ndef _transform_buffer(self, binary, dtype):\nif isinstance(binary, np.ndarray):\n- return binary.astype(dtype)\n+ return binary.astype(dtype, copy=False)\nreturn np.frombuffer(binary, dtype=dtype)\ndef decode_data_array(self, source, compression_type=None, dtype=np.float64):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -12,6 +12,8 @@ from data import mzml_spectra\nimport numpy as np\nimport pickle\nimport operator as op\n+import pynumpress\n+import base64\nclass MzmlTest(unittest.TestCase):\nmaxDiff = None\n@@ -162,5 +164,23 @@ class MzmlTest(unittest.TestCase):\nself.assertEqual(mzml_spectra[1], reader.time[0.1])\nself.assertEqual(mzml_spectra, reader.time[0:0.1])\n+ def test_numpress_slof(self):\n+ data = mzml_spectra[0]['intensity array']\n+ encoded = base64.b64encode(pynumpress.encode_slof(data, pynumpress.optimal_slof_fixed_point(data)).tobytes()).decode('ascii')\n+ record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress short logged float compression', data.dtype)\n+ self.assertTrue(np.allclose(data, record.decode(), rtol=0.001))\n+\n+ def test_numpress_linear(self):\n+ data = mzml_spectra[0]['intensity array']\n+ encoded = base64.b64encode(pynumpress.encode_linear(data, pynumpress.optimal_linear_fixed_point(data)).tobytes()).decode('ascii')\n+ record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress linear prediction compression', data.dtype)\n+ self.assertTrue(np.allclose(data, record.decode(), rtol=0.001))\n+\n+ def test_numpress_pic(self):\n+ data = mzml_spectra[0]['intensity array']\n+ encoded = base64.b64encode(pynumpress.encode_pic(data).tobytes()).decode('ascii')\n+ record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress positive integer compression', data.dtype)\n+ self.assertTrue(np.allclose(data, record.decode(), atol=0.6))\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add tests for numpress in mzml |
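A condensed version of the new slof test, decoding through BinaryDataArrayTransformer exactly as the reader does (_make_record is a private helper, used here only because the tests use it):

    import base64
    import numpy as np
    import pynumpress
    from pyteomics import auxiliary as aux

    data = np.array([73., 44., 67., 291.])
    encoded = base64.b64encode(
        pynumpress.encode_slof(data, pynumpress.optimal_slof_fixed_point(data)).tobytes()
    ).decode('ascii')
    record = aux.BinaryDataArrayTransformer()._make_record(
        encoded, 'MS-Numpress short logged float compression', data.dtype)
    assert np.allclose(data, record.decode(), rtol=0.001)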
377,522 | 08.04.2019 19:35:54 | -10,800 | 14c4f9eaf40b74ae694ae2881b9369c23034c534 | Add zlib version of tests | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -14,6 +14,7 @@ import pickle\nimport operator as op\nimport pynumpress\nimport base64\n+import zlib\nclass MzmlTest(unittest.TestCase):\nmaxDiff = None\n@@ -170,17 +171,35 @@ class MzmlTest(unittest.TestCase):\nrecord = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress short logged float compression', data.dtype)\nself.assertTrue(np.allclose(data, record.decode(), rtol=0.001))\n+ def test_numpress_slof_zlib(self):\n+ data = mzml_spectra[0]['intensity array']\n+ encoded = base64.b64encode(zlib.compress(pynumpress.encode_slof(data, pynumpress.optimal_slof_fixed_point(data)).tobytes())).decode('ascii')\n+ record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress short logged float compression followed by zlib compression', data.dtype)\n+ self.assertTrue(np.allclose(data, record.decode(), rtol=0.001))\n+\ndef test_numpress_linear(self):\ndata = mzml_spectra[0]['intensity array']\nencoded = base64.b64encode(pynumpress.encode_linear(data, pynumpress.optimal_linear_fixed_point(data)).tobytes()).decode('ascii')\nrecord = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress linear prediction compression', data.dtype)\nself.assertTrue(np.allclose(data, record.decode(), rtol=0.001))\n+ def test_numpress_linear_zlib(self):\n+ data = mzml_spectra[0]['intensity array']\n+ encoded = base64.b64encode(zlib.compress(pynumpress.encode_linear(data, pynumpress.optimal_linear_fixed_point(data)).tobytes())).decode('ascii')\n+ record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress linear prediction compression followed by zlib compression', data.dtype)\n+ self.assertTrue(np.allclose(data, record.decode(), rtol=0.001))\n+\ndef test_numpress_pic(self):\ndata = mzml_spectra[0]['intensity array']\nencoded = base64.b64encode(pynumpress.encode_pic(data).tobytes()).decode('ascii')\nrecord = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress positive integer compression', data.dtype)\nself.assertTrue(np.allclose(data, record.decode(), atol=0.6))\n+ def test_numpress_pic_zlib(self):\n+ data = mzml_spectra[0]['intensity array']\n+ encoded = base64.b64encode(zlib.compress(pynumpress.encode_pic(data).tobytes())).decode('ascii')\n+ record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress positive integer compression followed by zlib compression', data.dtype)\n+ self.assertTrue(np.allclose(data, record.decode(), atol=0.6))\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add zlib version of tests |
377,522 | 15.04.2019 01:14:44 | -10,800 | 2d6ff1ae3d5d573c6ee4316191931e329e3bccd2 | Convert decoded bytes to bytearray so that resulting arrays are mutable (issue | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/utils.py",
"new_path": "pyteomics/auxiliary/utils.py",
"diff": "@@ -2,7 +2,6 @@ from __future__ import print_function\nimport base64\nimport zlib\n-import sys\nfrom functools import wraps\nfrom collections import namedtuple\n@@ -83,7 +82,7 @@ def _decode_base64_data_array(source, dtype, is_compressed):\ndecoded_source = base64.b64decode(source.encode('ascii'))\nif is_compressed:\ndecoded_source = zlib.decompress(decoded_source)\n- output = np.frombuffer(decoded_source, dtype=dtype)\n+ output = np.frombuffer(bytearray(decoded_source), dtype=dtype)\nreturn output\n@@ -189,5 +188,7 @@ class BinaryDataArrayTransformer(object):\n\"\"\"\nbinary = self._base64_decode(source)\nbinary = self._decompress(binary, compression_type)\n+ if isinstance(binary, bytes):\n+ binary = bytearray(binary)\narray = self._transform_buffer(binary, dtype)\nreturn array\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Convert decoded bytes to bytearray so that resulting arrays are mutable (issue #38) |
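The rationale in two lines of numpy: frombuffer over immutable bytes yields a read-only array, while a bytearray buffer produces a writable one:

    import numpy as np

    raw = np.arange(4, dtype=np.float64).tobytes()
    print(np.frombuffer(raw, dtype=np.float64).flags.writeable)             # False
    print(np.frombuffer(bytearray(raw), dtype=np.float64).flags.writeable)  # True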
377,522 | 25.04.2019 17:28:30 | -10,800 | 724214609ee2097b0bcebdce43d5841eb4d206e4 | Fix issue disable indexing by default in ms1 and ms2, add ms2 tests | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -5,6 +5,8 @@ dev\nTo read files compressed with Numpress, install `pynumpress`\n(`PyPI <https://pypi.org/project/pynumpress/>`_, `GitHub <https://github.com/mobiusklein/pynumpress>`_).\n+ - Bugfixes.\n+\n4.1\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1.1a1\n\\ No newline at end of file\n+4.1.1a2\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/ms1.py",
"new_path": "pyteomics/ms1.py",
"diff": "@@ -106,7 +106,7 @@ class MS1Base(object):\nsline = line.strip().split(None, 3)\nparams['scan'] = tuple(sline[1:3])\nif len(sline) == 4: # in MS2 the S line contains the precursor m/z as a 4th column\n- params['precursor m/z'] = float(params[3])\n+ params['precursor m/z'] = float(sline[3])\ndef _handle_I(self, line, sline, params):\nparams[sline[1]] = sline[2]\n@@ -214,9 +214,9 @@ class MS1(aux.FileReader, MS1Base):\nelif sline[0] == 'S':\nyield self._make_scan(params, masses, intensities)\nparams = dict(self.header) if self._use_header else {}\n- params['scan'] = tuple(sline[1:])\nmasses = []\nintensities = []\n+ self._handle_S(line, sline, params)\nelse:\nif sline[0] == 'I': # spectrum-specific parameters!\nself._handle_I(line, sline, params)\n@@ -227,11 +227,10 @@ class MS1(aux.FileReader, MS1Base):\nelse: # this must be a peak list\ntry:\nmasses.append(float(sline[0])) # this may cause\n- intensities.append(float(sline[1])) # exceptions...\\\n+ intensities.append(float(sline[1])) # exceptions...\nexcept ValueError:\nraise aux.PyteomicsError(\n- 'Error when parsing %s. Line: %s' % (\n- self._source_name, line))\n+ 'Error when parsing %s. Line: %s' % (self._source_name, line))\nexcept IndexError:\npass\n@@ -252,6 +251,11 @@ class IndexedMS1(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\nand 'params' stores a :py:class:`dict` of parameters (keys and values are\n:py:class:`str`, keys corresponding to MS1).\n+ .. warning ::\n+ Labels for scan objects are constructed as the first number in the S line, as follows:\n+ for a line ``S 0 1`` the label is `'0'`. If these labels are not unique\n+ for the scans in the file, the indexed parser will not work correctly. Consider using\n+ :py:class:`MS1` instead.\nAttributes\n----------\n@@ -371,13 +375,18 @@ def read(*args, **kwargs):\nFile encoding.\nuse_index : bool, optional\n- Determines which parsing method to use. If :py:const:`True` (default), an instance of\n+ Determines which parsing method to use. If :py:const:`True`, an instance of\n:py:class:`IndexedMS1` is created. This facilitates random access by scan titles.\nIf an open file is passed as `source`, it needs to be open in binary mode.\n- If :py:const:`False`, an instance of :py:class:`MS1` is created. It reads\n+ If :py:const:`False` (default), an instance of :py:class:`MS1` is created. It reads\n`source` in text mode and is suitable for iterative parsing.\n+ .. warning ::\n+ Labels for scan objects are constructed as the first number in the S line, as follows:\n+ for a line ``S 0 1`` the label is `'0'`. If these labels are not unique\n+ for the scans in the file, the indexed parser will not work correctly.\n+\nblock_size : int, optinal\nSize of the chunk (in bytes) used to parse the file when creating the byte offset index.\n(Accepted only for :py:class:`IndexedMS1`.)\n@@ -393,7 +402,7 @@ def read(*args, **kwargs):\nelse:\nsource = kwargs.get('source')\nuse_index = kwargs.pop('use_index', None)\n- use_index = aux._check_use_index(source, use_index, True)\n+ use_index = aux._check_use_index(source, use_index, False)\ntp = IndexedMS1 if use_index else MS1\nreturn tp(*args, **kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/ms2.py",
"new_path": "pyteomics/ms2.py",
"diff": "@@ -87,6 +87,11 @@ class IndexedMS2(IndexedMS1):\nand 'params' stores a :py:class:`dict` of parameters (keys and values are\n:py:class:`str`, keys corresponding to MS2).\n+ .. warning ::\n+ Labels for scan objects are constructed as the first number in the S line, as follows:\n+ for a line ``S 0 1 123.4`` the label is `'0'`. If these labels are not unique\n+ for the scans in the file, the indexed parser will not work correctly. Consider using\n+ :py:class:`MS2` instead.\nAttributes\n----------\n@@ -152,11 +157,16 @@ def read(*args, **kwargs):\nFile encoding.\nuse_index : bool, optional\n- Determines which parsing method to use. If :py:const:`True` (default), an instance of\n+ Determines which parsing method to use. If :py:const:`True`, an instance of\n:py:class:`IndexedMS2` is created. This facilitates random access by scan titles.\nIf an open file is passed as `source`, it needs to be open in binary mode.\n- If :py:const:`False`, an instance of :py:class:`MS2` is created. It reads\n+ .. warning ::\n+ Labels for scan objects are constructed as the first number in the S line, as follows:\n+ for a line ``S 0 1 123.4`` the label is `'0'`. If these labels are not unique\n+ for the scans in the file, the indexed parser will not work correctly.\n+\n+ If :py:const:`False` (default), an instance of :py:class:`MS2` is created. It reads\n`source` in text mode and is suitable for iterative parsing.\nblock_size : int, optinal\n@@ -174,7 +184,7 @@ def read(*args, **kwargs):\nelse:\nsource = kwargs.get('source')\nuse_index = kwargs.pop('use_index', None)\n- use_index = aux._check_use_index(source, use_index, True)\n+ use_index = aux._check_use_index(source, use_index, False)\ntp = IndexedMS2 if use_index else MS2\nreturn tp(*args, **kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1641,6 +1641,31 @@ ms1_header = {'CreationDate': 'Sat Jun 03 15:25:10 2017',\n'Extractor': 'ProteoWizard',\n'Source file': 'Set 1. B2 at 193 nm RT.RAW'}\n+ms2_spectra = [{'intensity array': makeCA([ 73., 44., 67., 291., 54., 49.]),\n+ 'm/z array': makeCA([ 846.6, 846.8, 847.6, 1640.1, 1640.6, 1895.5]),\n+ 'params': {'charge': [2.0],\n+ 'neutral mass': [1966.193],\n+ 'precursor m/z': 983.6,\n+ 'scan': ('0', '0')}},\n+ {'intensity array': makeCA([ 237., 128., 108., 1007., 974., 79.]),\n+ 'm/z array': makeCA([ 345.1, 370.2, 460.2, 1673.3, 1674. , 1675.3]),\n+ 'params': {'RTime': 25.0, 'precursor m/z': 1084.9, 'scan': ('1', '1')}}]\n+\n+ms2_spectra_lists = [{'intensity array': [ 73., 44., 67., 291., 54., 49.],\n+ 'm/z array': [ 846.6, 846.8, 847.6, 1640.1, 1640.6, 1895.5],\n+ 'params': {'charge': [2.0],\n+ 'neutral mass': [1966.193],\n+ 'precursor m/z': 983.6,\n+ 'scan': ('0', '0')}},\n+ {'intensity array': [ 237., 128., 108., 1007., 974., 79.],\n+ 'm/z array': [ 345.1, 370.2, 460.2, 1673.3, 1674. , 1675.3],\n+ 'params': {'RTime': 25.0, 'precursor m/z': 1084.9, 'scan': ('1', '1')}}]\n+\n+ms2_header = {'CreationDate': 'Wed Apr 24 17:06:23 2019',\n+ 'Extractor': 'ProteoWizard',\n+ 'Extractor version': 'pwiz_2.1.2575 (TPP v4.5 RAPTURE rev 2, Build 201208012328 (linux))',\n+ 'Source file': 'test.mgf'}\n+\nprotxml_results =[{'group_number': 1,\n'probability': 1.0,\n'protein': [{'confidence': 1.0,\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/test.ms2",
"diff": "+H CreationDate Wed Apr 24 17:06:23 2019\n+H Extractor ProteoWizard\n+H Extractor version pwiz_2.1.2575 (TPP v4.5 RAPTURE rev 2, Build 201208012328 (linux))\n+H Source file test.mgf\n+S 0 0 983.6\n+Z 2 1966.193\n+846.6 73\n+846.8 44\n+847.6 67\n+1640.1 291\n+1640.6 54\n+1895.5 49\n+S 1 1 1084.9\n+I RTime 25\n+345.1 237\n+370.2 128\n+460.2 108\n+1673.3 1007\n+1674 974\n+1675.3 79\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/test_ms2.py",
"diff": "+from os import path\n+import numpy as np\n+import pyteomics\n+pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\n+import unittest\n+from pyteomics.ms2 import read, read_header, MS2, IndexedMS2, chain\n+import data\n+\n+class MS2Test(unittest.TestCase):\n+ maxDiff = None\n+ def setUp(self):\n+ self.path = 'test.ms2'\n+ self.header = read_header(self.path)\n+ self.spectra = list(read(self.path))\n+ self.ns = len(self.spectra)\n+\n+ def test_read(self):\n+ # http://stackoverflow.com/q/14246983/1258041\n+ self.assertEqual(data.ms2_spectra, list(read(self.path)))\n+ for reader in [read, MS2, IndexedMS2, chain]:\n+ with reader(self.path) as reader:\n+ self.assertEqual(data.ms2_spectra, list(reader))\n+\n+ def test_read_array_conversion(self):\n+ with read(self.path, convert_arrays=False) as reader:\n+ self.assertEqual(data.ms2_spectra_lists, list(reader))\n+ with read(self.path, convert_arrays=True) as reader:\n+ s = next(reader)\n+ self.assertTrue(isinstance(s['m/z array'], np.ndarray))\n+\n+ def test_header(self):\n+ self.assertEqual(self.header, data.ms2_header)\n+\n+ def test_read_dtype(self):\n+ dtypes = {'m/z array': np.float32, 'intensity array': np.int32}\n+ with read(self.path, dtype=dtypes) as f:\n+ for spec in f:\n+ for k, v in dtypes.items():\n+ self.assertEqual(spec[k].dtype, v)\n+\n+if __name__ == \"__main__\":\n+ unittest.main()\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix issue #39, disable indexing by default in ms1 and ms2, add ms2 tests |
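use_index now defaults to False in ms1/ms2 (the indexed readers stay available on request), and the S-line parsing bug is fixed. A sketch against the test file added in this commit:

    from pyteomics import ms2

    with ms2.read('test.ms2') as reader:                  # sequential MS2 parser
        spectra = list(reader)
    with ms2.read('test.ms2', use_index=True) as reader:  # opt in to IndexedMS2
        spectrum = reader.get_by_id('1')  # label: first number of the S line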
377,522 | 06.05.2019 17:05:22 | -10,800 | 7e7626e62369cfd4ee9777d45524aac614b41f3a | Try changing image reference syntax in readme | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -5,6 +5,10 @@ dev\nTo read files compressed with Numpress, install `pynumpress`\n(`PyPI <https://pypi.org/project/pynumpress/>`_, `GitHub <https://github.com/mobiusklein/pynumpress>`_).\n+ .. note:: There is a `known issue\n+ <https://bitbucket.org/levitsky/pyteomics/issues/37/add-numpress-support-in-mzml#comment-51540079>`_\n+ with `pynumpress` on Linux and Python 2.7, which is contingent on a fix in Cython.\n+\n- Bugfixes.\n4.1\n"
},
{
"change_type": "MODIFY",
"old_path": "README",
"new_path": "README",
"diff": "-.. image:: https://img.shields.io/pypi/v/pyteomics.svg\n- :target: https://pypi.org/project/pyteomics/\n- :alt: PyPI\n+![PyPI](https://img.shields.io/pypi/v/pyteomics.svg)\n-.. image:: https://img.shields.io/readthedocs/pyteomics.svg\n- :target: https://pyteomics.readthedocs.io/\n- :alt: Read the Docs (latest)\n+[![Read the Docs](https://readthedocs.org/projects/pyteomics/badge/?version=latest)](https://pyteomics.readthedocs.io/en/latest/?badge=latest)\n-.. image:: https://img.shields.io/aur/license/python-pyteomics.svg\n- :target: https://www.apache.org/licenses/LICENSE-2.0\n- :alt: Apache License\n+![Apache license](https://img.shields.io/aur/license/python-pyteomics.svg)\n-.. image:: https://img.shields.io/aur/version/python-pyteomics.svg\n- :target: https://aur.archlinux.org/packages/python-pyteomics/\n- :alt: python-pyteomics on AUR\n-\n-.. image:: https://img.shields.io/badge/pyteomics-awesome-orange.svg\n- :alt: Pyteomics is awesome\n+![python-pyteomics on AUR](https://img.shields.io/aur/version/python-pyteomics.svg)\nWhat is Pyteomics?\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Try changing image reference syntax in readme |
377,522 | 06.05.2019 17:13:09 | -10,800 | ea47ff09dcfa06abd9d4a811b66a7be8d258d9d3 | Go back to rst, try adding extension | [
{
"change_type": "RENAME",
"old_path": "README",
"new_path": "README.rst",
"diff": "-![PyPI](https://img.shields.io/pypi/v/pyteomics.svg)\n-[![Read the Docs](https://readthedocs.org/projects/pyteomics/badge/?version=latest)](https://pyteomics.readthedocs.io/en/latest/?badge=latest)\n-![Apache license](https://img.shields.io/aur/license/python-pyteomics.svg)\n-![python-pyteomics on AUR](https://img.shields.io/aur/version/python-pyteomics.svg)\n+.. image:: https://img.shields.io/pypi/v/pyteomics.svg\n+ :target: https://pypi.org/project/pyteomics/\n+ :alt: PyPI\n+\n+.. image:: https://img.shields.io/readthedocs/pyteomics.svg\n+ :target: https://pyteomics.readthedocs.io/\n+ :alt: Read the Docs (latest)\n+\n+.. image:: https://img.shields.io/aur/license/python-pyteomics.svg\n+ :target: https://www.apache.org/licenses/LICENSE-2.0\n+ :alt: Apache License\n+\n+.. image:: https://img.shields.io/aur/version/python-pyteomics.svg\n+ :target: https://aur.archlinux.org/packages/python-pyteomics/\n+ :alt: python-pyteomics on AUR\n+\n+.. image:: https://img.shields.io/badge/pyteomics-awesome-orange.svg\n+ :alt: Pyteomics is awesome\nWhat is Pyteomics?\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Go back to rst, try adding extension |
377,522 | 07.06.2019 17:59:43 | -10,800 | cb98b95a239b501d20ddf73b86466e28a98535ea | Add pynumpress extra in setup.py | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -36,7 +36,8 @@ setup(\n'TDA': ['numpy'],\n'graphics': ['matplotlib'],\n'DF': ['pandas'],\n- 'Unimod': ['lxml', 'sqlalchemy']},\n+ 'Unimod': ['lxml', 'sqlalchemy'],\n+ 'numpress': ['pynumpress']},\nclassifiers = ['Intended Audience :: Science/Research',\n'Programming Language :: Python :: 2.7',\n'Programming Language :: Python :: 3',\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add pynumpress extra in setup.py |
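For reference, with this extra defined the optional dependency can be installed together with the package via pip install pyteomics[numpress]; the extras mechanism only declares the dependency, and the numpress handlers in auxiliary.utils still activate only if pynumpress imports successfully.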
377,522 | 07.06.2019 18:00:42 | -10,800 | 29832311c0165cef5f52535dcdbbe49a98062f4b | Fix incorrect std mass for pyrrolysine (issue | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mass/mass.py",
"new_path": "pyteomics/mass/mass.py",
"diff": "@@ -747,7 +747,7 @@ std_aa_mass = {\n'R': 156.10111,\n'Y': 163.06333,\n'W': 186.07931,\n- 'O': 255.15829,\n+ 'O': 237.14773,\n}\n\"\"\"A dictionary with monoisotopic masses of the twenty standard\namino acid residues, selenocysteine and pyrrolysine.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mass.py",
"new_path": "tests/test_mass.py",
"diff": "@@ -315,6 +315,9 @@ class MassTest(unittest.TestCase):\nself.assertNotEqual(i, -1)\nself.assertAlmostEqual(abundances[i], abundance)\n+ def test_std_aa_mass(self):\n+ for key, value in mass.std_aa_mass.items():\n+ self.assertAlmostEqual(value, mass.calculate_mass(parsed_sequence=[key]), places=4)\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix incorrect std mass for pyrrolysine (issue #42) |
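The new test cross-checks every standard residue mass against calculate_mass; for the corrected entry specifically:

    from pyteomics import mass

    # Residue mass of pyrrolysine ('O') now agrees with the composition-based value.
    assert abs(mass.std_aa_mass['O'] - mass.calculate_mass(parsed_sequence=['O'])) < 1e-4
    print(mass.std_aa_mass['O'])   # 237.14773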
377,522 | 07.06.2019 18:38:37 | -10,800 | 41e560881115e1777d4759c4ad9a628cd78c1603 | Update pynumpress notice | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -12,7 +12,8 @@ Bugfix: fix the standard mass value for pyrrolysine (issue #42).\n.. note:: There is a `known issue\n<https://bitbucket.org/levitsky/pyteomics/issues/37/add-numpress-support-in-mzml#comment-51540079>`_\n- with `pynumpress` on Linux and Python 2.7, which is contingent on a fix in Cython.\n+ with `pynumpress` on Linux and Python 2.7. Please install the fresh `pynumpress` from GitHub if this\n+ affects you.\n- Bugfixes.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update pynumpress notice |
377,522 | 21.06.2019 00:31:08 | -10,800 | 455188bd014d5ef26563a070cea68eaa77071ecc | Fix docstring of annotate_spectrum | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pylab_aux.py",
"new_path": "pyteomics/pylab_aux.py",
"diff": "@@ -348,6 +348,8 @@ def annotate_spectrum(spectrum, peptide, centroided=True, *args, **kwargs):\nPassed to :py:func:`plot_spectrum`.\ntypes : Container, optional\nIon types to be considered for annotation. Default is `('b', 'y')`.\n+ maxcharge : int, optional\n+ Maximum charge state for fragment ions to be considered. Default is `1`.\ncolors : dict, optional\nKeys are ion types, values are colors to plot the annotated peaks with. Defaults to a red-blue scheme.\nftol : float, optional\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix docstring of annotate_spectrum |
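An illustrative call using the keyword arguments this docstring documents; `spectrum` and `peptide` are assumed to come from elsewhere (e.g. a spectrum dict from an MGF reader and a peptide sequence string), and the plot requires matplotlib:

```python
from pyteomics import pylab_aux

# 'spectrum' is a spectrum dict, 'peptide' a sequence string (both assumed defined).
pylab_aux.annotate_spectrum(spectrum, peptide, types=('b', 'y'), maxcharge=2, ftol=0.02)
```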
377,522 | 10.08.2019 01:26:46 | -10,800 | 155ec1b8eb2dd9e35008efac4d11e3f0159896fd | Add mass.Unimod.by_id | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "+dev\n+---\n+\n+New method :py:meth:`mass.Unimod.by_id`.\n+Also, :py:class:`mass.Unimod` now supports dict-like queries with record IDs.\n+\n4.1.2\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1.2\n\\ No newline at end of file\n+4.1.3dev0\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mass/mass.py",
"new_path": "pyteomics/mass/mass.py",
"diff": "@@ -965,8 +965,11 @@ class Unimod():\nself._tree = etree.parse(source)\nself._massdata = self._mass_data()\nself._mods = []\n- for mod in self._xpath('/unimod/modifications/mod'):\n- self._mods.append(process_mod(mod))\n+ self._id = {}\n+ for i, mod in enumerate(self._xpath('/unimod/modifications/mod')):\n+ mod_dict = process_mod(mod)\n+ self._mods.append(mod_dict)\n+ self._id[mod_dict['record_id']] = i\ndef _xpath(self, path, element=None):\nfrom ..xml import xpath\n@@ -1057,3 +1060,23 @@ class Unimod():\nif len(result) == 1:\nreturn result[0]\nreturn result\n+\n+ def by_id(self, i):\n+ \"\"\"Search modifications by record ID. If a modification is found,\n+ it is returned. Otherwise, :py:exc:`KeyError` is raised.\n+\n+ Parameters\n+ ----------\n+ i : int or str\n+ The Unimod record ID.\n+\n+ Returns\n+ -------\n+ out : dict\n+ A single modification dict.\n+ \"\"\"\n+ if isinstance(i, str):\n+ i = int(i)\n+ return self._mods[self._id[i]]\n+\n+ __getitem__ = by_id\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add mass.Unimod.by_id |
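A usage sketch of the new accessor; `'unimod.xml'` is a placeholder path to a local copy of the Unimod database:

```python
from pyteomics import mass

db = mass.Unimod('unimod.xml')  # placeholder path to a Unimod XML file
mod = db.by_id(1)               # record IDs may be given as int or str
assert db[1] is mod             # dict-like access via the new __getitem__ alias
assert db.by_id('1') is mod
```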
377,522 | 22.10.2019 18:06:01 | -10,800 | 3bbe1e388b08480e97be04a8368f9d3e65955021 | Tentative solution for Convert unicode to str on PY2 in unitstr and cvstr constructor | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/structures.py",
"new_path": "pyteomics/auxiliary/structures.py",
"diff": "-import sys\nimport re\nfrom collections import defaultdict, Counter\n@@ -291,6 +290,8 @@ class unitstr(str):\n__slots__ = (\"unit_info\", )\ndef __new__(cls, value, unit_info=None):\n+ if PY2 and isinstance(value, unicode):\n+ value = value.encode('utf-8')\ninst = str.__new__(cls, value)\ninst.unit_info = unit_info\nreturn inst\n@@ -327,6 +328,9 @@ class cvstr(str):\nreturn inst\nexcept KeyError:\npass\n+\n+ if PY2 and isinstance(value, unicode):\n+ value = value.encode('utf-8')\ninst = str.__new__(cls, value)\ninst.accession = _intern_unit_or_cv(accession)\ninst.unit_accession = _intern_unit_or_cv(unit_accession)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Tentative solution for #47. Convert unicode to str on PY2 in unitstr and cvstr constructor |
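A sketch of what the fix guards against on Python 2, where `str` is bytes and `unicode` input previously leaked through; the accession strings below are illustrative placeholders:

```python
from pyteomics.auxiliary.structures import unitstr, cvstr

# On Python 2, unicode input is now encoded to UTF-8 before str.__new__,
# so both constructors accept unicode values transparently.
s = unitstr(u'millisecond', unit_info=u'UO:0000000')  # illustrative accession
c = cvstr(u'scan start time', accession=u'MS:0000000', unit_accession=u'UO:0000000')
```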
377,522 | 27.10.2019 19:15:44 | -10,800 | 2310eb6c1f6bbca0ef1d5a8a06be706ff698ff93 | Fix and add all_ntt_prob to pepxml dataframe output | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "dev\n---\n-New method :py:meth:`mass.Unimod.by_id`.\n+ - New method :py:meth:`mass.Unimod.by_id`.\nAlso, :py:class:`mass.Unimod` now supports dict-like queries with record IDs.\n+ - Reduce memory footprint for unit primitives (PR #35 by Joshua Klein).\n+\n+ - Fix issues #47, #48.\n+\n+\n+\n4.1.2\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1.3dev0\n\\ No newline at end of file\n+4.1.3dev1\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -403,11 +403,16 @@ def DataFrame(*args, **kwargs):\nif 'analysis_result' in sh:\nfor ar in sh['analysis_result']:\nif ar['analysis'] == 'peptideprophet':\n+ try:\ninfo.update(ar['peptideprophet_result']['parameter'])\n+ except KeyError:\n+ pass\ninfo['peptideprophet_probability'] = ar['peptideprophet_result']['probability']\n+ info['peptideprophet_ntt_prob'] = ar['peptideprophet_result']['all_ntt_prob']\nelif ar['analysis'] == 'interprophet':\ninfo.update(ar['interprophet_result']['parameter'])\ninfo['interprophet_probability'] = ar['interprophet_result']['probability']\n+ info['interprophet_ntt_prob'] = ar['interprophet_result']['all_ntt_prob']\nyield info\nreturn pd.DataFrame(gen_items(), **pd_kwargs)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix #48 and add all_ntt_prob to pepxml dataframe output |
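A usage sketch for the extended DataFrame output; `'results.pep.xml'` is a hypothetical file name:

```python
from pyteomics import pepxml

df = pepxml.DataFrame('results.pep.xml')  # hypothetical input file
# Columns added by this commit, present when the corresponding analysis ran:
cols = [c for c in ('peptideprophet_ntt_prob', 'interprophet_ntt_prob') if c in df.columns]
print(df[cols].head())
```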
377,522 | 29.11.2019 16:23:09 | -10,800 | caed461879face3e5bd2ae01cd5d4a77eee7073c | Add Python 3.8 to test runner | [
{
"change_type": "MODIFY",
"old_path": "tests/runtests.sh",
"new_path": "tests/runtests.sh",
"diff": "#!/bin/bash\nexport PYTHONPATH=\"..\"\nif [ $# -eq 0 ]; then\n- find . -name 'test_*.py' -exec bash -c 'declare -a versions=(2.7 3.3 3.4 3.5 3.6 3.7); for v in \"${versions[@]}\"; do command -v \"python${v}\" > /dev/null 2>&1 && { echo \"Executing python${v}\" \"$0\"; eval \"python${v}\" \"$0\"; }; done' {} \\;\n+ find . -name 'test_*.py' -exec bash -c 'declare -a versions=(2.7 3.3 3.4 3.5 3.6 3.7 3.8); for v in \"${versions[@]}\"; do command -v \"python${v}\" > /dev/null 2>&1 && { echo \"Executing python${v}\" \"$0\"; eval \"python${v}\" \"$0\"; }; done' {} \\;\nelse\nfor f; do\n- for v in 2.7 3.3 3.4 3.5 3.6 3.7; do\n+ for v in 2.7 3.3 3.4 3.5 3.6 3.7 3.8; do\ncommand -v \"python${v}\" >/dev/null 2>&1 && { echo \"Executing python${v}\" \"$f\"; eval \"python${v}\" \"$f\"; }\ndone\ndone\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add Python 3.8 to test runner |
377,522 | 30.11.2019 23:10:39 | -10,800 | 9d915e4f982192603845e94d8b05edae8768fb20 | Fix by adding new keyword arguments for map-supporting readers: queue_size, queue_timeout and processes.
Make __init__ signatures more cooperative throughout the class tree.
Make sure that index is not constructed in index-saving readers even if file with offsets exists. | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -6,8 +6,7 @@ dev\n- Reduce memory footprint for unit primitives (PR #35 by Joshua Klein).\n- - Fix issues #47, #48.\n-\n+ - Fix issues #44, #46, #47, #48.\n4.1.2\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.1.3dev1\n\\ No newline at end of file\n+4.1.3dev2\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -129,15 +129,19 @@ class _file_obj(object):\ndef __iter__(self):\nreturn iter(self.file)\n+class NoOpBaseReader(object):\n+ def __init__(self, *args, **kwargs):\n+ pass\n-class IteratorContextManager(object):\n+class IteratorContextManager(NoOpBaseReader):\n- def __init__(self, _func, *args, **kwargs):\n- self._func = _func\n+ def __init__(self, *args, **kwargs):\n+ self._func = kwargs.pop('parser_func')\nself._args = args\nself._kwargs = kwargs\nif type(self) == IteratorContextManager:\nself.reset()\n+ super(IteratorContextManager, self).__init__(*args, **kwargs)\ndef __getstate__(self):\nstate = {}\n@@ -181,12 +185,13 @@ class FileReader(IteratorContextManager):\nfor file readers.\n\"\"\"\n- def __init__(self, source, mode, func, pass_file, args, kwargs, encoding=None):\n- super(FileReader, self).__init__(func, *args, **kwargs)\n- self._pass_file = pass_file\n+ def __init__(self, source, **kwargs):\n+ func = kwargs['parser_func']\n+ super(FileReader, self).__init__(*kwargs['args'], parser_func=func, **kwargs['kwargs'])\n+ self._pass_file = kwargs['pass_file']\nself._source_init = source\n- self._mode = mode\n- self._encoding = encoding\n+ self._mode = kwargs['mode']\n+ self._encoding = kwargs.get('encoding')\nself.reset()\ndef reset(self):\n@@ -216,7 +221,7 @@ def remove_bom(bstr):\nreturn bstr.replace(codecs.BOM_LE, b'').lstrip(b\"\\x00\")\n-class IndexedReaderMixin(object):\n+class IndexedReaderMixin(NoOpBaseReader):\n\"\"\"Common interface for :py:class:`IndexedTextReader` and :py:class:`IndexedXML`.\"\"\"\n@property\ndef index(self):\n@@ -383,22 +388,17 @@ class IndexedTextReader(IndexedReaderMixin, FileReader):\nblock_size = 1000000\nlabel_group = 1\n- def __init__(self, source, func, pass_file, args, kwargs, encoding='utf-8', block_size=None,\n- delimiter=None, label=None, label_group=None, _skip_index=False):\n+ def __init__(self, source, **kwargs):\n# the underlying _file_obj gets None as encoding\n# to avoid transparent decoding of StreamReader on read() calls\n- super(IndexedTextReader, self).__init__(source, 'rb', func, pass_file, args, kwargs, encoding=None)\n+ encoding = kwargs.pop('encoding', 'utf-8')\n+ super(IndexedTextReader, self).__init__(source, mode='rb', encoding=None, **kwargs)\nself.encoding = encoding\n- if delimiter is not None:\n- self.delimiter = delimiter\n- if label is not None:\n- self.label = label\n- if block_size is not None:\n- self.block_size = block_size\n- if label_group is not None:\n- self.label_group = label_group\n+ for attr in ['delimiter', 'label', 'block_size', 'label_group']:\n+ if attr in kwargs:\n+ setattr(self, attr, kwargs.pop(attr))\nself._offset_index = None\n- if not _skip_index:\n+ if not kwargs.pop('_skip_index', False):\nself._offset_index = self.build_byte_index()\ndef __getstate__(self):\n@@ -475,7 +475,7 @@ class IndexedTextReader(IndexedReaderMixin, FileReader):\nreturn lines\n-class IndexSavingMixin(object):\n+class IndexSavingMixin(NoOpBaseReader):\n\"\"\"Common interface for :py:class:`IndexSavingXML` and :py:class:`IndexSavingTextReader`.\"\"\"\n_index_class = NotImplemented\n@@ -529,6 +529,7 @@ class IndexSavingMixin(object):\nto the method used by :class:`IndexedXML` if this operation fails\ndue to an IOError\n\"\"\"\n+ if not self._use_index: return\ntry:\nself._read_byte_offsets()\nexcept (IOError, AttributeError, TypeError):\n@@ -555,10 +556,10 @@ def _file_reader(_mode='r'):\n@wraps(_func)\ndef helper(*args, **kwargs):\nif args:\n- return FileReader(args[0], _mode, _func, True, args[1:], kwargs,\n- 
kwargs.pop('encoding', None))\n+ return FileReader(args[0], mode=_mode, parser_func=_func, pass_file=True, args=args[1:], kwargs=kwargs,\n+ encoding=kwargs.pop('encoding', None))\nsource = kwargs.pop('source', None)\n- return FileReader(source, _mode, _func, True, (), kwargs, kwargs.pop('encoding', None))\n+ return FileReader(source, mode=_mode, parser_func=_func, pass_file=True, args=(), kwargs=kwargs, encoding=kwargs.pop('encoding', None))\nreturn helper\nreturn decorator\n@@ -924,9 +925,30 @@ try:\nexcept NotImplementedError:\n_NPROC = 4\n_QUEUE_TIMEOUT = 4\n+_QUEUE_SIZE = int(1e7)\n+\n+class TaskMappingMixin(NoOpBaseReader):\n+ def __init__(self, *args, **kwargs):\n+ '''\n+ Instantiate a :py:class:`TaskMappingMixin` object, set default parameters for IPC.\n+\n+ Parameters\n+ ----------\n+ queue_timeout : float, keyword only, optional\n+ The number of seconds to block, waiting for a result before checking to see if\n+ all workers are done.\n+ queue_size : int, keyword only, optional\n+ The length of IPC queue used.\n+ processes : int, keyword only, optional\n+ Number of worker processes to spawn when :py:meth:`map` is called. This can also be\n+ specified in the :py:meth:`map` call.\n+ '''\n+ self._queue_size = kwargs.pop('queue_size', _QUEUE_SIZE)\n+ self._queue_timeout = kwargs.pop('timeout', _QUEUE_TIMEOUT)\n+ self._nproc = kwargs.pop('processes', _NPROC)\n+ super(TaskMappingMixin, self).__init__(*args, **kwargs)\n-class TaskMappingMixin(object):\ndef _get_reader_for_worker_spec(self):\nreturn self\n@@ -937,8 +959,7 @@ class TaskMappingMixin(object):\ntry:\nserialized.append(serializer.dumps(obj))\nexcept serializer.PicklingError:\n- msg = 'Could not serialize {0} {1} with {2.__name__}.'.format(\n- objname, obj, serializer)\n+ msg = 'Could not serialize {0} {1} with {2.__name__}.'.format(objname, obj, serializer)\nif serializer is not dill:\nmsg += ' Try installing `dill`.'\nraise PyteomicsError(msg)\n@@ -965,7 +986,7 @@ class TaskMappingMixin(object):\nfeeder_thread.start()\nreturn feeder_thread\n- def map(self, target=None, processes=-1, queue_timeout=_QUEUE_TIMEOUT, args=None, kwargs=None, **_kwargs):\n+ def map(self, target=None, processes=-1, args=None, kwargs=None, **_kwargs):\n\"\"\"Execute the ``target`` function over entries of this object across up to ``processes``\nprocesses.\n@@ -977,11 +998,9 @@ class TaskMappingMixin(object):\nThe function to execute over each entry. It will be given a single object yielded by\nthe wrapped iterator as well as all of the values in ``args`` and ``kwargs``\nprocesses : int, optional\n- The number of worker processes to use. If negative, the number of processes\n- will match the number of available CPUs.\n- queue_timeout : float, optional\n- The number of seconds to block, waiting for a result before checking to see if\n- all workers are done.\n+ The number of worker processes to use. 
If 0 or negative,\n+ defaults to the number of available CPUs.\n+ This parameter can also be set at reader creation.\nargs : :class:`Sequence`, optional\nAdditional positional arguments to be passed to the target function\nkwargs : :class:`Mapping`, optional\n@@ -995,7 +1014,7 @@ class TaskMappingMixin(object):\nThe work item returned by the target function.\n\"\"\"\nif processes < 1:\n- processes = _NPROC\n+ processes = self._nproc\niterator = self._task_map_iterator()\nif args is None:\n@@ -1011,8 +1030,8 @@ class TaskMappingMixin(object):\nserialized = self._build_worker_spec(target, args, kwargs)\ndone_event = mp.Event()\n- in_queue = mp.Queue(int(1e7))\n- out_queue = mp.Queue(int(1e7))\n+ in_queue = mp.Queue(self._queue_size)\n+ out_queue = mp.Queue(self._queue_size)\nworkers = self._spawn_workers(serialized, in_queue, out_queue, done_event, processes)\nfeeder_thread = self._spawn_feeder_thread(in_queue, iterator, processes)\n@@ -1021,7 +1040,7 @@ class TaskMappingMixin(object):\nwhile True:\ntry:\n- result = out_queue.get(True, queue_timeout)\n+ result = out_queue.get(True, self._queue_timeout)\nyield result\nexcept Empty:\nif all(w.is_done() for w in workers):\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/target_decoy.py",
"new_path": "pyteomics/auxiliary/target_decoy.py",
"diff": "@@ -691,7 +691,7 @@ def _make_filter(read, is_decoy_prefix, is_decoy_suffix, key, qvalues):\n\"\"\"\nif kwargs.pop('full_output', True):\nreturn filter(*args, full_output=True, **kwargs)\n- return IteratorContextManager(filter, *args, **kwargs)\n+ return IteratorContextManager(*args, parser_func=filter, **kwargs)\n_fix_docstring(_filter, is_decoy=is_decoy_prefix, key=key)\nif read is _iter:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -114,7 +114,7 @@ from . import auxiliary as aux\nProtein = namedtuple('Protein', ('description', 'sequence'))\n-class FASTABase():\n+class FASTABase(object):\n\"\"\"Abstract base class for FASTA file parsers.\nCan be used for type checking.\n\"\"\"\n@@ -122,10 +122,12 @@ class FASTABase():\n_ignore_comments = False\n_comments = set('>;')\n- def __init__(self, ignore_comments=False, parser=None):\n- self._ignore_comments = ignore_comments\n+ def __init__(self, source, **kwargs):\n+ self._ignore_comments = kwargs.pop('ignore_comments', False)\n+ parser = kwargs.pop('parser', None)\nif parser is not None:\nself.parser = parser\n+ super(FASTABase, self).__init__(source, **kwargs)\ndef _is_comment(self, line):\nreturn line[0] in self._comments\n@@ -134,7 +136,7 @@ class FASTABase():\nraise NotImplementedError\n-class FASTA(aux.FileReader, FASTABase):\n+class FASTA(FASTABase, aux.FileReader):\n\"\"\"Text-mode, sequential FASTA parser.\nSuitable for iteration over the file to obtain all entries in order.\n\"\"\"\n@@ -162,9 +164,8 @@ class FASTA(aux.FileReader, FASTABase):\nencoding : str or None, optional\nFile encoding (if it is given by name).\n\"\"\"\n- aux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n- FASTABase.__init__(self, ignore_comments, parser)\n- self.encoding = encoding\n+ super(FASTA, self).__init__(source, mode='r', parser_func=self._read, pass_file=False, args=(), kwargs={},\n+ encoding=encoding, ignore_comments=ignore_comments, parser=parser)\ndef _read(self):\naccumulated_strings = []\n@@ -214,7 +215,7 @@ def _reconstruct(cls, args, kwargs):\nreturn cls(*args, **kwargs)\n-class IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\n+class IndexedFASTA(FASTABase, aux.TaskMappingMixin, aux.IndexedTextReader):\n\"\"\"Indexed FASTA parser. Supports direct indexing by matched labels.\"\"\"\ndelimiter = '\\n>'\nlabel = r'^[\\n]?>(.*)\\s*'\n@@ -251,8 +252,8 @@ class IndexedFASTA(aux.TaskMappingMixin, aux.IndexedTextReader, FASTABase):\nThis in combination with `label` can be used to extract fields from headers.\nHowever, consider using :py:class:`TwoLayerIndexedFASTA` for this purpose.\n\"\"\"\n- aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, **kwargs)\n- FASTABase.__init__(self, ignore_comments, parser)\n+ super(IndexedFASTA, self).__init__(source, ignore_comments=ignore_comments, parser=parser,\n+ parser_func=self._read, pass_file=False, args=(), kwargs={}, **kwargs)\nself._init_args = (source, ignore_comments, parser)\nself._init_kwargs = kwargs\n@@ -304,7 +305,8 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nfull headers are mapped to byte offsets.\nWhen indexed, the key is looked up in both indexes, allowing access by meaningful IDs\n- (like UniProt accession) and by full header string.\"\"\"\n+ (like UniProt accession) and by full header string.\n+ \"\"\"\nheader_group = 1\nheader_pattern = None\ndef __init__(self, source, header_pattern=None, header_group=None,\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -75,8 +75,8 @@ import itertools as it\nimport sys\nfrom . import auxiliary as aux\n-class MGFBase():\n- \"\"\"Abstract class representing an MGF file. Subclasses implement different approaches to parsing.\"\"\"\n+class MGFBase(object):\n+ \"\"\"Abstract mixin class representing an MGF file. Subclasses implement different approaches to parsing.\"\"\"\n_comments = set('#;!/')\n_array = (lambda x, dtype: np.array(x, dtype=dtype)) if np is not None else None\n_ma = (lambda x, dtype: np.ma.masked_equal(np.array(x, dtype=dtype), 0)) if np is not None else None\n@@ -91,7 +91,7 @@ class MGFBase():\nencoding = None\n- def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None):\n+ def __init__(self, source=None, **kwargs):\n\"\"\"Create an MGF file object, set MGF-specific parameters.\nParameters\n@@ -101,33 +101,35 @@ class MGFBase():\nA file object (or file name) with data in MGF format. Default is\n:py:const:`None`, which means read standard input.\n- use_header : bool, optional\n+ use_header : bool, optional, keyword only\nAdd the info from file header to each dict. Spectrum-specific parameters\noverride those from the header in case of conflict.\nDefault is :py:const:`True`.\n- convert_arrays : one of {0, 1, 2}, optional\n+ convert_arrays : one of {0, 1, 2}, optional, keyword only\nIf `0`, m/z, intensities and (possibly) charges will be returned as regular lists.\nIf `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.\nIf `2`, charges will be reported as a masked array (default).\nThe default option is the slowest. `1` and `2` require :py:mod:`numpy`.\n- read_charges : bool, optional\n+ read_charges : bool, optional, keyword only\nIf `True` (default), fragment charges are reported. Disabling it improves performance.\n- dtype : type or str or dict, optional\n+ dtype : type or str or dict, optional, keyword only\ndtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\nKeys should be 'm/z array', 'intensity array' and/or 'charge array'.\n- encoding : str, optional\n+ encoding : str, optional, keyword only\nFile encoding.\n\"\"\"\n- self._use_header = use_header\n- self._convert_arrays = convert_arrays\n+ super(MGFBase, self).__init__(source, **kwargs)\n+ self._use_header = kwargs.pop('use_header', True)\n+ self._convert_arrays = kwargs.pop('convert_arrays', 2)\nif self._convert_arrays and np is None:\nraise aux.PyteomicsError('numpy is required for array conversion')\n- self._read_charges = read_charges\n+ self._read_charges = kwargs.pop('read_charges', True)\n+ dtype = kwargs.pop('dtype', None)\nself._dtype_dict = dtype if isinstance(dtype, dict) else {k: dtype for k in self._array_keys}\nif self._use_header:\nself._read_header()\n@@ -232,7 +234,7 @@ class MGFBase():\nreturn self.get_spectrum(key)\n-class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexSavingTextReader, MGFBase):\n+class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexSavingTextReader):\n\"\"\"\nA class representing an MGF file. Supports the `with` syntax and direct iteration for sequential\nparsing. 
Specific spectra can be accessed by title using the indexing syntax in constant time.\n@@ -260,15 +262,15 @@ class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\nlabel = r'TITLE=([^\\n]*\\S)\\s*'\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\n- dtype=None, encoding='utf-8', block_size=1000000, _skip_index=False):\n- aux.TimeOrderedIndexedReaderMixin.__init__(self, source, self._read, False, (), {}, encoding,\n- block_size, _skip_index=_skip_index)\n- MGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\n+ dtype=None, encoding='utf-8', _skip_index=False, **kwargs):\n+ super(IndexedMGF, self).__init__(source, parser_func=self._read, pass_file=False, args=(), kwargs={},\n+ use_header=use_header, convert_arrays=convert_arrays, read_charges=read_charges,\n+ dtype=dtype, encoding=encoding, _skip_index=_skip_index, **kwargs)\ndef __reduce_ex__(self, protocol):\nreturn (self.__class__,\n- (self._source_init, False, self._convert_arrays,\n- self._read_charges, self._dtype_dict, self.encoding, self.block_size, True),\n+ (self._source_init, False, self._convert_arrays, self._read_charges,\n+ self._dtype_dict, self.encoding, True),\nself.__getstate__())\ndef __getstate__(self):\n@@ -279,8 +281,8 @@ class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\ndef __setstate__(self, state):\nsuper(IndexedMGF, self).__setstate__(state)\n- self._use_header = state['use_header']\nself._header = state['header']\n+ self._use_header = state['use_header']\n@aux._keepstate_method\ndef _read_header(self):\n@@ -311,7 +313,7 @@ class IndexedMGF(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\nraise aux.PyteomicsError('RT information not found.')\n-class MGF(aux.FileReader, MGFBase):\n+class MGF(MGFBase, aux.FileReader):\n\"\"\"\nA class representing an MGF file. Supports the `with` syntax and direct iteration for sequential\nparsing. Specific spectra can be accessed by title using the indexing syntax (if the file is seekable),\n@@ -336,9 +338,9 @@ class MGF(aux.FileReader, MGFBase):\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,\ndtype=None, encoding=None):\n- aux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n- MGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\n- self.encoding = encoding\n+ super(MGF, self).__init__(source, mode='r', parser_func=self._read, pass_file=False, args=(), kwargs={},\n+ encoding=encoding, use_header=use_header, convert_arrays=convert_arrays, read_charges=read_charges, dtype=dtype)\n+ # self.encoding = encoding\n@aux._keepstate_method\ndef _read_header(self):\n@@ -422,7 +424,6 @@ def read(*args, **kwargs):\nuse_index = kwargs.pop('use_index', None)\nuse_index = aux._check_use_index(source, use_index, True)\ntp = IndexedMGF if use_index else MGF\n-\nreturn tp(*args, **kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/ms1.py",
"new_path": "pyteomics/ms1.py",
"diff": "@@ -60,7 +60,8 @@ class MS1Base(object):\n\"\"\"Abstract class representing an MS1 file. Subclasses implement different approaches to parsing.\"\"\"\n_array_keys = ['m/z array', 'intensity array']\n- def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None):\n+ def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None, **kwargs):\n+ super(MS1Base, self).__init__(source, **kwargs)\nif convert_arrays and np is None:\nraise aux.PyteomicsError('numpy is required for array conversion')\nself._convert_arrays = convert_arrays\n@@ -72,6 +73,7 @@ class MS1Base(object):\nself._header = None\nself._source_name = getattr(source, 'name', str(source))\n+\n@property\ndef header(self):\nreturn self._header\n@@ -161,7 +163,7 @@ class MS1Base(object):\nreturn self._make_scan(params, masses, intensities)\n-class MS1(aux.FileReader, MS1Base):\n+class MS1(MS1Base, aux.FileReader):\n\"\"\"\nA class representing an MS1 file. Supports the `with` syntax and direct iteration for sequential\nparsing.\n@@ -179,10 +181,12 @@ class MS1(aux.FileReader, MS1Base):\nThe file header.\n\"\"\"\n- def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None, encoding=None):\n- aux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n- MS1Base.__init__(self, source, use_header, convert_arrays, dtype)\n- self.encoding = encoding\n+ def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None, encoding=None, **kwargs):\n+ super(MS1, self).__init__(source, use_header=use_header, convert_arrays=convert_arrays, dtype=dtype, encoding=encoding,\n+ mode='r', parser_func=self._read, pass_file=False, args=(), kwargs={})\n+ # aux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n+ # MS1Base.__init__(self, source, use_header, convert_arrays, dtype)\n+ # self.encoding = encoding\n@aux._keepstate_method\ndef _read_header(self):\n@@ -237,7 +241,7 @@ class MS1(aux.FileReader, MS1Base):\nyield self._make_scan(params, masses, intensities)\n-class IndexedMS1(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexedTextReader, MS1Base):\n+class IndexedMS1(MS1Base, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexedTextReader):\n\"\"\"\nA class representing an MS1 file. Supports the `with` syntax and direct iteration for sequential\nparsing. 
Specific spectra can be accessed by title using the indexing syntax in constant time.\n@@ -269,11 +273,12 @@ class IndexedMS1(aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.In\ndelimiter = '\\nS'\nlabel = r'^[\\n]?S\\s+(\\S+)'\n- def __init__(self, source=None, use_header=False, convert_arrays=True,\n- dtype=None, encoding='utf-8', block_size=1000000, _skip_index=False):\n- aux.TimeOrderedIndexedReaderMixin.__init__(self, source, self._read, False, (), {}, encoding,\n- block_size, _skip_index=_skip_index)\n- MS1Base.__init__(self, source, use_header, convert_arrays, dtype)\n+ def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None, encoding='utf-8', _skip_index=False, **kwargs):\n+ super(IndexedMS1, self).__init__(source, use_header=use_header, convert_arrays=convert_arrays, dtype=dtype, encoding=encoding,\n+ parser_func=self._read, pass_file=False, args=(), kwargs={}, _skip_index=_skip_index)\n+ # aux.TimeOrderedIndexedReaderMixin.__init__(self, source, self._read, False, (), {}, encoding,\n+ # block_size, _skip_index=_skip_index)\n+ # MS1Base.__init__(self, source, use_header, convert_arrays, dtype)\ndef __reduce_ex__(self, protocol):\nreturn (self.__class__,\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -100,7 +100,7 @@ from . import auxiliary as aux\nfrom . import xml, _schema_defaults\n-class MzIdentML(xml.IndexSavingXML, xml.MultiProcessingXML):\n+class MzIdentML(xml.MultiProcessingXML, xml.IndexSavingXML):\n\"\"\"Parser class for MzIdentML files.\"\"\"\nfile_format = 'mzIdentML'\n_root_element = 'MzIdentML'\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -238,9 +238,8 @@ class XML(FileReader):\n(e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).\n\"\"\"\n- super(XML, self).__init__(source, 'rb', self.iterfind, False,\n- (self._default_iter_tag,), kwargs)\n-\n+ super(XML, self).__init__(source, mode='rb', parser_func=self.iterfind, pass_file=False,\n+ args=(self._default_iter_tag,), kwargs=kwargs)\nif iterative:\nself._tree = None\nelse:\n@@ -1017,6 +1016,7 @@ class IndexedXML(IndexedReaderMixin, XML):\nif use_index:\nbuild_id_cache = False\nsuper(IndexedXML, self).__init__(source, read_schema, iterative, build_id_cache, *args, **kwargs)\n+\nself._offset_index = HierarchicalOffsetIndex()\nself._build_index()\n@@ -1129,7 +1129,6 @@ class IndexSavingXML(IndexSavingMixin, IndexedXML):\nself._offset_index = index\n-\nclass ArrayConversionMixin(BinaryDataArrayTransformer):\n_dtype_dict = {}\n_array_keys = ['m/z array', 'intensity array']\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -29,7 +29,7 @@ class MGFTest(unittest.TestCase):\nself.tmpfile.close()\ndef test_read(self):\n- for func in [mgf.read, mgf.MGF]:\n+ for func in [mgf.read, mgf.MGF, mgf.IndexedMGF]:\n# http://stackoverflow.com/q/14246983/1258041\nself.assertEqual(data.mgf_spectra_long, list(func(self.path)))\nself.assertEqual(data.mgf_spectra_short, list(func(self.path, False)))\n@@ -38,6 +38,10 @@ class MGFTest(unittest.TestCase):\nwith func(self.path, False) as reader:\nself.assertEqual(data.mgf_spectra_short, list(reader))\n+ def test_read_source_kw(self):\n+ for func in [mgf.read, mgf.MGF, mgf.IndexedMGF]:\n+ self.assertEqual(data.mgf_spectra_long, list(func(source=self.path)))\n+\ndef test_read_decoding(self):\nfor func in [mgf.read, mgf.MGF, mgf.IndexedMGF]:\nself.assertEqual(data.mgf_spectra_long_decoded,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -34,6 +34,12 @@ class MzmlTest(unittest.TestCase):\nwith MzML(self.path) as f:\nself.assertEqual(sorted(mzml_spectra, key=key), sorted(list(f.map()), key=key))\n+ def test_map_qsize(self):\n+ key = op.itemgetter('index')\n+ with MzML(self.path, queue_size=1000) as f:\n+ self.assertEqual(f._queue_size, 1000)\n+ self.assertEqual(sorted(mzml_spectra, key=key), sorted(list(f.map()), key=key))\n+\ndef test_decoding(self):\nwith MzML(self.path, decode_binary=True) as reader:\nspectrum = next(reader)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix #44 by adding new keyword arguments for map-supporting readers: queue_size, queue_timeout and processes.
Make __init__ signatures more cooperative throughout the class tree.
Make sure that index is not constructed in index-saving readers even if file with offsets exists. |
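A sketch mirroring the added `test_map_qsize` test: the IPC parameters are now set on the reader and picked up by `map()`; `'spectra.mzML'` is a placeholder path:

```python
from pyteomics import mzml

# queue_size and processes are among the new reader-level keyword arguments.
with mzml.MzML('spectra.mzML', queue_size=1000, processes=2) as reader:
    for item in reader.map():  # map() now uses the reader-level IPC settings
        pass                   # with no target, each item is a spectrum dict
```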
377,522 | 02.12.2019 19:51:43 | -10,800 | ece2047158e5c4843fe734a5a27c6907a2a2d394 | Trying to configure pipelines | [
{
"change_type": "MODIFY",
"old_path": "bitbucket-pipelines.yml",
"new_path": "bitbucket-pipelines.yml",
"diff": "@@ -7,6 +7,7 @@ pipelines:\ncaches:\n- pip\nscript:\n- - pip install lxml numpy sqlalchemy pandas pynumpress\n+ - pip install lxml numpy sqlalchemy pandas\n+ - pip install pynumpress\n- cd tests; PYTHONPATH=.. find . -name 'test_*.py' -exec python {} \\;\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Trying to configure pipelines |
377,522 | 02.12.2019 19:55:10 | -10,800 | 5217a5ebcec7ce81341784d323a30025166808cf | Try testing against two versions of Python | [
{
"change_type": "MODIFY",
"old_path": "bitbucket-pipelines.yml",
"new_path": "bitbucket-pipelines.yml",
"diff": "-image: python:3.8\n-\npipelines:\ndefault:\n- step:\n+ image: python:3.8\ncaches:\n- pip\nscript:\n@@ -11,3 +10,11 @@ pipelines:\n- pip install pynumpress\n- cd tests; PYTHONPATH=.. find . -name 'test_*.py' -exec python {} \\;\n+ - step:\n+ image: python:2.7\n+ caches:\n+ - pip\n+ script:\n+ - pip install lxml numpy sqlalchemy pandas\n+ - pip install pynumpress\n+ - cd tests; PYTHONPATH=.. find . -name 'test_*.py' -exec python {} \\;\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Try testing against two versions of Python |
377,522 | 02.12.2019 19:31:45 | 0 | 8afcaf4bee711ea67586384279263a40e755678e | Add cython to the build pipeline requirements | [
{
"change_type": "MODIFY",
"old_path": "bitbucket-pipelines.yml",
"new_path": "bitbucket-pipelines.yml",
"diff": "@@ -8,7 +8,7 @@ pipelines:\ncaches:\n- pip\nscript:\n- - pip install lxml numpy sqlalchemy pandas\n+ - pip install lxml numpy sqlalchemy pandas cython\n- pip install pynumpress\n- cd tests; find . -name 'test_*.py' -print0 | xargs -0 -n1 env PYTHONPATH=.. python\n@@ -18,6 +18,6 @@ pipelines:\ncaches:\n- pip\nscript:\n- - pip install lxml numpy sqlalchemy pandas\n+ - pip install lxml numpy sqlalchemy pandas cython\n- pip install pynumpress\n- cd tests; find . -name 'test_*.py' -print0 | xargs -0 -n1 env PYTHONPATH=.. python\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add cython to the build pipeline requirements |
377,522 | 03.12.2019 18:04:46 | -10,800 | 7e75a8849062d31f602da850727bb42441a56926 | A sloppy fix for auxiliary doc | [
{
"change_type": "MODIFY",
"old_path": "doc/source/api.rst",
"new_path": "doc/source/api.rst",
"diff": "@@ -29,6 +29,6 @@ Contents:\napi/featurexml\napi/trafoxml\napi/traml\n- api/auxiliary\napi/pylab_aux\napi/xml\n+ api/auxiliary\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/api/auxiliary.rst",
"new_path": "doc/source/api/auxiliary.rst",
"diff": "-.. automodule:: pyteomics.auxiliary\n- :exclude-members: Charge, ChargeList, BasicComposition, FileReader\n+auxiliary - common functions and objects\n+========================================\n- .. autofunction:: cvquery\n\\ No newline at end of file\n+Math\n+----\n+\n+ :py:func:`linear_regression_vertical` - a wrapper for NumPy linear regression,\n+ minimizes the sum of squares of *y* errors.\n+\n+ :py:func:`linear_regression` - alias for :py:func:`linear_regression_vertical`.\n+\n+ :py:func:`linear_regression_perpendicular` - a wrapper for NumPy linear regression,\n+ minimizes the sum of squares of (perpendicular) distances between the points and the line.\n+\n+\n+Target-Decoy Approach\n+---------------------\n+\n+ :py:func:`qvalues` - estimate q-values for a set of PSMs.\n+\n+ :py:func:`!filter` - filter PSMs to specified FDR level using TDA or given PEPs.\n+\n+ :py:func:`filter.chain` - a chained version of :py:func:`!filter`.\n+\n+ :py:func:`fdr` - estimate FDR in a set of PSMs using TDA or given PEPs.\n+\n+Project infrastructure\n+----------------------\n+\n+ :py:class:`PyteomicsError` - a pyteomics-specific exception.\n+\n+Helpers\n+-------\n+\n+ :py:class:`Charge` - a subclass of :py:class:`int` for charge states.\n+\n+ :py:class:`ChargeList` - a subclass of :py:class:`list` for lists of charges.\n+\n+ :py:func:`print_tree` - display the structure of a complex nested\n+ :py:class:`dict`.\n+\n+ :py:func:`memoize` - makes a\n+ `memoization <http://stackoverflow.com/a/1988826/1258041>`_\n+ `function decorator <http://stackoverflow.com/a/1594484/1258041>`_.\n+\n+ :py:func:`cvquery` - traverse an arbitrarily nested dictionary looking\n+ for keys which are :py:class:`cvstr` instances, or objects\n+ with an attribute called ``accession``.\n+\n+-------------------------------------------------------------------------------\n+\n+\n+.. automodule :: pyteomics.auxiliary.math\n+\n+.. automodule :: pyteomics.auxiliary.target_decoy\n+\n+.. automodule :: pyteomics.auxiliary.utils\n+\n+.. automodule :: pyteomics.auxiliary.structures\n+\n+.. automodule :: pyteomics.auxiliary.file_helpers\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | A sloppy fix for auxiliary doc |
377,522 | 07.12.2019 01:12:30 | -10,800 | 399ad88977da95e7a868ec635c9b36f9031cfa96 | Add changelog entry for new TaskMappingMixin kwargs | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -24,6 +24,8 @@ dev\n:py:meth:`map` calls will also operate on the full index.\n+ - New keyword arguments `queue_size`, `queue_timeout` and `processes` for indexed parsers with support for :py:meth:`map`.\n+\n- New method :py:meth:`mass.Unimod.by_id`.\nAlso, :py:class:`mass.Unimod` now supports dict-like queries with record IDs.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add changelog entry for new TaskMappingMixin kwargs |
377,522 | 24.12.2019 17:14:42 | -10,800 | 5982cab147056e9c87330560d1e8bb5edf2a27e5 | Add standard error calculation for fdr | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -31,6 +31,8 @@ dev\n- Reduce memory footprint for unit primitives (PR #35 by Joshua Klein).\n+ - New functions :py:func:`pyteomics.auxiliary.sigma_T` and :py:func:`pyteomics.auxiliary.sigma_fdr`.\n+\n- Fix issues #44, #46, #47, #48.\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-4.2.0dev1\n\\ No newline at end of file\n+4.2.0dev2\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -30,7 +30,7 @@ from .target_decoy import (\n_construct_dtype, _make_qvalues, _make_filter,\n_itercontext, _iter, qvalues, filter, log_factorial,\n_expectation, _confidence_value, _log_pi_r,\n- _log_pi, _make_fdr, fdr)\n+ _log_pi, _make_fdr, fdr, sigma_T, sigma_fdr)\nfrom .utils import (\nprint_tree, memoize, BinaryDataArrayTransformer,\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/target_decoy.py",
"new_path": "pyteomics/auxiliary/target_decoy.py",
"diff": "@@ -787,6 +787,34 @@ def _log_pi(d, k, p=0.5):\nreturn _log_pi_r(d, k, p) + (d + 1) * math.log(1 - p)\n+def _count_psms(psms, is_decoy, pep, decoy_prefix, decoy_suffix, is_decoy_prefix, is_decoy_suffix):\n+ total, decoy = 0, 0\n+ if pep is not None:\n+ is_decoy = pep\n+ elif is_decoy is None:\n+ if decoy_suffix is not None:\n+ is_decoy = lambda x: is_decoy_suffix(x, decoy_suffix)\n+ else:\n+ is_decoy = lambda x: is_decoy_prefix(x, decoy_prefix)\n+ if isinstance(is_decoy, basestring):\n+ decoy = psms[is_decoy].sum()\n+ total = psms.shape[0]\n+ elif callable(is_decoy):\n+ for psm in psms:\n+ total += 1\n+ d = is_decoy(psm)\n+ decoy += d if pep is not None else bool(d)\n+ else:\n+ if not isinstance(is_decoy, (Sized, Container)):\n+ is_decoy = list(is_decoy)\n+ if pep is not None:\n+ decoy = sum(is_decoy)\n+ else:\n+ decoy = sum(map(bool, is_decoy))\n+ total = len(is_decoy)\n+ return decoy, total\n+\n+\ndef _make_fdr(is_decoy_prefix, is_decoy_suffix):\ndef fdr(psms=None, formula=1, is_decoy=None, ratio=1, correction=0, pep=None, decoy_prefix='DECOY_', decoy_suffix=None):\n\"\"\"Estimate FDR of a data set using TDA or given PEP values.\n@@ -890,30 +918,8 @@ def _make_fdr(is_decoy_prefix, is_decoy_suffix):\n\"\"\"\nif formula not in {1, 2}:\nraise PyteomicsError('`formula` must be either 1 or 2.')\n- total, decoy = 0, 0\n- if pep is not None:\n- is_decoy = pep\n- elif is_decoy is None:\n- if decoy_suffix is not None:\n- is_decoy = lambda x: is_decoy_suffix(x, decoy_suffix)\n- else:\n- is_decoy = lambda x: is_decoy_prefix(x, decoy_prefix)\n- if isinstance(is_decoy, basestring):\n- decoy = psms[is_decoy].sum()\n- total = psms.shape[0]\n- elif callable(is_decoy):\n- for psm in psms:\n- total += 1\n- d = is_decoy(psm)\n- decoy += d if pep is not None else bool(d)\n- else:\n- if not isinstance(is_decoy, (Sized, Container)):\n- is_decoy = list(is_decoy)\n- if pep is not None:\n- decoy = sum(is_decoy)\n- else:\n- decoy = sum(map(bool, is_decoy))\n- total = len(is_decoy)\n+\n+ decoy, total = _count_psms(psms, is_decoy, pep, decoy_prefix, decoy_suffix, is_decoy_prefix, is_decoy_suffix)\nif pep is not None:\nreturn float(decoy) / total\ntfalse = decoy\n@@ -950,3 +956,35 @@ def _make_fdr(is_decoy_prefix, is_decoy_suffix):\nfdr = _make_fdr(None, None)\n+\n+def _sigma_T(decoy, ratio):\n+ return math.sqrt((decoy + 1) * (ratio + 1) / (ratio * ratio))\n+\n+def sigma_T(psms, is_decoy, ratio=1):\n+ \"\"\"Calculates the standard error for the number of false positive target PSMs.\n+\n+ The formula is::\n+\n+ .. math ::\n+\n+ \\\\sigma(T) = \\\\sqrt{\\\\frac{(d + 1) \\\\cdot {p}}{(1 - p)^{2}}} = \\\\sqrt{\\\\frac{d+1}{r^{2}} \\\\cdot (r+1)}\n+\n+ This estimation is accurate for low FDRs.\n+ See the `article <http://dx.doi.org/10.1021/acs.jproteome.6b00144>`_ for more details.\n+ \"\"\"\n+ decoy, total = _count_psms(psms, is_decoy, None, None, None, None, None)\n+ return _sigma_T(decoy, ratio)\n+\n+def sigma_fdr(psms=None, formula=1, is_decoy=None, ratio=1):\n+ \"\"\"Calculates the standard error of FDR using the formula for negative binomial distribution.\n+ See :py:func:`sigma_T` for math. 
This estimation is accurate for low FDRs.\n+ See also the `article <http://dx.doi.org/10.1021/acs.jproteome.6b00144>`_ for more details.\n+ \"\"\"\n+\n+ if formula not in {1, 2}:\n+ raise PyteomicsError('`formula` must be either 1 or 2.')\n+ decoy, total = _count_psms(psms, is_decoy, None, None, None, None, None)\n+ sigmaT = _sigma_T(decoy, ratio)\n+ if formula == 1:\n+ return sigmaT / (total - decoy) / ratio\n+ return sigmaT / total / ratio\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -782,6 +782,12 @@ class FDRTest(unittest.TestCase):\npep = [self.pep((s, l, p)) for s, l, p in psms]\nself._run_check(psms, is_decoy=isd, pep=pep)\n+ def test_sigma_T(self):\n+ self.assertAlmostEqual(aux.sigma_T(psms, is_decoy=self.is_decoy), 7.348469228)\n+\n+ def test_sigma_fdr(self):\n+ self.assertAlmostEqual(aux.sigma_fdr(psms, is_decoy=self.is_decoy), 0.28263343)\n+\nclass RegressionTests(unittest.TestCase):\nx = [1, 2, 3]\ny = [3, 5, 7]\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add standard error calculation for fdr |
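A minimal sketch of the new estimators, following the added tests; `psms` and `is_decoy` are assumed to be defined as in tests/test_auxiliary.py:

```python
from pyteomics import auxiliary as aux

# Standard error of the number of false positive target PSMs, and of FDR;
# the values in the comments are the ones asserted in the new tests.
print(aux.sigma_T(psms, is_decoy=is_decoy))    # ~7.3485 for the test data
print(aux.sigma_fdr(psms, is_decoy=is_decoy))  # ~0.2826 for the test data
```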
377,524 | 08.01.2020 16:16:00 | 0 | 2be6d53f66ba9e971e7be7b1269dd264364b60e1 | Change std_parsers key from uniprotkb to uniprot | [
{
"change_type": "MODIFY",
"old_path": "doc/source/data/text.rst",
"new_path": "doc/source/data/text.rst",
"diff": "@@ -227,7 +227,7 @@ parsers that can be used for this purpose.\n.. code-block:: python\n- >>> with fasta.read('HUMAN.fasta', parser=fasta.std_parsers['uniprotkb']) as r:\n+ >>> with fasta.read('HUMAN.fasta', parser=fasta.std_parsers['uniprot']) as r:\n>>> print(next(r).description)\n{'PE': 2, 'gene_id': 'LCE6A', 'GN': 'LCE6A', 'id': 'A0A183', 'taxon': 'HUMAN',\n'SV': 1, 'OS': 'Homo sapiens', 'entry': 'LCE6A_HUMAN',\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Change std_parsers key from uniprotkb to uniprot |
377,522 | 14.01.2020 18:10:51 | -3,600 | 17681e50d727356c06a4291c45b6c108ff99f148 | Add mgf test that breaks after | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -124,6 +124,12 @@ class MGFTest(unittest.TestCase):\nself.assertEqual(data.mgf_spectra_long[1], f.get_spectrum(key))\nself.assertEqual(data.mgf_spectra_long[1], mgf.get_spectrum(self.path, key))\n+ def test_read_list(self):\n+ key = ['Spectrum 2', 'Spectrum 1']\n+ with mgf.IndexedMGF(self.path) as f:\n+ self.assertEqual(data.mgf_spectra_long[::-1], f[key])\n+\n+\ndef test_indexedmgf_picklable(self):\nwith mgf.IndexedMGF(self.path) as reader:\nspec = pickle.dumps(reader)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add mgf test that breaks after de236b0 |
377,522 | 14.01.2020 18:21:13 | -3,600 | 0ea70228fa54782d9fcb7d0ca52681c8e3fd2ff5 | Fix the broken MGF behavior by moving __getitem__ override from MGFBase to MGF | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -230,9 +230,6 @@ class MGFBase(object):\ndef get_spectrum(self, title):\nraise NotImplementedError()\n- def __getitem__(self, key):\n- return self.get_spectrum(key)\n-\nclass IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexSavingTextReader):\n\"\"\"\n@@ -363,6 +360,9 @@ class MGF(MGFBase, aux.FileReader):\nspectrum['params']['title'] = title\nreturn spectrum\n+ def __getitem__(self, key):\n+ return self.get_spectrum(key)\n+\ndef read(*args, **kwargs):\n\"\"\"Returns a reader for a given MGF file. Most of the parameters repeat the\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -129,7 +129,6 @@ class MGFTest(unittest.TestCase):\nwith mgf.IndexedMGF(self.path) as f:\nself.assertEqual(data.mgf_spectra_long[::-1], f[key])\n-\ndef test_indexedmgf_picklable(self):\nwith mgf.IndexedMGF(self.path) as reader:\nspec = pickle.dumps(reader)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix the broken MGF behavior by moving __getitem__ override from MGFBase to MGF |
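A sketch of the behavior this restores: `IndexedMGF` keeps its inherited indexing (including lists of titles), while the sequential `MGF` resolves single titles through `get_spectrum`; `'test.mgf'` is the test file name from the suite:

```python
from pyteomics import mgf

with mgf.IndexedMGF('test.mgf') as reader:
    pair = reader[['Spectrum 2', 'Spectrum 1']]  # list indexing works again

with mgf.MGF('test.mgf') as reader:
    one = reader['Spectrum 1']                   # delegates to get_spectrum
```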
377,522 | 04.02.2020 15:25:41 | -10,800 | 0a080feadba274fd5767e1574365b51f28c8b64b | Do not define BinaryDataArrayTransformer if numpy is not imported | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/utils.py",
"new_path": "pyteomics/auxiliary/utils.py",
"diff": "@@ -112,6 +112,7 @@ if pynumpress:\n'MS-Numpress linear prediction compression followed by zlib compression': _zlibNumpress(pynumpress.decode_linear),\n})\n+if np is not None:\nclass BinaryDataArrayTransformer(object):\n\"\"\"A base class that provides methods for reading\nbase64-encoded binary arrays.\n@@ -192,3 +193,6 @@ class BinaryDataArrayTransformer(object):\nbinary = bytearray(binary)\narray = self._transform_buffer(binary, dtype)\nreturn array\n+\n+else:\n+ BinaryDataArrayTransformer = None\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Do not define BinaryDataArrayTransformer if numpy is not imported |
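The guarded-definition pattern applied here, shown standalone (names match the commit; the class body is elided):

```python
try:
    import numpy as np
except ImportError:
    np = None

if np is not None:
    class BinaryDataArrayTransformer(object):
        """Reads base64-encoded binary arrays (full body elided)."""
else:
    # Keep the name importable so dependent modules can check for it.
    BinaryDataArrayTransformer = None
```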
377,522 | 01.03.2020 01:29:17 | -10,800 | 1b9c011a7e184ce143a6b4946c48a267e538effb | Forget bitbucket pipelines | [
{
"change_type": "DELETE",
"old_path": "bitbucket-pipelines.yml",
"new_path": null,
"diff": "-\n-pipelines:\n- default:\n- - parallel:\n- - step:\n- name: Python 3.8\n- image: python:3.8\n- caches:\n- - pip\n- script:\n- - pip install lxml numpy sqlalchemy pandas cython\n- - pip install pynumpress\n- - cd tests; find . -name 'test_*.py' -print0 | xargs -0 -n1 env PYTHONPATH=.. python\n-\n- - step:\n- name: Python 2.7\n- image: python:2.7\n- caches:\n- - pip\n- script:\n- - pip install lxml numpy sqlalchemy pandas cython\n- - pip install pynumpress\n- - cd tests; find . -name 'test_*.py' -print0 | xargs -0 -n1 env PYTHONPATH=.. python\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Forget bitbucket pipelines |
377,522 | 01.03.2020 01:50:39 | -10,800 | 6bba4e8f6d06191bb4a6c81d741a5d684b66fca1 | Try setting up a testing workflow | [
{
"change_type": "ADD",
"old_path": null,
"new_path": ".github/workflows/pythonpackage.yml",
"diff": "+name: Python package\n+\n+on: [push, pull_request]\n+\n+jobs:\n+ build:\n+\n+ runs-on: ubuntu-latest\n+ strategy:\n+ matrix:\n+ python-version: [2.7, 3.5, 3.6, 3.7, 3.8]\n+\n+ steps:\n+ - uses: actions/checkout@v2\n+ - name: Set up Python ${{ matrix.python-version }}\n+ uses: actions/setup-python@v1\n+ with:\n+ python-version: ${{ matrix.python-version }}\n+ - name: Install dependencies\n+ run: |\n+ python -m pip install --upgrade pip\n+ pip install lxml numpy sqlalchemy pandas cython\n+ pip install pynumpress\n+ - name: Lint with flake8\n+ run: |\n+ pip install flake8\n+ # stop the build if there are Python syntax errors or undefined names\n+ flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics\n+ # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide\n+ flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics\n+ - name: Run the tests\n+ run: |\n+ cd tests; find . -name 'test_*.py' -print0 | xargs -0 -n1 env PYTHONPATH=.. python\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Try setting up a testing workflow |