author (int64, 658 to 755k) | date (string, length 19) | timezone (int64, -46,800 to 43.2k) | hash (string, length 40) | message (string, 5 to 490 chars) | mods (list) | language (20 classes) | license (3 classes) | repo (string, 5 to 68 chars) | original_message (string, 12 to 491 chars)
---|---|---|---|---|---|---|---|---|---
377,522 | 17.04.2017 15:38:09 | -10,800 | a565470cbc0ceace6be904b8935bcce2c8188fca | Workaround for older pandas weirdness | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.4.2a1\n\\ No newline at end of file\n+3.4.2a2\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary.py",
"new_path": "pyteomics/auxiliary.py",
"diff": "@@ -692,7 +692,7 @@ def _qvalues_df(psms, keyf, isdecoy, **kwargs):\nq = _calculate_qvalues(psms[keyf].values, psms[isdecoy].values, peps is not None, **kwargs)\nif remove_decoy:\n- q = q[~psms[isdecoy]]\n+ q = q[~psms[isdecoy].values]\npsms = psms[~psms[isdecoy]].copy()\nif not full:\nif peps is None:\n@@ -708,7 +708,7 @@ def _qvalues_df(psms, keyf, isdecoy, **kwargs):\npsms = psms_\nelse:\nq_label = kwargs['q_label']\n- psms.loc[:, q_label] = q\n+ psms[q_label] = q\nreturn psms\ndef _decoy_or_pep_label(**kwargs):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -217,7 +217,6 @@ class FilterTest(unittest.TestCase):\ndef _run_check_pep(self, *args, **kwargs):\nkey = kwargs.pop('key', self.key)\n- # is_decoy = kwargs.get('is_decoy', self.is_decoy)\nf11 = aux.filter(*args, key=key, fdr=0.02, **kwargs)\nf12 = aux.filter(*args, fdr=0.02, **kwargs)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Workaround for older pandas weirdness |
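The workaround above swaps boolean indexing of a NumPy array by a pandas Series (`q[~psms[isdecoy]]`) for indexing by the Series' underlying array (`.values`). A minimal sketch of the pattern, using a hypothetical three-row PSM table; the exact failure mode in older pandas is not spelled out in the diff, but indexing with a plain boolean ndarray is purely positional and behaves the same on any pandas/NumPy combination:

```python
import numpy as np
import pandas as pd

# Hypothetical PSM table mirroring the shapes used in _qvalues_df.
psms = pd.DataFrame({'score': [0.1, 0.5, 0.9], 'isDecoy': [False, True, False]})
q = np.array([0.01, 0.02, 0.03])   # q-values aligned positionally with the rows

mask = ~psms['isDecoy'].values     # plain boolean ndarray, no index attached
print(q[mask])                     # [0.01 0.03]
```

The same commit also drops `psms.loc[:, q_label] = q` in favor of plain column assignment, presumably to sidestep older-pandas quirks when creating a brand-new column.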
377,522 | 17.04.2017 18:42:59 | -10,800 | 212dcfeef7dda82e22844be9ead2477df5af3b08 | Draft mzid.DataFrame | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -298,6 +298,78 @@ def is_decoy(psm):\nreturn all(pe['isDecoy'] for sii in psm['SpectrumIdentificationItem']\nfor pe in sii['PeptideEvidenceRef'])\n+\n+def DataFrame(*args, **kwargs):\n+ \"\"\"Read MzIdentML files into a :py:class:`pandas.DataFrame`.\n+\n+ Requires :py:mod:`pandas`.\n+\n+ .. warning :: Only the first 'SpectrumIdentificationItem' element is considered in every\n+ 'SpectrumIdentificationResult'.\n+\n+ Parameters\n+ ----------\n+ *args, **kwargs : passed to :py:func:`chain`\n+\n+ sep : str or None, optional\n+ Split protein descriptions by this delimiter, if given.\n+\n+ Returns\n+ -------\n+ out : pandas.DataFrame\n+ \"\"\"\n+ import pandas as pd\n+ data = []\n+\n+ sep = kwargs.pop('sep', None)\n+ with chain(*args, **kwargs) as f:\n+ for item in f:\n+ info = {}\n+ for k, v in item.items():\n+ if isinstance(v, (str, int, float)):\n+ info[k] = v\n+ sii = item.get('SpectrumIdentificationItem', [None])[0]\n+ if sii is not None:\n+ info.update((k, v) for k, v in sii.items() if isinstance(v, (str, int, float)))\n+ evref = sii.get('PeptideEvidenceRef', [None])[0]\n+ if evref is not None:\n+ info.update((k, v) for k, v in evref.items() if isinstance(v, (str, int, float, list)))\n+ data.append(info)\n+ df = pd.DataFrame(data)\n+ if sep is not None and 'protein description' in df:\n+ df['protein description'] = df['protein description'].str.split(sep)\n+ return df\n+\n+def filter_df(*args, **kwargs):\n+ \"\"\"Read MzIdentML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.\n+ Positional arguments can be MzIdentML files or DataFrames.\n+\n+ Requires :py:mod:`pandas`.\n+\n+ .. warning :: Only the first 'SpectrumIdentificationItem' element is considered in every\n+ 'SpectrumIdentificationResult'.\n+\n+ Parameters\n+ ----------\n+ key : str / iterable / callable, optional\n+ Default is 'mascot:expectation value'.\n+ is_decoy : str / iterable / callable, optional\n+ Default is 'isDecoy'.\n+ *args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+\n+ Returns\n+ -------\n+ out : pandas.DataFrame\n+ \"\"\"\n+ import pandas as pd\n+ kwargs.setdefault('key', 'mascot:expectation value')\n+ kwargs.setdefault('is_decoy', 'isDecoy')\n+ if all(isinstance(arg, pd.DataFrame) for arg in args):\n+ df = pd.concat(args)\n+ else:\n+ df = DataFrame(*args, **kwargs)\n+ return aux.filter(df, **kwargs)\n+\nfdr = aux._make_fdr(is_decoy)\n_key = lambda x: min(\nsii['mascot:expectation value'] for sii in x['SpectrumIdentificationItem'])\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Draft mzid.DataFrame |
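A usage sketch for the two draft functions; `'search.mzid'` is a placeholder path, the defaults shown are the ones `filter_df` sets itself, and per the changelog entry below the behavior "may be refined later on":

```python
from pyteomics import mzid

# Flatten an MzIdentML file into a table; per the docstring above, only the
# first SpectrumIdentificationItem of each result is kept.
df = mzid.DataFrame('search.mzid', sep=';')   # 'search.mzid' is a placeholder

# filter_df accepts files or DataFrames and defaults to
# key='mascot:expectation value', is_decoy='isDecoy'.
filtered = mzid.filter_df(df, fdr=0.01)
print(filtered.shape)
```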
377,522 | 17.04.2017 18:43:48 | -10,800 | e84d394069248ad138462d8fdde76392ae986e2c | Make pepxml and tandem read() accept (and ignore) arbitrary kwargs.
This is convenient for qvalues functions | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "3.4.2\n-----\n+ Add :py:func:`pyteomics.mzid.DataFrame` and :py:func:`pyteomics.mzid.filter_df` functions for convenience.\n+ Their behavior may be refined later on.\n+\nChanges in behavior of :py:func:`pyteomics.auxiliary.filter` and :py:func:`pyteomics.auxiliary.qvalues`:\n- both functions now always return DataFrames with :py:class:`pandas.DataFrame` input and `full_output=True`.\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.4.2a3\n\\ No newline at end of file\n+3.4.2a4\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -191,7 +191,7 @@ class PepXML(xml.XML):\ninfo['search_hit'].sort(key=lambda x: x['hit_rank'])\nreturn info\n-def read(source, read_schema=True, iterative=True):\n+def read(source, read_schema=True, iterative=True, **kwargs):\n\"\"\"Parse `source` and iterate through peptide-spectrum matches.\nParameters\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -155,7 +155,7 @@ class TandemXML(xml.XML):\nnext = __next__\n-def read(source, iterative=True):\n+def read(source, iterative=True, **kwargs):\n\"\"\"Parse `source` and iterate through peptide-spectrum matches.\nParameters\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make pepxml and tandem read() accept (and ignore) arbitrary kwargs.
This is convenient for qvalues functions |
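The change is tiny, but it is what lets one generic helper drive every reader with the same keyword set. A minimal sketch of the pattern (toy functions, not the pyteomics API):

```python
# Toy illustration: both readers tolerate each other's options, so a generic
# helper can forward a single kwargs dict to either of them.
def read_pepxml(source, read_schema=True, iterative=True, **kwargs):
    return ('pepxml', source)

def read_tandem(source, iterative=True, **kwargs):
    return ('tandem', source)

def qvalues_from(reader, source, **kwargs):
    # read_schema only matters to some readers; the others ignore it.
    return reader(source, read_schema=False, **kwargs)

print(qvalues_from(read_tandem, 'results.t.xml'))   # ('tandem', 'results.t.xml')
```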
377,532 | 27.04.2017 17:42:41 | -10,800 | 8e527f08e682e87e33bd0f34ea6c0885c19a34bc | Speed up pepxml module | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -108,15 +108,18 @@ class PepXML(xml.XML):\ndef _get_info_smart(self, element, **kw):\n\"\"\"Extract the info in a smart way depending on the element type\"\"\"\n- name = xml._local_name(element)\nkwargs = dict(kw)\n+ try:\n+ name = kwargs.pop('ename')\n+ except:\n+ name = xml._local_name(element)\nrec = kwargs.pop('recursive', None)\nif name == 'msms_pipeline_analysis':\n- info = self._get_info(element,\n+ info = self._get_info(element, ename=name,\nrecursive=(rec if rec is not None else False),\n**kwargs)\nelse:\n- info = self._get_info(element,\n+ info = self._get_info(element, ename=name,\nrecursive=(rec if rec is not None else True),\n**kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -49,7 +49,7 @@ except ImportError: # Python 3.x\ndef _local_name(element):\n\"\"\"Strip namespace from the XML element's name\"\"\"\nif element.tag and element.tag[0] == '{':\n- return element.tag.rsplit('}', 1)[1]\n+ return element.tag.rpartition('}')[2]\nreturn element.tag\n@@ -234,15 +234,17 @@ class XML(FileReader):\nif _local_name(elem) == 'attribute' and elem.attrib.get(\n'type', '').split(':')[-1] in val:\nanc = elem.getparent()\n+ anc_name = _local_name(anc)\nwhile not (\n- (_local_name(anc) == 'complexType'\n+ (anc_name == 'complexType'\nand 'name' in anc.attrib)\n- or _local_name(anc) == 'element'):\n+ or anc_name == 'element'):\nanc = anc.getparent()\n+ anc_name = _local_name(anc)\nif anc is None:\nbreak\nelse:\n- if _local_name(anc) == 'complexType':\n+ if anc_name == 'complexType':\nelnames = [x.attrib['name'] for x in\nschema_tree.iter()\nif x.attrib.get('type', ''\n@@ -307,6 +309,9 @@ class XML(FileReader):\ndef _get_info(self, element, **kwargs):\n\"\"\"Extract info from element's attributes, possibly recursive.\n<cvParam> and <userParam> elements are treated in a special way.\"\"\"\n+ try:\n+ name = kwargs.pop('ename')\n+ except:\nname = _local_name(element)\nschema_info = self.schema_info\nif name in {'cvParam', 'userParam'}:\n@@ -327,10 +332,10 @@ class XML(FileReader):\ninfo['name'].append(newinfo.pop('name'))\nelse:\nif cname not in schema_info['lists']:\n- info[cname] = self._get_info_smart(child, **kwargs)\n+ info[cname] = self._get_info_smart(child, ename=cname, **kwargs)\nelse:\ninfo.setdefault(cname, []).append(\n- self._get_info_smart(child, **kwargs))\n+ self._get_info_smart(child, ename=cname, **kwargs))\n# process element text\nif element.text:\n@@ -346,7 +351,7 @@ class XML(FileReader):\nfor k, v in info.items():\nfor t, a in converters.items():\ntry:\n- if (_local_name(element), k) in schema_info[t]:\n+ if (name, k) in schema_info[t]:\ninfo[k] = a(v)\nexcept KeyError:\ncontinue\n@@ -527,8 +532,7 @@ def get_rel_path(element, names):\nyield element\nelse:\nfor child in element.iterchildren():\n- if _local_name(child).lower() == names[0].lower(\n- ) or names[0] == '*':\n+ if names[0] == '*' or _local_name(child).lower() == names[0].lower():\nif len(names) == 1:\nyield child\nelse:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Speed up pepxml module |
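Two micro-optimizations are at work here: the element's local name is computed once and threaded through as the `ename` keyword instead of being recomputed at every recursion level, and `rsplit('}', 1)` becomes `rpartition('}')`, which returns a tuple instead of building a list. A quick, illustrative way to check the string-splitting half (timings vary by interpreter; the numbers are not from the commit):

```python
import timeit

tag = '{http://regis-web.systemsbiology.net/pepXML}spectrum_query'

print(timeit.timeit(lambda: tag.rsplit('}', 1)[1], number=1_000_000))
print(timeit.timeit(lambda: tag.rpartition('}')[2], number=1_000_000))
# Both yield 'spectrum_query'; rpartition is typically slightly faster.
```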
377,522 | 27.04.2017 18:01:35 | -10,800 | b8cee70c4ba906ed61e57b00186673af091428ff | Minor fixes, changelog update | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "3.4.2\n-----\n- Add :py:func:`pyteomics.mzid.DataFrame` and :py:func:`pyteomics.mzid.filter_df` functions for convenience.\n+ - Add :py:func:`pyteomics.mzid.DataFrame` and :py:func:`pyteomics.mzid.filter_df` functions for convenience.\nTheir behavior may be refined later on.\n- Changes in behavior of :py:func:`pyteomics.auxiliary.filter` and :py:func:`pyteomics.auxiliary.qvalues`:\n+ - Changes in behavior of :py:func:`pyteomics.auxiliary.filter` and :py:func:`pyteomics.auxiliary.qvalues`:\n- both functions now always return DataFrames with :py:class:`pandas.DataFrame` input and `full_output=True`.\n- additional parameters `score_label`, `decoy_label`, `pep_label`, and `q_label` for output control.\n+ - Performance optimizations in XML parsing code.\n+\n3.4.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.4.2a4\n\\ No newline at end of file\n+3.4.2a5\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -111,7 +111,7 @@ class PepXML(xml.XML):\nkwargs = dict(kw)\ntry:\nname = kwargs.pop('ename')\n- except:\n+ except KeyError:\nname = xml._local_name(element)\nrec = kwargs.pop('recursive', None)\nif name == 'msms_pipeline_analysis':\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -311,7 +311,7 @@ class XML(FileReader):\n<cvParam> and <userParam> elements are treated in a special way.\"\"\"\ntry:\nname = kwargs.pop('ename')\n- except:\n+ except KeyError:\nname = _local_name(element)\nschema_info = self.schema_info\nif name in {'cvParam', 'userParam'}:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Minor fixes, changelog update |
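The fix narrows the bare `except:` introduced in the previous commit to `except KeyError`, so only the expected "no `ename` passed" case falls back to recomputing the local name, while unrelated errors still propagate. A minimal sketch; note that `dict.pop` with a default expresses the same fallback without a handler at all:

```python
def resolve_name(kwargs, element_tag):
    try:
        return kwargs.pop('ename')
    except KeyError:            # only the expected miss is handled;
        return element_tag      # any other error would still surface

# Equivalent, handler-free spelling:
def resolve_name2(kwargs, element_tag):
    return kwargs.pop('ename', element_tag)

print(resolve_name({'ename': 'search_hit'}, 'spectrum_query'))  # search_hit
print(resolve_name2({}, 'spectrum_query'))                      # spectrum_query
```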
377,522 | 27.04.2017 18:11:55 | -10,800 | 5a7cf841b3d077fc2c20b133078e2778baaa29c0 | Remove dict duplication in pepxml _get_info_smart | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -106,9 +106,8 @@ class PepXML(xml.XML):\n_default_iter_tag = 'spectrum_query'\n_structures_to_flatten = {'search_score_summary', 'modification_info'}\n- def _get_info_smart(self, element, **kw):\n+ def _get_info_smart(self, element, **kwargs):\n\"\"\"Extract the info in a smart way depending on the element type\"\"\"\n- kwargs = dict(kw)\ntry:\nname = kwargs.pop('ename')\nexcept KeyError:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove dict duplication in pepxml _get_info_smart |
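`**kwargs` already binds a fresh dict on every call, so the `kwargs = dict(kw)` copy was pure overhead: popping from `kwargs` cannot disturb the caller. A small demonstration:

```python
def get_info_smart(**kwargs):
    kwargs.pop('ename', None)   # mutates only this call's private dict
    return kwargs

opts = {'ename': 'search_hit', 'recursive': True}
get_info_smart(**opts)
print(opts)   # {'ename': 'search_hit', 'recursive': True} -- caller unchanged
```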
377,532 | 28.04.2017 16:54:55 | -10,800 | 888094dbdaf8eeb03bece4c3e3b625998d84fcc7 | Speed up pepxml.read() | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -105,6 +105,11 @@ class PepXML(xml.XML):\n_default_version = '1.15'\n_default_iter_tag = 'spectrum_query'\n_structures_to_flatten = {'search_score_summary', 'modification_info'}\n+ # attributes which contain unconverted values\n+ _convert_items = {'float': {'calc_neutral_pep_mass', 'massdiff'},\n+ 'int': {'start_scan', 'end_scan', 'index'},\n+ 'bool': {'is_rejected'},\n+ 'floatarray': {'all_ntt_prob'}}.items()\ndef _get_info_smart(self, element, **kwargs):\n\"\"\"Extract the info in a smart way depending on the element type\"\"\"\n@@ -122,11 +127,6 @@ class PepXML(xml.XML):\nrecursive=(rec if rec is not None else True),\n**kwargs)\n- # attributes which contain unconverted values\n- convert = {'float': {'calc_neutral_pep_mass', 'massdiff'},\n- 'int': {'start_scan', 'end_scan', 'index'},\n- 'bool': {'is_rejected'},\n- 'floatarray': {'all_ntt_prob'}}\ndef safe_float(s):\ntry:\nreturn float(s)\n@@ -138,7 +138,7 @@ class PepXML(xml.XML):\n'bool': lambda x: x.lower() in {'1', 'true'},\n'floatarray': lambda x: list(map(float, x[1:-1].split(',')))}\nfor k, v in dict(info).items():\n- for t, s in convert.items():\n+ for t, s in self._convert_items:\nif k in s:\ndel info[k]\ninfo[k] = converters[t](v)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -179,6 +179,8 @@ class XML(FileReader):\nself.version_info = self._get_version_info()\nself.schema_info = self._get_schema_info(read_schema)\n+ self._converters_items = self._converters.items()\n+\n@_keepstate\ndef _get_version_info(self):\n\"\"\"\n@@ -347,14 +349,10 @@ class XML(FileReader):\nreturn stext\n# convert types\n- converters = self._converters\nfor k, v in info.items():\n- for t, a in converters.items():\n- try:\n- if (name, k) in schema_info[t]:\n+ for t, a in self._converters_items:\n+ if t in schema_info and (name, k) in schema_info[t]:\ninfo[k] = a(v)\n- except KeyError:\n- continue\n# resolve refs\nif kwargs.get('retrieve_refs'):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Speed up pepxml.read() |
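Both hunks apply the same idea: data that never changes between calls (the `convert` dict in `PepXML._get_info_smart`, `self._converters.items()` in `XML._get_info`) is built once, at class-definition or construction time, instead of on every parsed element. A stripped-down sketch of the pattern (toy class, not the pyteomics one):

```python
class Parser:
    # Built once when the class is defined; the original code rebuilt an
    # equivalent dict inside every _get_info_smart call.
    _converters = {'float': float, 'int': int}
    _convert_items = {'float': {'massdiff', 'calc_neutral_pep_mass'},
                      'int': {'start_scan', 'end_scan'}}.items()

    def convert(self, info):
        for type_name, keys in self._convert_items:
            for k in info.keys() & keys:
                info[k] = self._converters[type_name](info[k])
        return info

print(Parser().convert({'massdiff': '1.5', 'start_scan': '10'}))
# {'massdiff': 1.5, 'start_scan': 10}
```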
377,532 | 26.05.2017 15:57:24 | -10,800 | c62b0ee762721f61d3078c9a2bd6ce8310a1ca90 | small speed up of mgf.read | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -159,7 +159,6 @@ def read(source=None, use_header=True, convert_arrays=2, read_charges=True, dtyp\nparams[l[0].lower()] = l[1].strip()\nelse: # this must be a peak list\nl = sline.split()\n- if len(l) >= 2:\ntry:\nmasses.append(float(l[0])) # this may cause\nintensities.append(float(l[1])) # exceptions...\\\n@@ -169,6 +168,8 @@ def read(source=None, use_header=True, convert_arrays=2, read_charges=True, dtyp\nraise aux.PyteomicsError(\n'Error when parsing %s. Line:\\n%s' %\n(source, line))\n+ except IndexError:\n+ pass\n@aux._keepstate\ndef read_header(source):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | small speed up of mgf.read |
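The commit replaces the up-front `len(l) >= 2` test with an `except IndexError: pass` after the fact (the EAFP idiom), removing one comparison from the hot path where almost every line is a valid two-column peak. A minimal sketch; note that, as in the committed code, a one-column line appends the mass before the intensity lookup fails, which is a behavior quirk worth knowing about:

```python
def parse_peak(line, masses, intensities):
    l = line.split()
    try:
        masses.append(float(l[0]))        # may raise ValueError or IndexError
        intensities.append(float(l[1]))
    except ValueError:
        raise ValueError('Error when parsing line: %r' % line)
    except IndexError:
        pass                              # short/empty line is skipped silently

m, i = [], []
parse_peak('445.12 120053', m, i)
parse_peak('', m, i)                      # ignored, no length check needed
print(m, i)                               # [445.12] [120053.0]
```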
377,522 | 03.06.2017 18:04:20 | -10,800 | 9af3a48f8b141448ebffaa84c99c642cf212b340 | Add ms1 parser | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "3.4.2\n-----\n+ - New module :py:mod:`pyteomics.ms1` for parsing of MS1 files.\n+\n- Add :py:func:`pyteomics.mzid.DataFrame` and :py:func:`pyteomics.mzid.filter_df` functions for convenience.\nTheir behavior may be refined later on.\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.4.2a5\n\\ No newline at end of file\n+3.4.2a6\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pyteomics/ms1.py",
"diff": "+from . import auxiliary as aux\n+try:\n+ import numpy as np\n+except ImportError:\n+ np = None\n+_array_keys = ['m/z array', 'intensity array']\n+\n+@aux._file_reader()\n+def read(source=None, use_header=False, convert_arrays=2, read_charges=True, dtype=None):\n+ \"\"\"Read an MS1 file and return entries iteratively.\n+\n+ Read the specified MS1 file, **yield** spectra one by one.\n+ Each 'spectrum' is a :py:class:`dict` with four keys: 'm/z array',\n+ 'intensity array', and 'params'. 'm/z array' and\n+ 'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n+ and 'params' stores a :py:class:`dict` of parameters.\n+\n+ Parameters\n+ ----------\n+\n+ source : str or file or None, optional\n+ A file object (or file name) with data in MGF format. Default is\n+ :py:const:`None`, which means read standard input.\n+\n+ use_header : bool, optional\n+ Add the info from file header to each dict. Spectrum-specific parameters\n+ override those from the header in case of conflict.\n+ Default is :py:const:`True`.\n+\n+ convert_arrays : bool, optional\n+ If :py:const:`False`, m/z and intensities will be returned as regular lists.\n+ If :py:const:`True` (default), they will be converted to regular :py:class:`numpy.ndarray`'s.\n+ Conversion requires :py:mod:`numpy`.\n+\n+ dtype : type or str or dict, optional\n+ dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\n+ Keys should be 'm/z array' and/or 'intensity array'.\n+\n+ Returns\n+ -------\n+\n+ out : FileReader\n+ \"\"\"\n+ if convert_arrays and np is None:\n+ raise aux.PyteomicsError('numpy is required for array conversion')\n+ dtype_dict = dtype if isinstance(dtype, dict) else {k: dtype for k in _array_keys}\n+ header = read_header(source)\n+ reading_spectrum = False\n+ params = {}\n+ masses = []\n+ intensities = []\n+ if use_header: params.update(header)\n+ for line in source:\n+ sline = line.strip().split(maxsplit=2)\n+ if not reading_spectrum:\n+ if sline[0] == 'S':\n+ reading_spectrum = True\n+ params['scan'] = tuple(sline[1:])\n+ # otherwise we are not interested; do nothing, just move along\n+ else:\n+ if not sline:\n+ pass\n+ elif sline[0] == 'S':\n+ out = {'params': params}\n+ if convert_arrays:\n+ data = {'m/z array': masses, 'intensity array': intensities}\n+ for key, values in data.items():\n+ out[key] = np.array(values, dtype=dtype_dict.get(key))\n+ else:\n+ out['m/z array'] = masses\n+ out['intensity array'] = intensities\n+ yield out\n+ del out\n+ params = dict(header) if use_header else {}\n+ masses = []\n+ intensities = []\n+ else:\n+ if sline[0] == 'I': # spectrum-specific parameters!\n+ params[sline[1]] = sline[2]\n+ else: # this must be a peak list\n+ try:\n+ masses.append(float(sline[0])) # this may cause\n+ intensities.append(float(sline[1])) # exceptions...\\\n+ except ValueError:\n+ raise aux.PyteomicsError(\n+ 'Error when parsing %s. Line:\\n%s' %\n+ (source, line))\n+ except IndexError:\n+ pass\n+\n+@aux._keepstate\n+def read_header(source):\n+ \"\"\"\n+ Read the specified MS1 file, get the parameters specified in the header\n+ as a :py:class:`dict`.\n+\n+ Parameters\n+ ----------\n+\n+ source : str or file\n+ File name or file object representing an file in MS1 format.\n+\n+ Returns\n+ -------\n+\n+ header : dict\n+ \"\"\"\n+ with aux._file_obj(source, 'r') as source:\n+ header = {}\n+ for line in source:\n+ l = line.split(maxsplit=2)\n+ if l[0] != 'H':\n+ break\n+ key = l[1]\n+ val = l[2].strip()\n+ header[key] = val\n+ return header\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add ms1 parser |
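A usage sketch for the new module; `'spectra.ms1'` is a placeholder path. Two rough edges in this draft are visible in the diff itself: the docstring promises "four keys" but lists three, and it documents `use_header` as defaulting to `True` while the signature default is `False`; the sketch follows the signature:

```python
from pyteomics import ms1

# 'spectra.ms1' is a placeholder path. Each yielded spectrum is a dict
# carrying 'params', 'm/z array' and 'intensity array'.
with ms1.read('spectra.ms1', use_header=True) as reader:
    for spectrum in reader:
        print(spectrum['params'].get('scan'), len(spectrum['m/z array']))
```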
377,522 | 05.06.2017 19:23:03 | -10,800 | 6f1863fd626209645af153ff8d882b2378828aed | Read info from multiple PeptideEvidenceRef elements in mzid.DataFrame | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -316,7 +316,10 @@ def DataFrame(*args, **kwargs):\n*args, **kwargs : passed to :py:func:`chain`\nsep : str or None, optional\n- Split protein descriptions by this delimiter, if given.\n+ Some values related to PSMs (such as protein information) are variable-length\n+ lists. If `sep` is a :py:class:`str`, they will be packed into single string using\n+ this delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is\n+ :py:const:`None`.\nReturns\n-------\n@@ -335,13 +338,29 @@ def DataFrame(*args, **kwargs):\nsii = item.get('SpectrumIdentificationItem', [None])[0]\nif sii is not None:\ninfo.update((k, v) for k, v in sii.items() if isinstance(v, (str, int, float)))\n- evref = sii.get('PeptideEvidenceRef', [None])[0]\n- if evref is not None:\n- info.update((k, v) for k, v in evref.items() if isinstance(v, (str, int, float, list)))\n+ evref = sii.get('PeptideEvidenceRef')\n+ if evref:\n+ prot_descr, accessions, isd, starts, ends, lengths = [], [], [], [], [], []\n+ for d in evref:\n+ prot_descr.append(d['protein description'])\n+ accessions.append(d['accession'])\n+ isd.append(d.get('isDecoy'))\n+ starts.append(d['start'])\n+ ends.append(d['end'])\n+ lengths.append(d['length'])\n+ isd = all(isd)\n+ if sep is not None:\n+ prot_descr = sep.join(prot_descr)\n+ accessions = sep.join(accessions)\n+ info.update((k, v) for k, v in evref[0].items() if isinstance(v, (str, int, float, list)))\n+ info['protein description'] = prot_descr\n+ info['accession'] = accessions\n+ info['isDecoy'] = isd\n+ info['start'] = starts\n+ info['end'] = ends\n+ info['length'] = lengths\ndata.append(info)\ndf = pd.DataFrame(data)\n- if sep is not None and 'protein description' in df:\n- df['protein description'] = df['protein description'].str.split(sep)\nreturn df\ndef filter_df(*args, **kwargs):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Read info from multiple PeptideEvidenceRef elements in mzid.DataFrame |
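The reworked handling walks every `PeptideEvidenceRef` instead of just the first, collects the protein-level fields into lists, and only joins them into strings when `sep` is given; `isDecoy` becomes the conjunction over all evidences. A toy illustration of the packing rule:

```python
evrefs = [{'accession': 'P1', 'protein description': 'alpha', 'isDecoy': False},
          {'accession': 'P2', 'protein description': 'beta', 'isDecoy': False}]

sep = ';'
accessions = [d['accession'] for d in evrefs]
is_decoy = all(d.get('isDecoy') for d in evrefs)  # decoy only if every evidence is
if sep is not None:
    accessions = sep.join(accessions)
print(accessions, is_decoy)   # P1;P2 False
```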
377,522 | 06.06.2017 17:21:06 | -10,800 | 323943ef56d852c165020d4e1536ba07bf46576e | Fix for empty dataframes | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary.py",
"new_path": "pyteomics/auxiliary.py",
"diff": "@@ -677,15 +677,22 @@ def _qvalues_df(psms, keyf, isdecoy, **kwargs):\npeps = kwargs.get('pep')\ndecoy_or_pep_label = _decoy_or_pep_label(**kwargs)\nq_label = kwargs.setdefault('q_label', 'q')\n+ score_label = kwargs.setdefault('score_label', 'score')\nif callable(keyf):\nkeyf = psms.apply(keyf, axis=1)\nif callable(isdecoy):\nisdecoy = psms.apply(isdecoy, axis=1)\nif not isinstance(keyf, basestring):\n- psms[kwargs.setdefault('score_label', 'score')] = keyf\n+ if psms.shape[0]:\n+ psms[score_label] = keyf\n+ else:\n+ psms[score_label] = []\nkeyf = kwargs['score_label']\nif not isinstance(isdecoy, basestring):\n+ if psms.shape[0]:\npsms[decoy_or_pep_label] = isdecoy\n+ else:\n+ psms[decoy_or_pep_label] = []\nisdecoy = decoy_or_pep_label\nreverse = kwargs.get('reverse', False)\npsms.sort_values([keyf, isdecoy], ascending=[not reverse, True], inplace=True)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -99,6 +99,14 @@ class QvalueTest(unittest.TestCase):\nq = aux.qvalues(psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False, formula=1, full_output=True)\nself._run_check(q, 1)\n+ def test_qvalues_empty_dataframe(self):\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]\n+ psms = pd.DataFrame(np.array([], dtype=dtype))\n+ q = aux.qvalues(psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False, formula=1)\n+ self.assertEqual(q.shape[0], 0)\n+ q = aux.qvalues(psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False, formula=1, full_output=True)\n+ self.assertEqual(q.shape[0], 0)\n+\ndef test_qvalues_pep_from_dataframe(self):\ndtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]\npsms = pd.DataFrame(np.array(list(self.psms), dtype=dtype))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix for empty dataframes |
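`DataFrame.apply(..., axis=1)` on a zero-row frame has not always produced an assignable empty Series across pandas versions, so the fix assigns a plain empty list when there are no rows. A minimal reproduction of the guarded assignment:

```python
import pandas as pd

psms = pd.DataFrame({'score': [], 'label': []})
key = psms.apply(lambda row: row['score'], axis=1)   # shape varies by version

if psms.shape[0]:
    psms['key'] = key
else:
    psms['key'] = []       # always well-defined for an empty frame
print(psms.columns.tolist())   # ['score', 'label', 'key']
```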
377,522 | 06.06.2017 17:47:21 | -10,800 | b9c4daf5a0ff6f9e64f968645df31337868a3ba1 | Another fix for empty df | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary.py",
"new_path": "pyteomics/auxiliary.py",
"diff": "@@ -695,18 +695,28 @@ def _qvalues_df(psms, keyf, isdecoy, **kwargs):\npsms[decoy_or_pep_label] = []\nisdecoy = decoy_or_pep_label\nreverse = kwargs.get('reverse', False)\n+\n+ if not full: # create fields early\n+ if peps is None:\n+ fields = [(keyf, np.float64), (isdecoy, np.bool_), (q_label, np.float64)]\n+ else:\n+ fields = [(isdecoy, np.float64), (q_label, np.float64)]\n+ dtype = np.dtype(fields)\n+\npsms.sort_values([keyf, isdecoy], ascending=[not reverse, True], inplace=True)\n+ if not psms.shape[0]:\n+ if full:\n+ psms[q_label] = []\n+ return psms\n+ else:\n+ return np.array([], dtype=dtype)\n+\nq = _calculate_qvalues(psms[keyf].values, psms[isdecoy].values, peps is not None, **kwargs)\nif remove_decoy:\nq = q[~psms[isdecoy].values]\npsms = psms[~psms[isdecoy]].copy()\nif not full:\n- if peps is None:\n- fields = [(keyf, np.float64), (isdecoy, np.bool_), (q_label, np.float64)]\n- else:\n- fields = [(isdecoy, np.float64), (q_label, np.float64)]\n- dtype = np.dtype(fields)\npsms_ = np.empty_like(q, dtype=dtype)\nif peps is None:\npsms_[keyf] = psms[keyf]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_auxiliary.py",
"new_path": "tests/test_auxiliary.py",
"diff": "@@ -420,6 +420,14 @@ class FilterTest(unittest.TestCase):\npsms = pd.DataFrame(np.array(self.psms, dtype=dtype))\nself._run_check(psms)\n+ def test_filter_empty_dataframe(self):\n+ dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]\n+ psms = pd.DataFrame(np.array([], dtype=dtype))\n+ f = aux.filter(psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False, formula=1, fdr=0.1)\n+ self.assertEqual(f.shape[0], 0)\n+ f = aux.qvalues(psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False, formula=1, full_output=True, fdr=0.1)\n+ self.assertEqual(f.shape[0], 0)\n+\ndef test_filter_pep_dataframe(self):\ndtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]\npsms = pd.DataFrame(np.array(self.psms, dtype=dtype))\n@@ -453,6 +461,14 @@ class FilterTest(unittest.TestCase):\npsms = pd.DataFrame(psms)\nself._run_check(psms, key='score', is_decoy='is decoy')\n+ def test_filter_empty_dataframe_str_key_str_is_decoy(self):\n+ # dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\n+ psms = pd.DataFrame({'score': [], 'is decoy': []})\n+ f = aux.filter(psms, key='score', is_decoy='is decoy', fdr=0.1)\n+ self.assertEqual(f.shape[0], 0)\n+ f = aux.qvalues(psms, key='score', is_decoy='is decoy', remove_decoy=False, formula=1, full_output=True, fdr=0.01)\n+ self.assertEqual(f.shape[0], 0)\n+\ndef test_filter_pep_dataframe_str_key_str_pep(self):\ndtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool)]\npsms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Another fix for empty df |
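For the `full_output=False` path, the dtype is now assembled before sorting so that an empty input can short-circuit into a correctly typed empty structured array, keeping field access and `len()` working downstream. A minimal sketch of what that early return hands back:

```python
import numpy as np

fields = [('score', np.float64), ('isDecoy', np.bool_), ('q', np.float64)]
dtype = np.dtype(fields)

empty = np.array([], dtype=dtype)       # typed, zero-length structured array
print(empty.shape, list(empty['q']))    # (0,) []
```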
377,522 | 06.06.2017 19:22:50 | -10,800 | 4950155ce23df611ff7a66af00b4248be98dd655 | Move ion_comp and charge logic into Composition constructor | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- New module :py:mod:`pyteomics.ms1` for parsing of MS1 files.\n- - Add :py:func:`pyteomics.mzid.DataFrame` and :py:func:`pyteomics.mzid.filter_df` functions for convenience.\n+ - :py:class:`pyteomics.mass.mass.Composition` constructor now accepts `ion_type` and `charge`\n+ parameters.\n+\n+ - New functions :py:func:`pyteomics.mzid.DataFrame` and :py:func:`pyteomics.mzid.filter_df`.\nTheir behavior may be refined later on.\n- Changes in behavior of :py:func:`pyteomics.auxiliary.filter` and :py:func:`pyteomics.auxiliary.qvalues`:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mass/mass.py",
"new_path": "pyteomics/mass/mass.py",
"diff": "@@ -123,7 +123,7 @@ def _parse_isotope_string(label):\nisotope_num = int(num) if num else 0\nreturn element_name, isotope_num\n-# Initialize std_aa_comp before the Composition class\n+# Initialize std_aa_comp and std_ion_comp before the Composition class\n# description, fill it later.\nstd_aa_comp = {}\n\"\"\"A dictionary with elemental compositions of the twenty standard\n@@ -131,6 +131,13 @@ amino acid residues, selenocysteine, pyrrolysine,\nand standard H- and -OH terminal groups.\n\"\"\"\n+std_ion_comp = {}\n+\"\"\"A dict with relative elemental compositions of the standard peptide\n+fragment ions. An elemental composition of a fragment ion is calculated as a\n+difference between the total elemental composition of an ion\n+and the sum of elemental compositions of its constituting amino acid residues.\n+\"\"\"\n+\n_isotope_string = r'^([A-Z][a-z+]*)(?:\\[(\\d+)\\])?$'\n_atom = r'([A-Z][a-z+]*)(?:\\[(\\d+)\\])?([+-]?\\d+)?'\n_formula = r'^({})*$'.format(_atom)\n@@ -262,6 +269,14 @@ class Composition(BasicComposition):\nmass_data : dict, optional\nA dict with the masses of chemical elements (the default\nvalue is :py:data:`nist_mass`). It is used for formulae parsing only.\n+ charge : int, optional\n+ If not 0 then additional protons are added to the composition.\n+ ion_comp : dict, optional\n+ A dict with the relative elemental compositions of peptide ion\n+ fragments (default is :py:data:`std_ion_comp`).\n+ ion_type : str, optional\n+ If specified, then the polypeptide is considered to be in the form\n+ of the corresponding ion. Do not forget to specify the charge state!\n\"\"\"\ndefaultdict.__init__(self, int)\n@@ -307,6 +322,20 @@ class Composition(BasicComposition):\nelse:\nself._from_dict(kwargs)\n+ ion_comp = kwargs.get('ion_comp', std_ion_comp)\n+ if 'ion_type' in kwargs:\n+ self += ion_comp[kwargs['ion_type']]\n+\n+ # Get charge\n+ charge = self['H+']\n+ if 'charge' in kwargs:\n+ if charge:\n+ raise PyteomicsError(\n+ 'Charge is specified both by the number of protons and '\n+ '`charge` in kwargs')\n+ charge = kwargs['charge']\n+ self['H+'] = charge\n+\ndef mass(self, **kwargs):\n\"\"\"Calculate the mass or *m/z* of a :py:class:`Composition`.\n@@ -334,23 +363,10 @@ class Composition(BasicComposition):\n-------\nmass : float\n\"\"\"\n- composition = self.copy()\n+ composition = self\nmass_data = kwargs.get('mass_data', nist_mass)\n- ion_comp = kwargs.get('ion_comp', std_ion_comp)\n- if 'ion_type' in kwargs:\n- composition += ion_comp[kwargs['ion_type']]\n-\n- # Get charge.\n- charge = composition['H+']\n- if 'charge' in kwargs:\n- if charge:\n- raise PyteomicsError(\n- 'Charge is specified both by the number of protons and '\n- '`charge` in kwargs')\n- charge = kwargs['charge']\n- composition['H+'] = charge\n- # Calculate mass.\n+ # Calculate mass\nmass = 0.0\naverage = kwargs.get('average', False)\nfor isotope_string, amount in composition.items():\n@@ -364,8 +380,11 @@ class Composition(BasicComposition):\nelse:\nmass += (amount * mass_data[element_name][isotope_num][0])\n- # Calculate m/z if required.\n+ # Calculate m/z if required\n+ charge = kwargs.get('charge', composition['H+'])\nif charge:\n+ if not composition['H+']:\n+ mass += mass_data['H+'][0][0] * charge\nmass /= charge\nreturn mass\n@@ -396,7 +415,7 @@ std_aa_comp.update({\n'-OH': Composition({'O': 1, 'H': 1}),\n})\n-std_ion_comp = {\n+std_ion_comp.update({\n'M': Composition(formula=''),\n'a': Composition(formula='H-2O-1' + 'C-1O-1'),\n'a-H2O': Composition(formula='H-2O-1' + 'C-1O-1' + 'H-2O-1'),\n@@ -416,12 
+435,8 @@ std_ion_comp = {\n'z': Composition(formula='H-2O-1' + 'ON-1H-1'),\n'z-H2O': Composition(formula='H-2O-1' + 'ON-1H-1' + 'H-2O-1'),\n'z-NH3': Composition(formula='H-2O-1' + 'ON-1H-1' + 'N-1H-3'),\n- }\n-\"\"\"A dict with relative elemental compositions of the standard peptide\n-fragment ions. An elemental composition of a fragment ion is calculated as a\n-difference between the total elemental composition of an ion\n-and the sum of elemental compositions of its constituting amino acid residues.\n-\"\"\"\n+ })\n+\ndef calculate_mass(*args, **kwargs):\n\"\"\"Calculates the monoisotopic mass of a polypeptide defined by a\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mass.py",
"new_path": "tests/test_mass.py",
"diff": "@@ -142,31 +142,31 @@ class MassTest(unittest.TestCase):\n# Calculate mass by a formula.\nself.assertEqual(\nmass.calculate_mass(formula='ABCDE', mass_data=self.mass_data),\n- sum([self.mass_data[atom][0][0] for atom in 'ABCDE']))\n+ sum(self.mass_data[atom][0][0] for atom in 'ABCDE'))\n# Calculate mass by a sequence.\nself.assertEqual(\nmass.calculate_mass(sequence='XYZ',\naa_comp=self.aa_comp,\nmass_data=self.mass_data),\n- sum([self.mass_data[atom][0][0] for atom in 'ABCDE']))\n+ sum(self.mass_data[atom][0][0] for atom in 'ABCDE'))\n# Calculate mass by a parsed sequence.\nself.assertEqual(\nmass.calculate_mass(parsed_sequence=['H-','X','Y','Z','-OH'],\naa_comp=self.aa_comp,\nmass_data=self.mass_data),\n- sum([self.mass_data[atom][0][0] for atom in 'ABCDE']))\n+ sum(self.mass_data[atom][0][0] for atom in 'ABCDE'))\n# Calculate average mass by a formula.\nself.assertEqual(\nmass.calculate_mass(formula='ABCDE',\naverage=True,\nmass_data=self.mass_data),\n- sum([self.mass_data[atom][isotope][0]\n+ sum(self.mass_data[atom][isotope][0]\n* self.mass_data[atom][isotope][1]\nfor atom in 'ABCDE'\n- for isotope in self.mass_data[atom] if isotope != 0]))\n+ for isotope in self.mass_data[atom] if isotope != 0))\n# Calculate m/z of an ion.\nfor charge in [1,2,3]:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Move ion_comp and charge logic into Composition constructor |
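With the logic moved into the constructor, an ion's composition can be built directly and `mass()` picks the charge up from the stored protons. A short sketch, assuming the post-commit API, with the y ion of glycine as a singly charged example:

```python
from pyteomics import mass

# charge=1 is stored as one H+ in the composition itself; passing both a
# nonzero proton count and charge= raises PyteomicsError, per the diff.
comp = mass.Composition(sequence='G', ion_type='y', charge=1)
print(comp.mass())   # m/z of the singly protonated glycine y1 ion (~76.04)
```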
377,522 | 07.06.2017 22:46:32 | -10,800 | 6cc9199c9e5eeea9c27d2a2d262edd82374ad29c | Fix in pepxml.DataFrame for spectrum_queries without a search_hit | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -356,6 +356,7 @@ def DataFrame(*args, **kwargs):\nfor k, v in item.items():\nif isinstance(v, (str, int, float)):\ninfo[k] = v\n+ if 'search_hit' in item:\nsh = item['search_hit'][0]\nproteins = sh.pop('proteins')\nprot_dict = {}\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix in pepxml.DataFrame for spectrum_queries without a search_hit |
377,522 | 09.06.2017 19:38:09 | -10,800 | ff63a10f6780eb34b099051d52371d31d4353c4a | Fixes in ms1; read_header still fails test | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/ms1.py",
"new_path": "pyteomics/ms1.py",
"diff": "@@ -99,8 +99,20 @@ def read(source=None, use_header=False, convert_arrays=2, read_charges=True, dty\nmasses = []\nintensities = []\nif use_header: params.update(header)\n+\n+ def make_out():\n+ out = {'params': params}\n+ if convert_arrays:\n+ data = {'m/z array': masses, 'intensity array': intensities}\n+ for key, values in data.items():\n+ out[key] = np.array(values, dtype=dtype_dict.get(key))\n+ else:\n+ out['m/z array'] = masses\n+ out['intensity array'] = intensities\n+ return out\n+\nfor line in source:\n- sline = line.strip().split(maxsplit=2)\n+ sline = line.strip().split(None, 2)\nif not reading_spectrum:\nif sline[0] == 'S':\nreading_spectrum = True\n@@ -110,16 +122,7 @@ def read(source=None, use_header=False, convert_arrays=2, read_charges=True, dty\nif not sline:\npass\nelif sline[0] == 'S':\n- out = {'params': params}\n- if convert_arrays:\n- data = {'m/z array': masses, 'intensity array': intensities}\n- for key, values in data.items():\n- out[key] = np.array(values, dtype=dtype_dict.get(key))\n- else:\n- out['m/z array'] = masses\n- out['intensity array'] = intensities\n- yield out\n- del out\n+ yield make_out()\nparams = dict(header) if use_header else {}\nmasses = []\nintensities = []\n@@ -137,6 +140,8 @@ def read(source=None, use_header=False, convert_arrays=2, read_charges=True, dty\nexcept IndexError:\npass\n+ yield make_out()\n+\n@aux._keepstate\ndef read_header(source):\n\"\"\"\n@@ -157,7 +162,7 @@ def read_header(source):\nwith aux._file_obj(source, 'r') as source:\nheader = {}\nfor line in source:\n- l = line.split(maxsplit=2)\n+ l = line.split(None, 2)\nif l[0] != 'H':\nbreak\nkey = l[1]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -264,7 +264,6 @@ mzid_spectra = {(False, False): [{'id': 'SEQ_spec1',\n{'id': 'Mas_spec40',\n'spectraData_ref': 'LCMALDI_spectra',\n'spectrumID': 'databasekey=40'}],\n-\n(False, True): [{'FileFormat': 'Proteinscape spectra',\n'SpectrumIDFormat': 'spectrum from database nativeID format',\n'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',\n@@ -1572,3 +1571,40 @@ mzxml_spectra = [\n{'polarity': '+', 'id': '19', 'basePeakIntensity': 120053.0, 'highMz': 1800.0, 'msLevel': 1, 'totIonCurrent': 16675500.0, 'peaksCount': 1313, 'num': '19', 'basePeakMz': 445.347, 'lowMz': 400.0, 'intensity array': makeCA([11411.0, 24104.0, 26457.0, 21981.0, 6883.0, 12824.0, 21671.0, 21360.0, 15150.0, 5142.0, 19249.0, 9117.0, 11344.0, 6406.0, 13941.0, 18443.0, 10493.0, 18646.0, 16825.0, 16874.0, 15641.0, 21131.0, 22189.0, 5183.0, 16021.0, 13994.0, 7778.0, 19195.0, 14369.0, 15403.0, 21316.0, 41381.0, 39128.0, 34936.0, 29871.0, 18967.0, 20278.0, 18010.0, 14258.0, 19970.0, 12478.0, 10118.0, 16659.0, 13724.0, 12649.0, 6376.0, 24213.0, 7070.0, 120053.0, 58799.0, 61671.0, 54718.0, 28383.0, 23848.0, 7399.0, 6297.0, 14041.0, 15005.0, 8792.0, 11082.0, 12722.0, 7377.0, 11877.0, 21741.0, 12004.0, 29042.0, 16966.0, 19835.0, 18742.0, 9041.0, 27173.0, 21811.0, 11173.0, 16283.0, 16579.0, 12884.0, 10654.0, 13874.0, 16176.0, 12204.0, 14599.0, 10778.0, 20339.0, 14452.0, 15418.0, 21373.0, 21895.0, 8022.0, 20560.0, 18443.0, 12916.0, 11245.0, 22588.0, 11455.0, 751.0, 8924.0, 21950.0, 19053.0, 14923.0, 10394.0, 14730.0, 17218.0, 19043.0, 27353.0, 12905.0, 21255.0, 13775.0, 6148.0, 31961.0, 36355.0, 27402.0, 18733.0, 10916.0, 24126.0, 13597.0, 27047.0, 11050.0, 9832.0, 10881.0, 8122.0, 13121.0, 20521.0, 12938.0, 13500.0, 20867.0, 17165.0, 28059.0, 21600.0, 35282.0, 17374.0, 25068.0, 16965.0, 11691.0, 25549.0, 15092.0, 16639.0, 12203.0, 15932.0, 15245.0, 10202.0, 24397.0, 18726.0, 20938.0, 18502.0, 11599.0, 24470.0, 27960.0, 28876.0, 17897.0, 18927.0, 17035.0, 13465.0, 16730.0, 4832.0, 14885.0, 12357.0, 15071.0, 23074.0, 8629.0, 29741.0, 5200.0, 26115.0, 17271.0, 10191.0, 14820.0, 15604.0, 14918.0, 15566.0, 14833.0, 16632.0, 21396.0, 7266.0, 5278.0, 23519.0, 28788.0, 11377.0, 17790.0, 15389.0, 14530.0, 14805.0, 10846.0, 19968.0, 29468.0, 24502.0, 20271.0, 9337.0, 15921.0, 14262.0, 20108.0, 9629.0, 20790.0, 9363.0, 17728.0, 22333.0, 23232.0, 15757.0, 21556.0, 19224.0, 15883.0, 28759.0, 14968.0, 22996.0, 15180.0, 14335.0, 10336.0, 17455.0, 17417.0, 15072.0, 14887.0, 20360.0, 10668.0, 23430.0, 23263.0, 11970.0, 14183.0, 28379.0, 24917.0, 9903.0, 24070.0, 17788.0, 20761.0, 18110.0, 26427.0, 28324.0, 15582.0, 23429.0, 25959.0, 14056.0, 28299.0, 20705.0, 8229.0, 24308.0, 15135.0, 19872.0, 23567.0, 11376.0, 14067.0, 5692.0, 18327.0, 32557.0, 13156.0, 25174.0, 21468.0, 25614.0, 12090.0, 35738.0, 8162.0, 2230.0, 18652.0, 22763.0, 16874.0, 21453.0, 15238.0, 17615.0, 5577.0, 25976.0, 19623.0, 16849.0, 20520.0, 26799.0, 20745.0, 13276.0, 14254.0, 25777.0, 19437.0, 18389.0, 31165.0, 20444.0, 20972.0, 26061.0, 18309.0, 17448.0, 13604.0, 33785.0, 32109.0, 25213.0, 29280.0, 15551.0, 14529.0, 18876.0, 15021.0, 16654.0, 8115.0, 14176.0, 3845.0, 28960.0, 23626.0, 27749.0, 22988.0, 13845.0, 39584.0, 18559.0, 24574.0, 30149.0, 19535.0, 25441.0, 25216.0, 19921.0, 26871.0, 22542.0, 15680.0, 29222.0, 18918.0, 21871.0, 20877.0, 29709.0, 28149.0, 33227.0, 18236.0, 35461.0, 7444.0, 20046.0, 25515.0, 16744.0, 10234.0, 25093.0, 32090.0, 25907.0, 22234.0, 23100.0, 25098.0, 17946.0, 24657.0, 
24994.0, 19046.0, 17935.0, 33363.0, 24092.0, 26359.0, 29935.0, 23926.0, 11813.0, 22773.0, 18145.0, 21137.0, 24471.0, 26509.0, 6985.0, 30560.0, 5656.0, 24421.0, 26001.0, 19333.0, 15914.0, 33926.0, 4729.0, 15490.0, 23467.0, 22454.0, 14445.0, 17729.0, 14513.0, 16846.0, 26771.0, 22038.0, 19101.0, 15749.0, 25512.0, 18934.0, 15818.0, 34068.0, 12938.0, 20035.0, 13928.0, 13751.0, 36530.0, 31069.0, 15567.0, 17604.0, 20921.0, 21113.0, 16819.0, 18289.0, 20724.0, 36578.0, 17466.0, 23061.0, 19330.0, 21268.0, 20903.0, 18340.0, 26527.0, 18200.0, 30517.0, 12496.0, 15373.0, 10099.0, 26572.0, 15534.0, 14725.0, 24366.0, 14791.0, 24245.0, 2347.0, 23615.0, 19999.0, 21966.0, 25961.0, 21287.0, 20494.0, 16319.0, 11968.0, 21764.0, 29111.0, 20500.0, 19182.0, 12419.0, 6752.0, 35981.0, 11359.0, 33828.0, 17990.0, 19480.0, 17326.0, 26179.0, 19991.0, 35022.0, 21962.0, 18293.0, 11745.0, 21443.0, 18498.0, 16856.0, 13911.0, 21427.0, 27797.0, 13135.0, 11573.0, 20013.0, 21824.0, 40721.0, 8876.0, 11736.0, 17404.0, 12242.0, 20668.0, 22629.0, 14415.0, 24468.0, 20045.0, 21601.0, 13611.0, 20452.0, 9472.0, 28138.0, 25649.0, 17563.0, 11449.0, 31620.0, 33606.0, 12953.0, 17304.0, 15832.0, 16587.0, 18759.0, 18818.0, 27442.0, 21765.0, 19069.0, 28825.0, 14218.0, 18246.0, 16948.0, 34438.0, 15412.0, 16274.0, 17358.0, 25344.0, 12208.0, 27550.0, 13035.0, 18310.0, 23319.0, 29225.0, 14338.0, 22462.0, 7620.0, 20663.0, 19790.0, 30480.0, 10846.0, 17997.0, 17069.0, 20419.0, 15792.0, 26581.0, 24764.0, 28308.0, 9438.0, 36253.0, 28219.0, 27562.0, 32185.0, 10668.0, 17971.0, 9369.0, 16038.0, 7169.0, 21699.0, 21310.0, 11058.0, 15625.0, 13411.0, 17925.0, 20257.0, 19349.0, 31367.0, 24044.0, 21245.0, 26513.0, 26599.0, 24617.0, 11083.0, 24603.0, 12578.0, 14642.0, 19505.0, 20384.0, 21082.0, 13038.0, 8482.0, 23670.0, 18882.0, 24236.0, 25244.0, 22897.0, 12033.0, 23165.0, 10023.0, 22689.0, 21589.0, 17377.0, 15767.0, 15941.0, 24538.0, 15693.0, 22880.0, 24012.0, 11444.0, 32171.0, 10975.0, 17465.0, 16607.0, 17831.0, 22604.0, 14539.0, 15862.0, 10581.0, 23862.0, 28905.0, 25011.0, 36643.0, 25362.0, 8760.0, 12123.0, 12017.0, 12345.0, 16356.0, 17289.0, 18242.0, 16203.0, 27144.0, 17031.0, 13800.0, 14856.0, 22373.0, 9468.0, 26171.0, 15812.0, 12296.0, 13598.0, 24062.0, 24733.0, 27368.0, 14258.0, 20907.0, 34741.0, 9031.0, 11547.0, 16652.0, 23376.0, 22739.0, 14860.0, 21003.0, 12140.0, 12299.0, 17659.0, 13463.0, 11638.0, 11103.0, 14331.0, 9036.0, 14708.0, 13808.0, 9478.0, 18252.0, 7318.0, 13317.0, 11962.0, 18399.0, 15630.0, 26869.0, 9493.0, 19661.0, 9151.0, 17478.0, 15717.0, 11947.0, 25870.0, 10619.0, 4967.0, 4407.0, 23679.0, 13463.0, 28370.0, 21746.0, 10257.0, 18819.0, 18331.0, 15616.0, 15391.0, 11121.0, 9006.0, 28670.0, 14547.0, 12729.0, 24116.0, 18969.0, 14256.0, 12762.0, 22671.0, 34569.0, 16841.0, 16448.0, 11357.0, 11932.0, 10505.0, 21017.0, 13939.0, 10841.0, 18196.0, 13169.0, 10237.0, 11095.0, 15895.0, 13967.0, 13244.0, 16045.0, 15984.0, 14962.0, 9562.0, 29133.0, 3777.0, 19409.0, 17706.0, 16988.0, 7733.0, 21684.0, 5061.0, 6130.0, 17908.0, 25642.0, 13197.0, 12499.0, 13419.0, 10540.0, 12168.0, 16621.0, 15579.0, 16498.0, 6945.0, 13174.0, 12525.0, 11536.0, 13709.0, 17849.0, 9068.0, 23164.0, 16403.0, 9277.0, 33817.0, 32299.0, 10936.0, 8196.0, 9499.0, 14882.0, 25389.0, 8486.0, 15582.0, 8486.0, 8900.0, 9528.0, 6881.0, 17379.0, 10573.0, 20301.0, 19891.0, 9075.0, 14453.0, 26268.0, 11892.0, 14169.0, 15331.0, 23524.0, 8599.0, 13800.0, 19973.0, 17331.0, 13295.0, 9814.0, 7919.0, 5806.0, 10066.0, 12183.0, 7033.0, 20926.0, 19987.0, 20325.0, 4084.0, 7169.0, 6286.0, 16727.0, 5308.0, 
15225.0, 8333.0, 7509.0, 16330.0, 18430.0, 9696.0, 10567.0, 10294.0, 13527.0, 17464.0, 4806.0, 9731.0, 14552.0, 7373.0, 14384.0, 13841.0, 18365.0, 13729.0, 8981.0, 8211.0, 18784.0, 16519.0, 9166.0, 8857.0, 4515.0, 13507.0, 4007.0, 11951.0, 5867.0, 19044.0, 10793.0, 5736.0, 14061.0, 19776.0, 1852.0, 7836.0, 3839.0, 3497.0, 12939.0, 400.0, 17525.0, 9941.0, 10136.0, 7386.0, 2874.0, 11984.0, 9659.0, 13837.0, 14899.0, 16949.0, 11096.0, 16434.0, 3696.0, 10241.0, 8483.0, 14962.0, 3763.0, 13840.0, 4172.0, 8208.0, 11448.0, 16043.0, 1414.0, 7910.0, 3867.0, 9856.0, 8235.0, 12281.0, 5712.0, 12212.0, 11185.0, 6827.0, 14356.0, 8187.0, 8840.0, 11619.0, 10035.0, 14740.0, 12464.0, 5509.0, 22634.0, 12178.0, 7228.0, 15923.0, 4476.0, 4031.0, 3449.0, 11040.0, 5726.0, 9838.0, 18725.0, 4204.0, 53477.0, 16037.0, 10616.0, 5125.0, 10235.0, 27880.0, 9318.0, 16184.0, 12630.0, 12914.0, 6321.0, 2221.0, 7615.0, 13992.0, 11813.0, 5618.0, 3515.0, 11687.0, 2.0, 9343.0, 5264.0, 17692.0, 5618.0, 9575.0, 2029.0, 13811.0, 13912.0, 5854.0, 2278.0, 9210.0, 8293.0, 5614.0, 2890.0, 14638.0, 8567.0, 8570.0, 9787.0, 17110.0, 7276.0, 13879.0, 7860.0, 18351.0, 6592.0, 8735.0, 6256.0, 4716.0, 5843.0, 7464.0, 5733.0, 10935.0, 9816.0, 2096.0, 2324.0, 6874.0, 11377.0, 12525.0, 13453.0, 4436.0, 9483.0, 5155.0, 6423.0, 5625.0, 12663.0, 7164.0, 4484.0, 6059.0, 9746.0, 6337.0, 15404.0, 4587.0, 11491.0, 6498.0, 6004.0, 20370.0, 8741.0, 6085.0, 12448.0, 10631.0, 8891.0, 11267.0, 13932.0, 9184.0, 10788.0, 2770.0, 8854.0, 6306.0, 8784.0, 1670.0, 6179.0, 5763.0, 11338.0, 8038.0, 9710.0, 4552.0, 6810.0, 7162.0, 3152.0, 8581.0, 14447.0, 5790.0, 3117.0, 6933.0, 8781.0, 10867.0, 5000.0, 9507.0, 4926.0, 5738.0, 3467.0, 8971.0, 6728.0, 3417.0, 4001.0, 13179.0, 4545.0, 7287.0, 13181.0, 2307.0, 12618.0, 1.0, 5258.0, 7972.0, 10163.0, 8529.0, 7788.0, 3281.0, 3374.0, 4801.0, 7489.0, 2099.0, 3978.0, 6641.0, 9788.0, 10189.0, 7099.0, 9885.0, 5638.0, 8278.0, 10031.0, 7038.0, 10246.0, 10104.0, 10057.0, 6767.0, 7945.0, 4618.0, 3428.0, 5641.0, 2037.0, 1582.0, 5013.0, 9966.0, 8718.0, 5153.0, 3545.0, 6190.0, 3095.0, 3809.0, 7869.0, 293.0, 3450.0, 5198.0, 4633.0, 2466.0, 2263.0, 6963.0, 6210.0, 2847.0, 1888.0, 4740.0, 4613.0, 4702.0, 4492.0, 12312.0, 4014.0, 1.0, 4880.0, 4372.0, 9673.0, 5895.0, 8190.0, 5008.0, 11133.0, 3957.0, 5351.0, 4171.0, 9522.0, 2626.0, 2856.0, 5869.0, 8243.0, 6736.0, 1661.0, 5160.0, 2544.0, 1735.0, 1772.0, 6673.0, 2560.0, 693.0, 4590.0, 6434.0, 3894.0, 3634.0, 11300.0, 4903.0, 2021.0, 5122.0, 1705.0, 2315.0, 9875.0, 6988.0, 5342.0, 2985.0, 1296.0, 786.0, 330.0, 3855.0, 6084.0, 695.0, 3100.0, 955.0, 3332.0, 2108.0, 3055.0, 6827.0, 9644.0, 2350.0, 3803.0, 7983.0, 3374.0, 4991.0, 4201.0, 9586.0, 1606.0, 9359.0, 3386.0, 6139.0, 3641.0, 1365.0, 5385.0, 8636.0, 3568.0, 7654.0, 3020.0, 2700.0, 6707.0, 1364.0, 5598.0, 1235.0, 8451.0, 6638.0, 3447.0, 2149.0, 2724.0, 1684.0, 2775.0, 3842.0, 4948.0, 1292.0, 4620.0, 9864.0, 3501.0, 2737.0, 2424.0, 1691.0, 2409.0, 1350.0, 3366.0, 2743.0, 1163.0, 1488.0, 4977.0, 2517.0, 3052.0, 2825.0, 2760.0, 640.0, 2051.0, 1832.0, 2580.0, 5121.0, 4174.0, 3054.0, 5413.0, 3292.0, 2288.0, 2462.0, 3282.0, 8386.0, 3307.0, 4024.0, 2277.0, 3530.0, 1931.0, 2213.0, 939.0, 2600.0, 5895.0, 2109.0, 5930.0, 392.0, 2401.0, 5965.0, 1602.0, 6670.0, 3591.0, 2930.0, 2464.0, 4300.0, 5849.0, 3491.0, 393.0, 1652.0, 2978.0, 1126.0, 1246.0, 7694.0, 2327.0, 2113.0, 2263.0, 4199.0, 4334.0, 1676.0, 4168.0, 4340.0, 740.0, 5077.0, 1669.0, 1868.0, 1663.0, 836.0, 5071.0, 2316.0, 6424.0, 3388.0, 2212.0, 3921.0, 880.0, 3232.0, 6874.0, 2166.0, 1034.0, 
4562.0, 1104.0, 1175.0, 2570.0, 899.0, 2255.0, 5060.0, 671.0, 2382.0, 2179.0, 1032.0, 4165.0, 3924.0, 1548.0, 3790.0, 851.0, 2603.0, 472.0, 1848.0, 2210.0, 1252.0, 3452.0, 743.0, 1546.0, 1548.0, 4476.0, 886.0, 824.0, 1849.0, 4487.0, 2980.0, 1864.0, 2509.0, 1128.0, 2915.0, 4321.0, 6325.0, 2719.0, 1025.0, 6508.0, 3149.0, 4839.0, 1738.0, 4961.0, 361.0, 1765.0, 3128.0, 372.0, 1065.0, 1253.0, 3452.0, 3177.0, 745.0, 1382.0, 2388.0, 3679.0, 3528.0, 1196.0, 1869.0, 2909.0, 3715.0, 5387.0, 953.0, 1265.0, 1484.0, 2505.0, 619.0, 312.0, 2589.0, 6526.0, 1264.0, 1269.0, 3158.0, 4040.0, 1537.0, 3303.0, 1479.0, 1373.0, 3826.0, 2270.0, 2706.0, 1421.0, 2156.0, 4042.0, 5246.0, 1138.0, 1019.0, 1073.0, 884.0, 633.0, 1937.0, 5526.0, 3592.0, 2725.0, 1890.0, 1922.0, 2358.0, 546.0, 5221.0, 649.0, 465.0, 671.0, 1101.0, 3990.0, 890.0, 3254.0, 1686.0, 1074.0, 894.0, 1431.0, 5398.0, 1122.0, 5231.0, 3673.0, 2565.0, 636.0, 642.0, 2411.0, 5724.0, 817.0, 1528.0, 1087.0, 2405.0, 776.0, 2796.0, 3874.0, 933.0, 10114.0, 2131.0, 3491.0, 710.0, 1991.0, 1256.0, 1673.0, 616.0, 513.0, 2674.0, 1551.0, 4945.0, 993.0, 3750.0, 407.0, 4520.0, 834.0, 3829.0, 1575.0, 382.0, 2086.0, 1848.0, 1175.0, 1855.0, 932.0, 828.0, 897.0, 3686.0]), 'm/z array': makeCA([400.38958740234375, 401.03533935546875, 402.035888671875, 403.2169189453125, 403.97320556640625, 404.91033935546875, 405.83642578125, 407.06207275390625, 407.87646484375, 408.66229248046875, 409.37652587890625, 410.37713623046875, 411.50885009765625, 412.57891845703125, 413.4959716796875, 414.520263671875, 415.25408935546875, 415.918212890625, 416.7078857421875, 417.9366455078125, 418.97564697265625, 419.6207275390625, 420.6142578125, 421.38037109375, 422.5335693359375, 423.6138916015625, 424.50970458984375, 425.468505859375, 426.224365234375, 427.05621337890625, 428.4556884765625, 429.41375732421875, 430.16998291015625, 431.1475830078125, 432.0792236328125, 432.94671630859375, 433.82623291015625, 434.9476318359375, 435.899169921875, 436.917236328125, 438.03265380859375, 439.1148681640625, 440.152099609375, 440.96136474609375, 441.72412109375, 442.4854736328125, 443.546630859375, 444.3160400390625, 445.3466796875, 446.29937744140625, 447.34368896484375, 448.51068115234375, 449.63824462890625, 450.67681884765625, 451.4376220703125, 452.040283203125, 452.69329833984375, 453.514892578125, 454.34765625, 455.23687744140625, 456.094970703125, 456.83660888671875, 457.56396484375, 458.7027587890625, 459.7601318359375, 460.78106689453125, 461.95208740234375, 462.71435546875, 463.43890380859375, 464.15802001953125, 465.26104736328125, 466.5059814453125, 467.46826171875, 468.418212890625, 469.4296875, 470.56182861328125, 471.5120849609375, 472.4197998046875, 473.44354248046875, 474.4901123046875, 475.31768798828125, 476.254638671875, 477.11016845703125, 478.36065673828125, 479.27020263671875, 480.54595947265625, 481.48443603515625, 482.56103515625, 483.2381591796875, 484.52655029296875, 485.4844970703125, 486.3204345703125, 487.4210205078125, 488.37890625, 489.0980224609375, 489.71588134765625, 490.71881103515625, 492.0147705078125, 493.04107666015625, 494.34246826171875, 495.52935791015625, 496.4515380859375, 497.218505859375, 498.20782470703125, 499.23138427734375, 500.26983642578125, 501.19921875, 502.0230712890625, 502.9676513671875, 504.03082275390625, 505.01971435546875, 505.96734619140625, 506.61187744140625, 507.59283447265625, 508.44256591796875, 509.37042236328125, 510.18560791015625, 510.84991455078125, 511.90777587890625, 512.7205810546875, 513.6148681640625, 514.3619384765625, 
515.236083984375, 516.13232421875, 517.062744140625, 518.3779296875, 519.432373046875, 520.388671875, 521.2822265625, 522.173583984375, 523.1622314453125, 524.162841796875, 524.95166015625, 525.93212890625, 527.1358642578125, 527.83203125, 528.657958984375, 529.42138671875, 530.356689453125, 531.1588134765625, 531.86474609375, 532.654052734375, 533.808837890625, 534.8798828125, 535.730712890625, 536.622314453125, 537.31787109375, 538.481689453125, 539.50146484375, 540.3681640625, 541.459228515625, 542.43408203125, 543.39501953125, 544.351318359375, 544.9697265625, 545.6025390625, 546.28076171875, 547.1396484375, 548.26806640625, 549.33984375, 550.1533203125, 551.049560546875, 551.99755859375, 552.945068359375, 553.783935546875, 554.453125, 555.311279296875, 556.22900390625, 557.625732421875, 558.461181640625, 559.496337890625, 560.4454345703125, 561.088134765625, 561.8837890625, 562.8387451171875, 563.7255859375, 565.1561279296875, 566.068603515625, 567.09228515625, 568.2957763671875, 569.251953125, 569.9794921875, 571.216064453125, 572.399169921875, 573.3642578125, 574.1414794921875, 575.16162109375, 576.0498046875, 577.20849609375, 578.1102294921875, 579.08349609375, 580.354736328125, 580.9705810546875, 582.02392578125, 582.858642578125, 583.697021484375, 584.751708984375, 585.736083984375, 586.722412109375, 587.48779296875, 588.52685546875, 589.371826171875, 590.213623046875, 591.238525390625, 592.108154296875, 593.032470703125, 593.7459716796875, 594.427490234375, 595.29833984375, 596.341064453125, 597.212646484375, 598.0889892578125, 599.399658203125, 600.26123046875, 601.076171875, 602.169921875, 603.362060546875, 604.254150390625, 605.0965576171875, 606.388427734375, 607.4422607421875, 608.5830078125, 609.69775390625, 610.7020263671875, 611.5001220703125, 612.1220703125, 613.044677734375, 613.8404541015625, 614.84814453125, 615.8154296875, 616.649658203125, 617.3739013671875, 618.20458984375, 619.2890625, 620.2357177734375, 621.212646484375, 622.00048828125, 622.8720703125, 623.511962890625, 624.38818359375, 625.419677734375, 626.416015625, 627.5302734375, 628.47265625, 629.5888671875, 630.49609375, 631.2301025390625, 631.945556640625, 632.5703125, 633.6016845703125, 634.5078125, 635.372314453125, 636.2647705078125, 637.4208984375, 638.0455322265625, 638.9873046875, 640.164794921875, 641.2568359375, 642.148193359375, 643.3486328125, 644.196533203125, 645.092041015625, 645.87744140625, 646.763427734375, 647.722900390625, 648.896240234375, 649.9566650390625, 651.0927734375, 652.0440673828125, 653.2078857421875, 654.2161865234375, 655.0166015625, 655.835693359375, 656.9476318359375, 658.0146484375, 659.3863525390625, 660.5687255859375, 661.540283203125, 662.5528564453125, 663.302734375, 664.231689453125, 665.039794921875, 665.76318359375, 666.485107421875, 667.159423828125, 668.114501953125, 669.1845703125, 670.24853515625, 671.191650390625, 672.0020751953125, 672.87109375, 674.0721435546875, 675.0921630859375, 676.335205078125, 677.490966796875, 678.546630859375, 679.611083984375, 680.4100341796875, 681.339111328125, 682.6435546875, 683.556884765625, 684.397216796875, 685.374267578125, 686.227783203125, 687.2574462890625, 688.130615234375, 689.1865234375, 690.2244873046875, 691.4127197265625, 692.466552734375, 693.337158203125, 694.10302734375, 695.171875, 696.17041015625, 696.811279296875, 697.655517578125, 698.604248046875, 699.7451171875, 700.957763671875, 701.9703369140625, 703.026123046875, 704.0335693359375, 704.848876953125, 705.968017578125, 706.94970703125, 707.863037109375, 
708.7841796875, 709.7867431640625, 710.8990478515625, 711.891845703125, 713.140869140625, 713.886474609375, 714.630859375, 715.511962890625, 716.5302734375, 717.387939453125, 718.404541015625, 719.1859130859375, 719.99853515625, 720.786865234375, 721.42138671875, 722.247802734375, 723.229736328125, 724.130126953125, 725.0079345703125, 725.6214599609375, 726.467041015625, 727.396240234375, 728.22216796875, 729.223876953125, 730.02197265625, 730.7550048828125, 731.358154296875, 732.147216796875, 733.08056640625, 733.789306640625, 734.8394775390625, 736.1195068359375, 737.3280029296875, 738.341796875, 739.2176513671875, 740.0177001953125, 740.974853515625, 741.93212890625, 742.6605224609375, 743.4564208984375, 744.5606689453125, 745.465576171875, 746.3536376953125, 747.201416015625, 748.1258544921875, 748.8831787109375, 749.83056640625, 750.6607666015625, 751.9267578125, 753.1162109375, 754.1434326171875, 755.36669921875, 756.35107421875, 757.1273193359375, 758.007080078125, 758.7608642578125, 759.865478515625, 760.9664306640625, 761.7222900390625, 762.766357421875, 763.765869140625, 764.5450439453125, 765.3704833984375, 766.18017578125, 767.0062255859375, 767.79833984375, 768.83837890625, 769.461181640625, 770.11962890625, 771.2366943359375, 772.277099609375, 773.2481689453125, 774.138671875, 775.2012939453125, 776.0504150390625, 776.871337890625, 777.86083984375, 779.0703125, 780.060791015625, 781.0340576171875, 782.0849609375, 782.773681640625, 783.5970458984375, 784.5537109375, 785.3486328125, 786.3221435546875, 787.1483154296875, 788.158203125, 788.9156494140625, 789.9228515625, 791.00927734375, 791.859619140625, 792.6927490234375, 793.48681640625, 794.3616943359375, 795.26318359375, 796.22314453125, 797.01318359375, 797.885009765625, 799.123779296875, 800.2498779296875, 801.010498046875, 801.75146484375, 802.5615234375, 803.5667724609375, 804.52294921875, 805.369140625, 806.0634765625, 806.6678466796875, 807.335693359375, 808.247314453125, 809.06005859375, 810.025634765625, 810.9266357421875, 811.94140625, 812.888671875, 813.6966552734375, 814.395751953125, 815.400146484375, 816.6763916015625, 817.5902099609375, 818.432373046875, 819.2447509765625, 820.334228515625, 821.349609375, 822.0946044921875, 822.8134765625, 823.5904541015625, 824.466552734375, 825.4178466796875, 826.455322265625, 827.565673828125, 828.312255859375, 829.205078125, 830.0302734375, 830.920654296875, 831.8514404296875, 832.850830078125, 833.6767578125, 834.501220703125, 835.38671875, 836.358642578125, 837.1220703125, 837.958740234375, 838.961669921875, 839.9578857421875, 841.068115234375, 842.001953125, 843.1912841796875, 844.4072265625, 845.22265625, 846.176513671875, 847.0936279296875, 848.0589599609375, 848.9915771484375, 849.801513671875, 850.8953857421875, 851.943359375, 852.8096923828125, 853.85595703125, 855.0648193359375, 856.042236328125, 856.8214111328125, 857.915771484375, 858.9195556640625, 860.012451171875, 861.17333984375, 862.082763671875, 863.0733642578125, 863.9952392578125, 864.8193359375, 865.499755859375, 866.1728515625, 867.16259765625, 867.9429931640625, 868.8642578125, 869.75146484375, 870.7010498046875, 871.594482421875, 872.203369140625, 873.178466796875, 874.146728515625, 874.9632568359375, 876.011474609375, 877.1478271484375, 878.137451171875, 879.0302734375, 879.885986328125, 880.9954833984375, 881.829833984375, 882.77783203125, 883.58349609375, 884.70068359375, 885.7152099609375, 886.5029296875, 887.2774658203125, 888.166259765625, 889.111328125, 889.98486328125, 891.231201171875, 
892.1761474609375, 893.028564453125, 893.94873046875, 894.856201171875, 895.86328125, 896.7916259765625, 897.7933349609375, 898.693115234375, 899.7535400390625, 900.71630859375, 901.667724609375, 903.014404296875, 904.119873046875, 904.83935546875, 905.889404296875, 906.8662109375, 907.9351806640625, 909.0986328125, 909.96923828125, 910.7926025390625, 912.05322265625, 912.8499755859375, 913.7193603515625, 914.7706298828125, 915.96484375, 917.104736328125, 918.2379150390625, 919.1361083984375, 919.8939208984375, 921.032470703125, 921.9166259765625, 922.7454833984375, 923.697265625, 924.7960205078125, 925.979248046875, 926.9443359375, 927.721435546875, 928.7205810546875, 929.767822265625, 930.7706298828125, 931.7349853515625, 932.7294921875, 933.8270263671875, 934.766357421875, 935.697265625, 936.5841064453125, 937.658447265625, 938.6866455078125, 940.0623779296875, 941.23486328125, 942.1427001953125, 943.04833984375, 943.7071533203125, 944.809326171875, 945.9200439453125, 947.064453125, 948.1424560546875, 949.1114501953125, 950.0234375, 950.919189453125, 951.90576171875, 952.79345703125, 953.675048828125, 954.4881591796875, 955.31640625, 956.2119140625, 956.946533203125, 957.9564208984375, 958.8848876953125, 960.013671875, 960.8348388671875, 961.733154296875, 963.04541015625, 964.576416015625, 965.685791015625, 966.8388671875, 967.9644775390625, 969.043212890625, 969.78857421875, 970.57080078125, 971.774169921875, 972.5782470703125, 973.530517578125, 974.415283203125, 975.2567138671875, 975.9061279296875, 976.678466796875, 977.737060546875, 978.7734375, 979.6895751953125, 980.69287109375, 981.6878662109375, 982.834228515625, 983.8946533203125, 984.76953125, 985.744140625, 986.6802978515625, 987.607421875, 988.8516845703125, 989.6602783203125, 990.83740234375, 992.0177001953125, 992.8641357421875, 993.79345703125, 994.74462890625, 996.4727783203125, 997.5208740234375, 998.2164306640625, 998.922119140625, 999.7427978515625, 1000.5955810546875, 1001.52685546875, 1002.6962890625, 1003.7646484375, 1004.7752685546875, 1006.0716552734375, 1006.9635009765625, 1007.8824462890625, 1008.68310546875, 1009.7298583984375, 1010.65673828125, 1011.7733154296875, 1012.6976318359375, 1013.6849365234375, 1014.634521484375, 1015.474853515625, 1016.2716064453125, 1017.0416259765625, 1018.36962890625, 1019.0325927734375, 1019.911865234375, 1020.7095947265625, 1021.3858642578125, 1021.9937744140625, 1022.7115478515625, 1023.47314453125, 1024.47021484375, 1025.56298828125, 1026.45849609375, 1027.4775390625, 1028.62255859375, 1029.66650390625, 1030.740234375, 1031.78076171875, 1032.7509765625, 1033.580810546875, 1034.82080078125, 1035.89501953125, 1036.65380859375, 1037.5478515625, 1038.529296875, 1039.6845703125, 1040.740478515625, 1041.713623046875, 1042.80419921875, 1043.5556640625, 1044.6923828125, 1045.724609375, 1046.6884765625, 1047.94970703125, 1049.199951171875, 1050.1494140625, 1051.01123046875, 1051.83642578125, 1053.063232421875, 1053.821044921875, 1054.839599609375, 1055.8935546875, 1056.59033203125, 1057.628662109375, 1058.71142578125, 1059.498046875, 1060.646728515625, 1061.85888671875, 1062.8408203125, 1063.971923828125, 1065.1044921875, 1066.3037109375, 1067.3388671875, 1068.47216796875, 1069.58935546875, 1070.874755859375, 1071.87255859375, 1072.61669921875, 1073.59423828125, 1074.499755859375, 1075.6572265625, 1076.328369140625, 1077.55322265625, 1078.5400390625, 1079.72216796875, 1080.673095703125, 1081.66552734375, 1082.6494140625, 1083.61962890625, 1084.7607421875, 1085.62548828125, 
1086.58935546875, 1087.58935546875, 1088.59619140625, 1089.525634765625, 1090.396240234375, 1091.36181640625, 1092.49755859375, 1093.876708984375, 1094.72021484375, 1096.005859375, 1096.900634765625, 1097.75146484375, 1098.71533203125, 1099.52587890625, 1100.7333984375, 1101.50341796875, 1102.308349609375, 1103.593994140625, 1104.68115234375, 1105.702392578125, 1107.000732421875, 1107.818359375, 1108.44287109375, 1109.4775390625, 1110.138671875, 1111.1884765625, 1112.01904296875, 1112.9482421875, 1113.81103515625, 1114.8447265625, 1115.92236328125, 1116.7392578125, 1117.732421875, 1119.251708984375, 1119.99755859375, 1120.70849609375, 1121.7509765625, 1122.537353515625, 1123.3759765625, 1123.98681640625, 1124.924560546875, 1125.86083984375, 1126.73876953125, 1127.935546875, 1128.745849609375, 1129.50634765625, 1130.5107421875, 1131.557861328125, 1132.85107421875, 1134.09375, 1135.086181640625, 1136.333251953125, 1137.503662109375, 1138.17236328125, 1138.973876953125, 1139.9248046875, 1140.574951171875, 1141.69287109375, 1142.561767578125, 1143.27685546875, 1144.14404296875, 1145.25537109375, 1145.96337890625, 1146.803955078125, 1147.511962890625, 1148.37158203125, 1149.5185546875, 1150.5634765625, 1151.501953125, 1152.17138671875, 1152.93994140625, 1153.87109375, 1154.857421875, 1155.7646484375, 1156.84619140625, 1157.49462890625, 1158.392578125, 1159.5654296875, 1160.536865234375, 1161.6904296875, 1162.526123046875, 1163.4267578125, 1164.4580078125, 1165.7216796875, 1166.79833984375, 1167.888427734375, 1168.54345703125, 1169.4482421875, 1170.4443359375, 1171.52099609375, 1172.925537109375, 1173.585205078125, 1174.659423828125, 1176.258544921875, 1177.59423828125, 1178.89794921875, 1179.583740234375, 1180.365234375, 1181.583984375, 1182.658203125, 1183.61279296875, 1184.55322265625, 1185.21923828125, 1185.9619140625, 1186.689697265625, 1187.899658203125, 1188.697265625, 1189.4404296875, 1190.21142578125, 1191.803466796875, 1192.5, 1193.730224609375, 1194.675537109375, 1195.63720703125, 1196.69970703125, 1197.807373046875, 1198.7177734375, 1199.99267578125, 1201.32275390625, 1202.562744140625, 1203.42626953125, 1204.72802734375, 1205.5234375, 1206.78466796875, 1207.78125, 1208.93798828125, 1210.1318359375, 1211.028076171875, 1212.47265625, 1213.38818359375, 1214.44287109375, 1215.6640625, 1216.549072265625, 1217.72119140625, 1218.56103515625, 1219.66259765625, 1220.84130859375, 1221.638671875, 1222.54736328125, 1223.291259765625, 1224.15966796875, 1225.0556640625, 1226.285400390625, 1227.32958984375, 1228.735107421875, 1229.45458984375, 1230.4892578125, 1231.423828125, 1232.59423828125, 1233.65185546875, 1234.494140625, 1235.459228515625, 1236.769287109375, 1237.62158203125, 1238.386962890625, 1239.53857421875, 1240.73388671875, 1241.74853515625, 1242.87939453125, 1243.6806640625, 1244.5419921875, 1245.47705078125, 1246.611083984375, 1247.74072265625, 1248.61669921875, 1249.65625, 1251.15625, 1252.2275390625, 1253.28173828125, 1254.02734375, 1254.83154296875, 1256.08203125, 1256.70263671875, 1257.339111328125, 1258.02197265625, 1259.06884765625, 1260.0478515625, 1260.677490234375, 1261.44482421875, 1262.48828125, 1263.2939453125, 1264.525390625, 1265.42578125, 1266.28076171875, 1267.702392578125, 1268.50341796875, 1269.289794921875, 1270.760498046875, 1271.70849609375, 1272.588134765625, 1273.46435546875, 1274.454833984375, 1275.37744140625, 1276.61181640625, 1277.50390625, 1278.83349609375, 1280.004638671875, 1280.65771484375, 1281.583740234375, 1282.4130859375, 1283.975341796875, 
1286.34912109375, 1287.2783203125, 1288.082763671875, 1289.128662109375, 1290.34912109375, 1291.50390625, 1292.42236328125, 1293.6240234375, 1294.3994140625, 1295.2666015625, 1295.93310546875, 1296.673583984375, 1297.292724609375, 1298.5595703125, 1300.0537109375, 1300.9287109375, 1301.671142578125, 1303.00048828125, 1304.3251953125, 1305.2900390625, 1306.359130859375, 1307.34033203125, 1308.115234375, 1309.553955078125, 1311.09423828125, 1312.6630859375, 1313.563720703125, 1314.6728515625, 1315.946044921875, 1317.196044921875, 1318.2314453125, 1319.547119140625, 1320.51806640625, 1321.36669921875, 1322.475830078125, 1324.06591796875, 1325.066162109375, 1326.1767578125, 1327.192138671875, 1327.84423828125, 1329.00732421875, 1330.0234375, 1330.87841796875, 1332.33642578125, 1333.59912109375, 1334.4501953125, 1335.6083984375, 1336.414306640625, 1337.505126953125, 1338.644287109375, 1339.3544921875, 1340.593017578125, 1341.7080078125, 1342.484375, 1343.54541015625, 1344.77490234375, 1345.6474609375, 1346.45068359375, 1347.565185546875, 1348.23876953125, 1349.42822265625, 1350.6728515625, 1351.409423828125, 1352.23779296875, 1353.0283203125, 1353.880126953125, 1354.533203125, 1355.537109375, 1356.57568359375, 1357.65673828125, 1358.765625, 1360.82275390625, 1361.900146484375, 1363.05224609375, 1364.3701171875, 1365.10302734375, 1365.755126953125, 1366.70556640625, 1367.60107421875, 1368.658203125, 1369.33935546875, 1370.2607421875, 1371.950927734375, 1373.420654296875, 1374.450439453125, 1375.58544921875, 1376.37353515625, 1377.73291015625, 1378.774658203125, 1379.80029296875, 1380.8291015625, 1381.52490234375, 1382.53271484375, 1383.57470703125, 1384.41259765625, 1385.621826171875, 1386.67822265625, 1387.771728515625, 1388.51513671875, 1389.171142578125, 1389.843505859375, 1390.7734375, 1392.29345703125, 1393.70751953125, 1394.69287109375, 1395.5009765625, 1396.59228515625, 1397.198486328125, 1398.34033203125, 1399.917236328125, 1400.81494140625, 1401.78857421875, 1402.5810546875, 1403.457275390625, 1404.945068359375, 1405.990234375, 1406.9208984375, 1407.742919921875, 1408.49267578125, 1409.36328125, 1410.3154296875, 1411.47900390625, 1412.48193359375, 1413.56103515625, 1414.64013671875, 1415.38916015625, 1416.151123046875, 1416.9501953125, 1418.3662109375, 1419.610107421875, 1420.81787109375, 1422.225341796875, 1423.06787109375, 1424.39892578125, 1425.3291015625, 1426.81103515625, 1427.83984375, 1429.290283203125, 1430.195556640625, 1431.437255859375, 1432.69287109375, 1434.609619140625, 1436.118896484375, 1437.706787109375, 1438.375732421875, 1439.245361328125, 1440.454833984375, 1442.134765625, 1442.849365234375, 1443.953857421875, 1445.473388671875, 1446.18505859375, 1447.553955078125, 1448.31103515625, 1449.299072265625, 1450.066650390625, 1450.80224609375, 1451.525634765625, 1452.308837890625, 1453.209716796875, 1454.205078125, 1455.103515625, 1456.060791015625, 1457.433837890625, 1459.093994140625, 1460.364990234375, 1461.049072265625, 1463.0107421875, 1464.96484375, 1465.69140625, 1466.324951171875, 1467.36328125, 1470.156982421875, 1471.43701171875, 1472.296630859375, 1473.17431640625, 1474.522216796875, 1475.568359375, 1476.2578125, 1478.016357421875, 1479.24072265625, 1479.89453125, 1481.129150390625, 1482.328125, 1483.418212890625, 1484.348388671875, 1485.339599609375, 1487.158447265625, 1489.0185546875, 1489.97509765625, 1491.116455078125, 1493.62109375, 1494.3095703125, 1495.67138671875, 1496.8056640625, 1497.778564453125, 1499.4267578125, 1500.58740234375, 1501.5986328125, 
1502.515380859375, 1503.150634765625, 1505.52978515625, 1506.650390625, 1509.39501953125, 1510.064697265625, 1511.25390625, 1512.375244140625, 1514.4970703125, 1515.572265625, 1516.365966796875, 1517.261474609375, 1518.243408203125, 1519.978271484375, 1521.0517578125, 1521.935791015625, 1523.373046875, 1525.430908203125, 1526.421630859375, 1527.80859375, 1528.66845703125, 1529.704833984375, 1530.9765625, 1532.154296875, 1533.34228515625, 1534.33837890625, 1535.78955078125, 1536.61962890625, 1537.38330078125, 1538.264404296875, 1539.772216796875, 1541.060546875, 1543.270263671875, 1544.21630859375, 1545.323974609375, 1546.343994140625, 1548.144287109375, 1550.567138671875, 1552.367431640625, 1553.1787109375, 1554.52197265625, 1555.35400390625, 1556.703125, 1558.220703125, 1558.984375, 1560.05126953125, 1561.304443359375, 1562.48583984375, 1563.30126953125, 1564.437744140625, 1565.80419921875, 1566.59033203125, 1569.40380859375, 1571.77490234375, 1574.384521484375, 1575.582763671875, 1576.427734375, 1577.588134765625, 1578.650390625, 1580.301513671875, 1581.45458984375, 1582.23974609375, 1583.840087890625, 1585.37548828125, 1586.391357421875, 1588.023193359375, 1589.372802734375, 1591.751953125, 1592.68408203125, 1593.472412109375, 1594.313232421875, 1595.52685546875, 1597.152587890625, 1597.790283203125, 1600.117431640625, 1601.466796875, 1602.48681640625, 1603.661865234375, 1604.74169921875, 1605.48486328125, 1606.282958984375, 1607.375, 1608.64697265625, 1609.382568359375, 1610.311279296875, 1611.2880859375, 1613.010009765625, 1614.29541015625, 1615.360107421875, 1616.46337890625, 1617.11572265625, 1618.2783203125, 1620.237060546875, 1620.877685546875, 1621.755126953125, 1623.65576171875, 1624.597900390625, 1627.211181640625, 1629.283935546875, 1630.5380859375, 1631.3447265625, 1633.7392578125, 1635.309814453125, 1636.988037109375, 1638.052001953125, 1638.941162109375, 1641.2333984375, 1643.456787109375, 1645.15478515625, 1646.756103515625, 1647.59521484375, 1648.4482421875, 1649.614013671875, 1650.31689453125, 1651.225341796875, 1653.120361328125, 1654.56396484375, 1656.172607421875, 1659.06787109375, 1660.4921875, 1662.562744140625, 1666.490234375, 1667.990966796875, 1668.6669921875, 1669.895263671875, 1673.319580078125, 1674.264892578125, 1676.18798828125, 1677.0263671875, 1681.38916015625, 1684.42578125, 1685.05517578125, 1685.8115234375, 1687.7568359375, 1689.33251953125, 1691.2744140625, 1692.242919921875, 1699.79736328125, 1703.167236328125, 1704.076416015625, 1704.9755859375, 1706.415771484375, 1708.31298828125, 1711.287353515625, 1714.760498046875, 1716.608642578125, 1717.6083984375, 1719.567626953125, 1720.648193359375, 1723.835205078125, 1726.537353515625, 1727.319091796875, 1728.208984375, 1729.417724609375, 1730.475830078125, 1732.616455078125, 1734.271728515625, 1736.541259765625, 1737.337158203125, 1738.282958984375, 1738.98193359375, 1740.037353515625, 1741.724853515625, 1743.254638671875, 1745.993408203125, 1750.390625, 1751.103271484375, 1754.5107421875, 1756.341064453125, 1758.35205078125, 1760.322021484375, 1761.417724609375, 1763.494873046875, 1766.391357421875, 1767.47119140625, 1769.859130859375, 1771.068359375, 1772.699951171875, 1773.4228515625, 1774.158935546875, 1775.810302734375, 1777.126220703125, 1778.25439453125, 1779.4228515625, 1783.1669921875, 1783.91943359375, 1789.88671875, 1791.3388671875, 1793.0791015625, 1795.557373046875]), 'retentionTime': 5.8905},\n{'polarity': '+', 'collisionEnergy': 35.0, 'id': '20', 'basePeakIntensity': 301045.0, 'highMz': 
905.0, 'msLevel': 2, 'totIonCurrent': 764637.0, 'peaksCount': 43, 'precursorMz': [{'precursorMz': 445.35, 'precursorIntensity': 120053.0}], 'num': '20', 'basePeakMz': 428.905, 'lowMz': 110.0, 'intensity array': makeCA([3071.0, 1259.0, 564.0, 2371.0, 1646.0, 1546.0, 1093.0, 1498.0, 1110.0, 2013.0, 1535.0, 1973.0, 28317.0, 4071.0, 792.0, 2456.0, 3167.0, 1673.0, 216505.0, 30083.0, 2.0, 1192.0, 1273.0, 2070.0, 3120.0, 11655.0, 2124.0, 821.0, 825.0, 4734.0, 3214.0, 1235.0, 6617.0, 4802.0, 3320.0, 301045.0, 101500.0, 666.0, 1892.0, 1301.0, 1923.0, 683.0, 1880.0]), 'm/z array': makeCA([223.08883666992188, 244.08282470703125, 270.891845703125, 277.880859375, 281.1331787109375, 293.664794921875, 311.64837646484375, 312.763916015625, 329.0174560546875, 333.06805419921875, 336.62493896484375, 338.9378662109375, 340.9237060546875, 341.9869384765625, 348.98486328125, 351.067138671875, 354.82891845703125, 357.0274658203125, 358.66326904296875, 359.61871337890625, 360.2332763671875, 370.48370361328125, 382.07147216796875, 383.66082763671875, 385.33001708984375, 386.373291015625, 388.41363525390625, 398.84710693359375, 400.7999267578125, 401.9385986328125, 410.0867919921875, 420.408447265625, 426.13665771484375, 426.94586181640625, 428.072509765625, 428.90478515625, 429.922607421875, 430.8460693359375, 438.67962646484375, 443.957275390625, 444.7640380859375, 446.65692138671875, 531.078369140625]), 'retentionTime': 5.9446666666666665}\n]\n+\n+ms1_spectra = [{'intensity array': makeCA([ 0. , 20.0522 , 29.26406, 30.04175, 20.19221, 11.58895,\n+ 0. ]),\n+ 'm/z array': makeCA([ 2.51263, 82.51282, 82.51301, 82.51321, 82.5134 , 82.51359,\n+ 82.51378]),\n+ 'params': {'BPI': '585566',\n+ 'BPM': '544.2904',\n+ 'RTime': '0.987225',\n+ 'TIC': '3728760',\n+ 'scan': ('1', '1')}},\n+ {'intensity array': makeCA([ 0. , 31.2197 , 37.46051, 44.36585, 49.12939, 44.33195,\n+ 35.1637 , 33.48032, 0. ]),\n+ 'm/z array': makeCA([ 82.6435 , 82.6437 , 82.64389, 82.64408, 82.64427, 82.64447,\n+ 82.64466, 82.64485, 82.64504]),\n+ 'params': {'BPI': '713524',\n+ 'BPM': '544.2904',\n+ 'RTime': '1.32083',\n+ 'TIC': '2694200'}}]\n+\n+ms1_spectra_lists = [{'intensity array': [0., 20.0522, 29.26406, 30.04175, 20.19221, 11.58895, 0.],\n+ 'm/z array': [2.51263, 82.51282, 82.51301, 82.51321, 82.5134, 82.51359, 82.51378],\n+ 'params': {'BPI': '585566',\n+ 'BPM': '544.2904',\n+ 'RTime': '0.987225',\n+ 'TIC': '3728760',\n+ 'scan': ('1', '1')}},\n+ {'intensity array': [0., 31.2197, 37.46051, 44.36585, 49.12939, 44.33195, 35.1637, 33.48032, 0.],\n+ 'm/z array': [82.6435, 82.6437, 82.64389, 82.64408, 82.64427, 82.64447, 82.64466, 82.64485, 82.64504],\n+ 'params': {'BPI': '713524',\n+ 'BPM': '544.2904',\n+ 'RTime': '1.32083',\n+ 'TIC': '2694200'}}]\n+\n+ms1_header = {'CreationDate': 'Sat Jun 03 15:25:10 2017',\n+ 'Extractor version': 'Xcalibur',\n+ 'Extractor': 'ProteoWizard',\n+ 'Source file': 'Set 1. B2 at 193 nm RT.RAW'}\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -4,7 +4,7 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport tempfile\nimport unittest\n-from pyteomics.mgf import *\n+from pyteomics.mgf import read, write, read_header\nimport data\nclass MGFTest(unittest.TestCase):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fixes in ms1; read_header still fails test |
377,522 | 26.06.2017 15:51:26 | -10,800 | 7fd5a7da34bf2aa5a7d274e0612f10761a9a54e1 | Add actual test files | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/test.ms1",
"diff": "+H CreationDate Sat Jun 03 15:25:10 2017\n+H Extractor ProteoWizard\n+H Extractor version Xcalibur\n+H Source file Set 1. B2 at 193 nm RT.RAW\n+S 1 1\n+I RTime 0.987225\n+I BPI 585566\n+I BPM 544.2904\n+I TIC 3728760\n+2.51263 0\n+82.51282 20.0522\n+82.51301 29.26406\n+82.51321 30.04175\n+82.5134 20.19221\n+82.51359 11.58895\n+82.51378 0\n+S 2 2\n+I RTime 1.32083\n+I BPI 713524\n+I BPM 544.2904\n+I TIC 2694200\n+82.6435 0\n+82.6437 31.2197\n+82.64389 37.46051\n+82.64408 44.36585\n+82.64427 49.12939\n+82.64447 44.33195\n+82.64466 35.1637\n+82.64485 33.48032\n+82.64504 0\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/test_ms1.py",
"diff": "+from os import path\n+import numpy as np\n+import pyteomics\n+pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\n+import unittest\n+from pyteomics.ms1 import read, read_header\n+import data\n+\n+class MS1Test(unittest.TestCase):\n+ maxDiff = None\n+ def setUp(self):\n+ self.path = 'test.ms1'\n+ self.header = read_header(self.path)\n+ self.spectra = list(read(self.path))\n+ self.ns = len(self.spectra)\n+\n+ def test_read(self):\n+ # http://stackoverflow.com/q/14246983/1258041\n+ self.assertEqual(data.ms1_spectra, list(read(self.path)))\n+ with read(self.path) as reader:\n+ self.assertEqual(data.ms1_spectra, list(reader))\n+\n+ def test_read_array_conversion(self):\n+ with read(self.path, convert_arrays=False) as reader:\n+ self.assertEqual(data.ms1_spectra_lists, list(reader))\n+ with read(self.path, convert_arrays=True) as reader:\n+ s = next(reader)\n+ self.assertTrue(isinstance(s['m/z array'], np.ndarray))\n+\n+ def test_header(self):\n+ self.assertEqual(self.header, data.ms1_header)\n+\n+ def test_read_dtype(self):\n+ dtypes = {'m/z array': np.float32, 'intensity array': np.int32}\n+ with read(self.path, dtype=dtypes) as f:\n+ for spec in f:\n+ for k, v in dtypes.items():\n+ self.assertEqual(spec[k].dtype, v)\n+\n+if __name__ == \"__main__\":\n+ unittest.main()\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add actual test files |
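The fixture added here is a flat MS1 text file: H lines hold file-level header fields, S lines open a scan, I lines hold per-scan metadata, and bare numeric pairs are m/z and intensity values. A minimal sketch of reading this fixture with pyteomics.ms1, using the file name from the commit (the output keys follow the test above):

    from pyteomics import ms1

    # header: a dict built from the H lines
    header = ms1.read_header('test.ms1')
    assert header['Extractor'] == 'ProteoWizard'

    # spectra: dicts with 'params' plus 'm/z array' / 'intensity array'
    with ms1.read('test.ms1') as reader:
        for spectrum in reader:
            print(spectrum['params'].get('RTime'), len(spectrum['m/z array']))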
377,522 | 27.06.2017 18:47:46 | -10,800 | 8ef13c075bcdbb344a27127876f5e1f06ae97e3c | MS1 test works | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/ms1.py",
"new_path": "pyteomics/ms1.py",
"diff": "@@ -135,8 +135,8 @@ def read(source=None, use_header=False, convert_arrays=2, read_charges=True, dty\nintensities.append(float(sline[1])) # exceptions...\\\nexcept ValueError:\nraise aux.PyteomicsError(\n- 'Error when parsing %s. Line:\\n%s' %\n- (source, line))\n+ 'Error when parsing %s. Line: %s' %\n+ (source.name, line))\nexcept IndexError:\npass\n@@ -162,9 +162,11 @@ def read_header(source):\nwith aux._file_obj(source, 'r') as source:\nheader = {}\nfor line in source:\n- l = line.split(None, 2)\n- if l[0] != 'H':\n+ if line[0] != 'H':\nbreak\n+ l = line.split('\\t', 2)\n+ if len(l) < 3:\n+ l = line.split(None, 2)\nkey = l[1]\nval = l[2].strip()\nheader[key] = val\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | MS1 test works |
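The header fix works because MS1 headers are tab-delimited and a key such as 'Source file' contains a space, so splitting on any whitespace cuts the key in half. A standalone sketch of the rule added above, fed a tab-delimited sample line:

    def split_header_line(line):
        # prefer tab delimiters; fall back to generic whitespace
        # for space-delimited headers
        l = line.split('\t', 2)
        if len(l) < 3:
            l = line.split(None, 2)
        return l[1], l[2].strip()

    key, val = split_header_line('H\tSource file\tSet 1. B2 at 193 nm RT.RAW\n')
    # key == 'Source file', val == 'Set 1. B2 at 193 nm RT.RAW'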
377,522 | 02.07.2017 14:56:59 | -10,800 | 5b6acbd2e2872dd633e09b067bafc05da054fb6e | Change url in pkgbuild to readthedocs | [
{
"change_type": "MODIFY",
"old_path": "PKGBUILD",
"new_path": "PKGBUILD",
"diff": "@@ -4,7 +4,7 @@ pkgver=3.4.2\npkgrel=1\npkgdesc=\"A framework for proteomics data analysis.\"\narch=('any')\n-url=\"http://pythonhosted.org/pyteomics\"\n+url=\"http://pyteomics.readthedocs.io/\"\nlicense=('Apache')\ndepends=('python' 'python-setuptools')\noptdepends=('python-matplotlib: for pylab_aux module'\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Change url in pkgbuild to readthedocs |
377,522 | 05.07.2017 16:09:08 | -10,800 | 5a37e2dddf955885985770fcbc58bbd7a54b5621 | Fix warning formatting problem for Python 3 | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -29,7 +29,7 @@ This module requres :py:mod:`lxml` and :py:mod:`numpy`.\nimport re\nimport warnings\n-warnings.formatwarning = lambda msg, *args: str(msg) + '\\n'\n+warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\nimport socket\nfrom functools import wraps\nfrom traceback import format_exc\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix warning formatting problem for Python 3 |
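For context on this one-line change: newer Python 3 releases call a replaced warnings.formatwarning with line passed as a keyword argument, so a lambda accepting only positional arguments raises a TypeError. That detail of the CPython warnings machinery is an assumption based on its source; a small sketch:

    import warnings

    # old form, positional-only: fails when line=... arrives as a keyword
    # warnings.formatwarning = lambda msg, *args: str(msg) + '\n'

    # fixed form: also swallows keyword arguments
    warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\n'

    warnings.warn('one-line warning')  # prints just the message text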
377,522 | 05.07.2017 18:45:39 | -10,800 | a54b45ab5b08740046709e543aab638267e84330 | Remove unnecessary concat in filter_df | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -392,6 +392,7 @@ def filter_df(*args, **kwargs):\nDefault is 'expect'.\nis_decoy : str / iterable / callable, optional\nDefault is to check if all strings in the \"protein\" column start with `'DECOY_'`\n+\n*args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\nReturns\n@@ -402,7 +403,10 @@ def filter_df(*args, **kwargs):\nsep = kwargs.get('sep')\nkwargs.setdefault('key', 'expect')\nif all(isinstance(arg, pd.DataFrame) for arg in args):\n+ if len(args) > 1:\ndf = pd.concat(args)\n+ else:\n+ df = args[0]\nelse:\nread_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep'] if k in kwargs}\ndf = DataFrame(*args, **read_kw)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -323,7 +323,10 @@ def filter_df(*args, **kwargs):\nsep = kwargs.get('sep')\nkwargs.setdefault('key', 'expect')\nif all(isinstance(arg, pd.DataFrame) for arg in args):\n+ if len(args) > 1:\ndf = pd.concat(args)\n+ else:\n+ df = args[0]\nelse:\nread_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep'] if k in kwargs}\ndf = DataFrame(*args, **read_kw)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove unnecessary concat in filter_df |
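The guard added here avoids a needless copy: pandas.concat always builds a new frame, so the single-DataFrame case can simply return its argument. A standalone sketch of the same logic:

    import pandas as pd

    def combine(frames):
        # mirror of the guard in filter_df: concatenate only when
        # more than one frame was passed
        if len(frames) > 1:
            return pd.concat(frames)
        return frames[0]

    df = pd.DataFrame({'expect': [0.01, 0.5]})
    assert combine([df]) is df  # the common single-frame case stays cheap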
377,522 | 26.08.2017 00:47:45 | -10,800 | 56647336022f72c1e4110ca63004dfbe51b9c26b | Make _write_byte_offsets public | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -1046,7 +1046,7 @@ class IndexSavingXML(IndexedXML):\nindex = PrebuiltOffsetIndex(self._load_byte_index_from_file(f))\nself._offset_index = index\n- def _write_byte_offsets(self):\n+ def write_byte_offsets(self):\n\"\"\"Write the byte offsets in :attr:`_offset_index` to the file\nat :attr:`_byte_offset_filename`\n\"\"\"\n@@ -1076,7 +1076,7 @@ class IndexSavingXML(IndexedXML):\nThe path to the file to parse\n\"\"\"\nwith cls(path, use_index=True) as inst:\n- inst._write_byte_offsets()\n+ inst.write_byte_offsets()\nclass ArrayConversionMixin(object):\n_dtype_dict = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -46,7 +46,7 @@ class MzmlTest(unittest.TestCase):\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.FlatTagSpecificXMLByteIndex))\nself.assertTrue(not isinstance(inst._offset_index, xml.PrebuiltOffsetIndex))\n- inst._source.close()\n+ # inst._source.close()\nself.assertTrue(inst._source.closed)\nMzML.prebuild_byte_offset_file(work_path)\nwith MzML(work_path, use_index=True) as inst:\n@@ -54,7 +54,7 @@ class MzmlTest(unittest.TestCase):\nself.assertTrue(offsets_exist)\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.PrebuiltOffsetIndex))\n- inst._source.close()\n+ # inst._source.close()\nself.assertTrue(inst._source.closed)\nos.remove(inst._byte_offset_filename)\nwith MzML(work_path, use_index=True) as inst:\n@@ -62,7 +62,7 @@ class MzmlTest(unittest.TestCase):\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.FlatTagSpecificXMLByteIndex))\nself.assertTrue(not isinstance(inst._offset_index, xml.PrebuiltOffsetIndex))\n- inst._source.close()\n+ # inst._source.close()\nself.assertTrue(inst._source.closed)\nshutil.rmtree(test_dir, True)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzxml.py",
"new_path": "tests/test_mzxml.py",
"diff": "@@ -33,7 +33,7 @@ class MzXMLTest(unittest.TestCase):\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.FlatTagSpecificXMLByteIndex))\nself.assertTrue(not isinstance(inst._offset_index, xml.PrebuiltOffsetIndex))\n- inst._source.close()\n+ # inst._source.close()\nself.assertTrue(inst._source.closed)\nMzXML.prebuild_byte_offset_file(work_path)\nwith MzXML(work_path, use_index=True) as inst:\n@@ -41,7 +41,7 @@ class MzXMLTest(unittest.TestCase):\nself.assertTrue(offsets_exist)\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.PrebuiltOffsetIndex))\n- inst._source.close()\n+ # inst._source.close()\nself.assertTrue(inst._source.closed)\nos.remove(inst._byte_offset_filename)\nwith MzXML(work_path, use_index=True) as inst:\n@@ -49,7 +49,7 @@ class MzXMLTest(unittest.TestCase):\nself.assertEqual(offsets_exist, inst._check_has_byte_offset_file())\nself.assertTrue(isinstance(inst._offset_index, xml.FlatTagSpecificXMLByteIndex))\nself.assertTrue(not isinstance(inst._offset_index, xml.PrebuiltOffsetIndex))\n- inst._source.close()\n+ # inst._source.close()\nself.assertTrue(inst._source.closed)\nshutil.rmtree(test_dir, True)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make _write_byte_offsets public |
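With the method now public, the byte-offset index can be persisted either through the classmethod or from an already-open reader. A usage sketch, assuming a hypothetical file name:

    from pyteomics.mzml import MzML

    # one-shot: open the file, build the index, write the offsets file
    MzML.prebuild_byte_offset_file('spectra.mzML')

    # or reuse an open indexed reader via the now-public method
    with MzML('spectra.mzML', use_index=True) as reader:
        reader.write_byte_offsets()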
377,522 | 29.08.2017 02:15:19 | -10,800 | 7a6258f83de5ff50d562d51232a19c956a14053a | Add decode_binary in docstring, set read_schema to False | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -117,7 +117,7 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\nReturns\n-------\n- str\n+ out : str\nThe name for this array entry\n\"\"\"\n# If this is a non-standard array, we hope the userParams\n@@ -162,7 +162,7 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\n# arrays before falling back to just guessing.\nelse:\nimport warnings\n- warnings.warn(\"Multiple options for naming binary array: %r\" % (candidates))\n+ warnings.warn(\"Multiple options for naming binary array: %r\" % candidates)\nstandard_options = set(candidates) & STANDARD_ARRAYS\nif standard_options:\nreturn max(standard_options, key=len)\n@@ -215,7 +215,7 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\nReturns\n-------\n- dict\n+ out : dict\nThe processed and flattened data array and metadata\n\"\"\"\nif not self.decode_binary:\n@@ -263,7 +263,7 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\ninfo[k] = int(info[k])\nreturn info\n-def read(source, read_schema=True, iterative=True, use_index=False, dtype=None):\n+def read(source, read_schema=False, iterative=True, use_index=False, dtype=None):\n\"\"\"Parse `source` and iterate through spectra.\nParameters\n@@ -273,8 +273,8 @@ def read(source, read_schema=True, iterative=True, use_index=False, dtype=None):\nread_schema : bool, optional\nIf :py:const:`True`, attempt to extract information from the XML schema\n- mentioned in the mzML header (default). Otherwise, use default\n- parameters. Disable this to avoid waiting on slow network connections or\n+ mentioned in the mzML header. Otherwise, use default parameters.\n+ Not recommended without Internet connection or\nif you don't like to get the related warnings.\niterative : bool, optional\n@@ -290,6 +290,11 @@ def read(source, read_schema=True, iterative=True, use_index=False, dtype=None):\ndtype to convert arrays to, one for both m/z and intensity arrays or one for each key.\nIf :py:class:`dict`, keys should be 'm/z array' and 'intensity array'.\n+ decode_binary : bool, optional\n+ Defines whether binary data should be decoded and included in the output\n+ (under \"m/z array\", \"intensity array\", etc.).\n+ Default is :py:const:`True`.\n+\nReturns\n-------\nout : iterator\n@@ -335,10 +340,15 @@ def iterfind(source, path, **kwargs):\nread_schema : bool, optional\nIf :py:const:`True`, attempt to extract information from the XML schema\n- mentioned in the mzIdentML header (default). Otherwise, use default\n- parameters. Disable this to avoid waiting on slow network connections or\n+ mentioned in the mzIdentML header. Otherwise, use default\n+ parameters. Not recommended without Internet connection or\nif you don't like to get the related warnings.\n+ decode_binary : bool, optional\n+ Defines whether binary data should be decoded and included in the output\n+ (under \"m/z array\", \"intensity array\", etc.).\n+ Default is :py:const:`True`.\n+\nReturns\n-------\nout : iterator\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzxml.py",
"new_path": "pyteomics/mzxml.py",
"diff": "@@ -186,7 +186,7 @@ class MzXML(xml.ArrayConversionMixin, xml.IndexSavingXML):\nyield item\n-def read(source, read_schema=True, iterative=True, use_index=False, dtype=None):\n+def read(source, read_schema=False, iterative=True, use_index=False, dtype=None):\n\"\"\"Parse `source` and iterate through spectra.\nParameters\n@@ -196,8 +196,8 @@ def read(source, read_schema=True, iterative=True, use_index=False, dtype=None):\nread_schema : bool, optional\nIf :py:const:`True`, attempt to extract information from the XML schema\n- mentioned in the mzML header (default). Otherwise, use default\n- parameters. Disable this to avoid waiting on slow network connections or\n+ mentioned in the mzML header. Otherwise, use default\n+ parameters. Not recommended without Internet connection or\nif you don't like to get the related warnings.\niterative : bool, optional\n@@ -209,6 +209,11 @@ def read(source, read_schema=True, iterative=True, use_index=False, dtype=None):\nDefines whether an index of byte offsets needs to be created for\nspectrum elements. Default is :py:const:`False`.\n+ decode_binary : bool, optional\n+ Defines whether binary data should be decoded and included in the output\n+ (under \"m/z array\", \"intensity array\", etc.).\n+ Default is :py:const:`True`.\n+\nReturns\n-------\nout : iterator\n@@ -259,6 +264,11 @@ def iterfind(source, path, **kwargs):\nparameters. Disable this to avoid waiting on slow network connections or\nif you don't like to get the related warnings.\n+ decode_binary : bool, optional\n+ Defines whether binary data should be decoded and included in the output\n+ (under \"m/z array\", \"intensity array\", etc.).\n+ Default is :py:const:`True`.\n+\nReturns\n-------\nout : iterator\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -193,7 +193,7 @@ class PepXML(xml.XML):\ninfo['search_hit'].sort(key=lambda x: x['hit_rank'])\nreturn info\n-def read(source, read_schema=True, iterative=True, **kwargs):\n+def read(source, read_schema=False, iterative=True, **kwargs):\n\"\"\"Parse `source` and iterate through peptide-spectrum matches.\nParameters\n@@ -203,8 +203,8 @@ def read(source, read_schema=True, iterative=True, **kwargs):\nread_schema : bool, optional\nIf :py:const:`True`, attempt to extract information from the XML schema\n- mentioned in the pepXML header (default). Otherwise, use default\n- parameters. Disable this to avoid waiting on slow network connections or\n+ mentioned in the pepXML header. Otherwise, use default parameters.\n+ Not recommended without Internet connection or\nif you don't like to get the related warnings.\niterative : bool, optional\n@@ -256,8 +256,8 @@ def iterfind(source, path, **kwargs):\nread_schema : bool, optional\nIf :py:const:`True`, attempt to extract information from the XML schema\n- mentioned in the mzIdentML header (default). Otherwise, use default\n- parameters. Disable this to avoid waiting on slow network connections or\n+ mentioned in the mzIdentML header. Otherwise, use default parameters.\n+ Not recommended without Internet connection or\nif you don't like to get the related warnings.\nReturns\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -210,12 +210,6 @@ def iterfind(source, path, **kwargs):\nhighly recommended to disable iterative parsing if possible.\nDefault value is :py:const:`True`.\n- read_schema : bool, optional\n- If :py:const:`True`, attempt to extract information from the XML schema\n- mentioned in the mzIdentML header (default). Otherwise, use default\n- parameters. Disable this to avoid waiting on slow network connections or\n- if you don't like to get the related warnings.\n-\nReturns\n-------\nout : iterator\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -142,7 +142,7 @@ class XML(FileReader):\ndef _get_info_smart(self, element, **kwargs):\nraise NotImplementedError\n- def __init__(self, source, read_schema=True,\n+ def __init__(self, source, read_schema=False,\niterative=True, build_id_cache=False, **kwargs):\n\"\"\"Create an XML parser object.\n@@ -153,7 +153,7 @@ class XML(FileReader):\nread_schema : bool, optional\nDefines whether schema file referenced in the file header\nshould be used to extract information about value conversion.\n- Default is :py:const:`True`.\n+ Default is :py:const:`False`.\niterative : bool, optional\nDefines whether an :py:class:`ElementTree` object should be\nconstructed and stored on the instance or if iterative parsing\n@@ -871,7 +871,7 @@ class IndexedXML(XML):\nread_schema : bool, optional\nDefines whether schema file referenced in the file header\nshould be used to extract information about value conversion.\n- Default is :py:const:`True`.\n+ Default is :py:const:`False`.\niterative : bool, optional\nDefines whether an :py:class:`ElementTree` object should be\nconstructed and stored on the instance or if iterative parsing\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add decode_binary in docstring, set read_schema to False |
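Two defaults matter after this commit: schema retrieval is off unless requested, and binary decoding can be switched off when only metadata is needed. A sketch with a hypothetical file name; decode_binary is the option documented in the docstrings above, passed here through the MzML constructor:

    from pyteomics.mzml import MzML

    # read_schema now defaults to False; opt in when the converters
    # from the referenced XSD are actually wanted
    with MzML('spectra.mzML', read_schema=True) as reader:
        spectrum = next(reader)

    # leave base64 payloads undecoded to skim metadata quickly
    with MzML('spectra.mzML', decode_binary=False) as reader:
        for spectrum in reader:
            print(spectrum['id'])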
377,522 | 01.09.2017 18:27:59 | -10,800 | bc3383f143511fc55cfc8b9262302b57a558db9d | Make mzid use index saving; add try/except around type conversion in xml._get_info | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "+3.4.3\n+-----\n+\n+ - Change the default value for `read_schema` to :py:const:`False` in\n+ XML parsing modules.\n+\n+ - Add optional `decode_binary` argument in\n+ :py:class:`pyteomics.mzml.MzML` and :py:class:`pyteomics.mzxml.MzXML`.\n+\n+ - Add method :py:meth:`write_byte_offsets` in :py:class:`pyteomics.mzml.MzML`,\n+ :py:class:`pyteomics.mzxml.MzXML` and :py:class:`pyteomics.mzid.MzIdentML`.\n+\n+ - Minor fixes.\n+\n3.4.2\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "PKGBUILD",
"new_path": "PKGBUILD",
"diff": "@@ -4,7 +4,7 @@ pkgver=3.4.2\npkgrel=1\npkgdesc=\"A framework for proteomics data analysis.\"\narch=('any')\n-url=\"http://pyteomics.readthedocs.io/\"\n+url=\"http://pythonhosted.org/pyteomics/\"\nlicense=('Apache')\ndepends=('python' 'python-setuptools')\noptdepends=('python-matplotlib: for pylab_aux module'\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.4.2\n\\ No newline at end of file\n+3.4.3a0\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -100,7 +100,7 @@ warnings.formatwarning = lambda msg, *args: str(msg) + '\\n'\nfrom . import auxiliary as aux\nfrom . import xml\n-class MzIdentML(xml.IndexedXML):\n+class MzIdentML(xml.IndexSavingXML):\n\"\"\"Parser class for MzIdentML files.\"\"\"\nfile_format = 'mzIdentML'\n_root_element = 'MzIdentML'\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -179,6 +179,7 @@ class XML(FileReader):\nself._id_dict = None\nself.version_info = self._get_version_info()\n+ self._read_schema = read_schema\nself.schema_info = self._get_schema_info(read_schema)\nself._converters_items = self._converters.items()\n@@ -351,10 +352,16 @@ class XML(FileReader):\nreturn stext\n# convert types\n+ try:\nfor k, v in info.items():\nfor t, a in self._converters_items:\nif t in schema_info and (name, k) in schema_info[t]:\ninfo[k] = a(v)\n+ except ValueError as e:\n+ message = 'Error when converting types: {}'.format(e.args)\n+ if not self._read_schema:\n+ message += '\\nTry reading the file with read_schema=True'\n+ raise PyteomicsError(message)\n# resolve refs\nif kwargs.get('retrieve_refs'):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make mzid use index saving; add try/except around type conversion in xml._get_info |
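Two consequences of this commit: MzIdentML now inherits the offset-index persistence of IndexSavingXML, and a failed type conversion raises a PyteomicsError that suggests read_schema=True. A usage sketch with a hypothetical file name:

    from pyteomics.mzid import MzIdentML

    # the byte-offset cache machinery now applies to mzid files as well
    MzIdentML.prebuild_byte_offset_file('search.mzid')

    # if the default converters mismatch the file, retry with the schema
    with MzIdentML('search.mzid', read_schema=True) as reader:
        item = next(reader)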
377,522 | 20.10.2017 17:31:26 | -10,800 | ce249fda929e5150888863f20964016dc8137e80 | Fix for multiple values lost in param tags | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -330,6 +330,11 @@ class XML(FileReader):\nif cname in {'cvParam', 'userParam', 'UserParam'}:\nnewinfo = self._handle_param(child, **kwargs)\nif not ('name' in info and 'name' in newinfo):\n+ for key in set(info) & set(newinfo):\n+ if isinstance(info[key], list):\n+ info[key].append(newinfo.pop(key))\n+ else:\n+ info[key] = [info[key], newinfo.pop(key)]\ninfo.update(newinfo)\nelse:\nif not isinstance(info['name'], list):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix for multiple values lost in param tags |
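A standalone sketch of the merging rule added here: when several cvParam or userParam children map onto the same key, the values are accumulated in a list instead of each one overwriting the last.

    def merge_params(info, newinfo):
        # mirror of the branch added in XML._get_info
        for key in set(info) & set(newinfo):
            if isinstance(info[key], list):
                info[key].append(newinfo.pop(key))
            else:
                info[key] = [info[key], newinfo.pop(key)]
        info.update(newinfo)

    info = {'collision energy': 25.0}
    merge_params(info, {'collision energy': 35.0})
    merge_params(info, {'collision energy': 45.0})
    # info == {'collision energy': [25.0, 35.0, 45.0]}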
377,522 | 20.10.2017 18:13:52 | -10,800 | 99d05207c0dbd2424ab0a88201e10750f869e6ed | Refactor in mgf | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.4.3a0\n\\ No newline at end of file\n+3.4.3a1\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -114,23 +114,26 @@ def read(source=None, use_header=True, convert_arrays=2, read_charges=True, dtyp\nraise aux.PyteomicsError('numpy is required for array conversion')\ndtype_dict = dtype if isinstance(dtype, dict) else {k: dtype for k in _array_keys}\nheader = read_header(source)\n- reading_spectrum = False\n- params = {}\n+\n+ for line in source:\n+ sline = line.strip()\n+ if sline == 'BEGIN IONS':\n+ spectrum = _read_spectrum(source, header if use_header else {}, convert_arrays, read_charges, dtype_dict)\n+ yield spectrum\n+ # otherwise we are not interested; do nothing, just move along\n+\n+\n+def _read_spectrum(source, header, convert_arrays, read_charges, dtype_dict):\nmasses = []\nintensities = []\ncharges = []\n- if use_header: params.update(header)\n+ params = header.copy()\n+\nfor line in source:\nsline = line.strip()\n- if not reading_spectrum:\n- if sline == 'BEGIN IONS':\n- reading_spectrum = True\n- # otherwise we are not interested; do nothing, just move along\n- else:\nif not sline or sline[0] in _comments:\npass\nelif sline == 'END IONS':\n- reading_spectrum = False\nif 'pepmass' in params:\ntry:\npepmass = tuple(map(float, params['pepmass'].split()))\n@@ -147,12 +150,8 @@ def read(source=None, use_header=True, convert_arrays=2, read_charges=True, dtyp\ndata['charge array'] = charges\nfor key, values in data.items():\nout[key] = _array_converters[key][convert_arrays](values, dtype=dtype_dict.get(key))\n- yield out\n- del out\n- params = dict(header) if use_header else {}\n- masses = []\n- intensities = []\n- charges = []\n+ return out\n+\nelse:\nif '=' in sline: # spectrum-specific parameters!\nl = sline.split('=', 1)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Refactor in mgf |
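The refactor splits the reader into a thin dispatch loop and a per-spectrum helper, which the next commit reuses for title-based lookup. A minimal sketch of the pattern, with illustrative names:

    def spectra(lines):
        # dispatch: look only for block starts, delegate the body
        lines = iter(lines)
        for line in lines:
            if line.strip() == 'BEGIN IONS':
                yield read_block(lines)

    def read_block(lines):
        # consume one block up to its END IONS marker
        block = []
        for line in lines:
            if line.strip() == 'END IONS':
                return block
            block.append(line.strip())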
377,522 | 21.10.2017 16:51:39 | -10,800 | ac983a814b5de955979f3e5b9e2a3609de52df6c | Add MGF class | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary.py",
"new_path": "pyteomics/auxiliary.py",
"diff": "@@ -412,6 +412,20 @@ def _keepstate(func):\nreturn res\nreturn wrapped\n+def _keepstate_method(func):\n+ \"\"\"Decorator for :py:class:`FileReader` methods to help keep the position\n+ in the underlying file.\n+ \"\"\"\n+ @wraps(func)\n+ def wrapped(self, *args, **kwargs):\n+ position = self.tell()\n+ self.seek(0)\n+ try:\n+ return func(self, *args, **kwargs)\n+ finally:\n+ self.seek(position)\n+ return wrapped\n+\nclass _file_obj(object):\n\"\"\"Check if `f` is a file name and open the file in `mode`.\nA context manager.\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -17,6 +17,12 @@ Also, common parameters can be read from MGF file header with\n:py:func:`read_header` function. :py:func:`write` allows creation of MGF\nfiles.\n+Classes\n+-------\n+\n+ :py:class:`MGF` - a class representing an MGF file. Use it to read spectra\n+ from a file consecutively or by title.\n+\nFunctions\n---------\n@@ -59,6 +65,7 @@ except ImportError:\nnp = None\nimport itertools as it\n+class MGF(aux.FileReader):\n_comments = set('#;!/')\n_array = (lambda x, dtype: np.array(x, dtype=dtype)) if np is not None else None\n_ma = (lambda x, dtype: np.ma.masked_equal(np.array(x, dtype=dtype), 0)) if np is not None else None\n@@ -70,8 +77,115 @@ _array_converters = {\n}\n_array_keys = ['m/z array', 'intensity array', 'charge array']\n-@aux._file_reader()\n-def read(source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None):\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding=None):\n+ super(MGF, self).__init__(source, 'r', self._read, False, (), {}, encoding)\n+ self._use_header = use_header\n+ self._convert_arrays = convert_arrays\n+ if self._convert_arrays and np is None:\n+ raise aux.PyteomicsError('numpy is required for array conversion')\n+ self._read_charges = read_charges\n+ self._dtype_dict = dtype if isinstance(dtype, dict) else {k: dtype for k in self._array_keys}\n+ if self._use_header:\n+ self._read_header()\n+ else:\n+ self._header = None\n+\n+ @property\n+ def header(self):\n+ if self._header is None:\n+ self._read_header()\n+ return self._header\n+\n+ @aux._keepstate_method\n+ def _read_header(self):\n+ header = {}\n+ for line in self._source:\n+ if line.strip() == 'BEGIN IONS':\n+ break\n+ l = line.split('=')\n+ if len(l) == 2:\n+ key = l[0].lower()\n+ val = l[1].strip()\n+ header[key] = val\n+ if 'charge' in header:\n+ header['charge'] = aux._parse_charge(header['charge'], True)\n+ self._header = header\n+\n+ def _read(self, **kwargs):\n+ for line in self._source:\n+ sline = line.strip()\n+ if sline == 'BEGIN IONS':\n+ spectrum = self._read_spectrum()\n+ yield spectrum\n+ # otherwise we are not interested; do nothing, just move along\n+\n+ def _read_spectrum(self):\n+ \"\"\"Read a single spectrum from ``self._source``.\n+\n+ Returns\n+ -------\n+ out : dict\n+ \"\"\"\n+ masses = []\n+ intensities = []\n+ charges = []\n+\n+ params = self.header.copy() if self._use_header else {}\n+\n+ for line in self._source:\n+ sline = line.strip()\n+ if not sline or sline[0] in self._comments:\n+ pass\n+ elif sline == 'END IONS':\n+ if 'pepmass' in params:\n+ try:\n+ pepmass = tuple(map(float, params['pepmass'].split()))\n+ except ValueError:\n+ raise aux.PyteomicsError('MGF format error: cannot parse '\n+ 'PEPMASS = {}'.format(params['pepmass']))\n+ else:\n+ params['pepmass'] = pepmass + (None,)*(2-len(pepmass))\n+ if isinstance(params.get('charge'), str):\n+ params['charge'] = aux._parse_charge(params['charge'], True)\n+ out = {'params': params}\n+ data = {'m/z array': masses, 'intensity array': intensities}\n+ if self._read_charges:\n+ data['charge array'] = charges\n+ for key, values in data.items():\n+ out[key] = self._array_converters[key][self._convert_arrays](values, dtype=self._dtype_dict.get(key))\n+ return out\n+\n+ else:\n+ if '=' in sline: # spectrum-specific parameters!\n+ l = sline.split('=', 1)\n+ params[l[0].lower()] = l[1].strip()\n+ else: # this must be a peak list\n+ l = sline.split()\n+ try:\n+ masses.append(float(l[0]))\n+ intensities.append(float(l[1]))\n+ if 
self._read_charges:\n+ charges.append(aux._parse_charge(l[2]) if len(l) > 2 else 0)\n+ except ValueError:\n+ raise aux.PyteomicsError(\n+ 'Error when parsing %s. Line:\\n%s' % (getattr(self._source, 'name', 'MGF file'), line))\n+ except IndexError:\n+ pass\n+\n+ @aux._keepstate_method\n+ def get_spectrum(self, title):\n+ self.reset()\n+ for line in self._source:\n+ sline = line.strip()\n+ if sline[:5] == 'TITLE' and sline.split('=', 1)[1].strip() == title:\n+ spectrum = self._read_spectrum()\n+ spectrum['params']['title'] = title\n+ return spectrum\n+\n+ __getitem__ = get_spectrum\n+\n+\n+def read(*args, **kwargs):\n\"\"\"Read an MGF file and return entries iteratively.\nRead the specified MGF file, **yield** spectra one by one.\n@@ -107,22 +221,15 @@ def read(source=None, use_header=True, convert_arrays=2, read_charges=True, dtyp\ndtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\nKeys should be 'm/z array', 'intensity array' and/or 'charge array'.\n+ encoding : str, optional\n+ Encoding to read the files in. Default is UTF-8.\n+\nReturns\n-------\n- out : FileReader\n+ out : MGF\n\"\"\"\n- if convert_arrays and np is None:\n- raise aux.PyteomicsError('numpy is required for array conversion')\n- dtype_dict = dtype if isinstance(dtype, dict) else {k: dtype for k in _array_keys}\n- header = read_header(source)\n-\n- for line in source:\n- sline = line.strip()\n- if sline == 'BEGIN IONS':\n- spectrum = _read_spectrum(source, header if use_header else {}, convert_arrays, read_charges, dtype_dict)\n- yield spectrum\n- # otherwise we are not interested; do nothing, just move along\n+ return MGF(*args, **kwargs)\ndef get_spectrum(source, title, use_header=True, convert_arrays=2, read_charges=True, dtype=None):\n\"\"\"Read one spectrum (with given `title`) from `source`.\n@@ -147,78 +254,10 @@ def get_spectrum(source, title, use_header=True, convert_arrays=2, read_charges=\nA dict with the spectrum, if it is found, and None otherwise.\n\"\"\"\n- with aux._file_obj(source, 'r') as source:\n- if convert_arrays and np is None:\n- raise aux.PyteomicsError('numpy is required for array conversion')\n- dtype_dict = dtype if isinstance(dtype, dict) else {k: dtype for k in _array_keys}\n- header = read_header(source)\n- for line in source:\n- sline = line.strip()\n- if sline[:5] == 'TITLE' and sline.split('=', 1)[1].strip() == title:\n- spectrum = _read_spectrum(source, header if use_header else {}, convert_arrays, read_charges, dtype_dict)\n- spectrum['params']['title'] = title\n- return spectrum\n-\n-def _read_spectrum(source, header, convert_arrays, read_charges, dtype_dict):\n- \"\"\"Read a single spectrum from ``source``.\n-\n- Parameters\n- ----------\n- source : file\n- header : dict\n- convert_arrays : bool\n- read_charges : bool\n- dtype_dict : dict\n-\n- Returns\n- -------\n- out : dict\n- \"\"\"\n- masses = []\n- intensities = []\n- charges = []\n- params = header.copy()\n+ with MGF(source, use_header=use_header, convert_arrays=convert_arrays,\n+ read_charges=read_charges, dtype=dtype) as f:\n+ return f[title]\n- for line in source:\n- sline = line.strip()\n- if not sline or sline[0] in _comments:\n- pass\n- elif sline == 'END IONS':\n- if 'pepmass' in params:\n- try:\n- pepmass = tuple(map(float, params['pepmass'].split()))\n- except ValueError:\n- raise aux.PyteomicsError('MGF format error: cannot parse '\n- 'PEPMASS = {}'.format(params['pepmass']))\n- else:\n- params['pepmass'] = pepmass + (None,)*(2-len(pepmass))\n- if 
isinstance(params.get('charge'), str):\n- params['charge'] = aux._parse_charge(params['charge'], True)\n- out = {'params': params}\n- data = {'m/z array': masses, 'intensity array': intensities}\n- if read_charges:\n- data['charge array'] = charges\n- for key, values in data.items():\n- out[key] = _array_converters[key][convert_arrays](values, dtype=dtype_dict.get(key))\n- return out\n-\n- else:\n- if '=' in sline: # spectrum-specific parameters!\n- l = sline.split('=', 1)\n- params[l[0].lower()] = l[1].strip()\n- else: # this must be a peak list\n- l = sline.split()\n- try:\n- masses.append(float(l[0])) # this may cause\n- intensities.append(float(l[1])) # exceptions...\\\n- if read_charges:\n- charges.append(aux._parse_charge(l[2]) if len(l) > 2 else 0)\n- except ValueError:\n- raise aux.PyteomicsError(\n- 'Error when parsing %s. Line:\\n%s' %\n- (source, line))\n- except IndexError:\n- pass\n@aux._keepstate\ndef read_header(source):\n@@ -361,7 +400,7 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nhead_dict = {}\nfor line in head_lines:\nif not line.strip() or any(\n- line.startswith(c) for c in _comments):\n+ line.startswith(c) for c in MGF._comments):\ncontinue\nl = line.split('=')\nif len(l) == 2:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -42,6 +42,7 @@ from lxml import etree\nfrom collections import OrderedDict, defaultdict\nfrom .auxiliary import FileReader, PyteomicsError, basestring, _file_obj\nfrom .auxiliary import unitint, unitfloat, unitstr\n+from .auxiliary import _keepstate_method as _keepstate\ntry: # Python 2.7\nfrom urllib2 import urlopen, URLError\nexcept ImportError: # Python 3.x\n@@ -55,21 +56,6 @@ def _local_name(element):\nreturn element.tag\n-def _keepstate(func):\n- \"\"\"Decorator for :py:class:`XML` methods to help keep the position\n- in the underlying file.\n- \"\"\"\n- @wraps(func)\n- def wrapped(self, *args, **kwargs):\n- position = self.tell()\n- self.seek(0)\n- try:\n- return func(self, *args, **kwargs)\n- finally:\n- self.seek(position)\n- return wrapped\n-\n-\nclass XMLValueConverter(object):\n# Adapted from http://stackoverflow.com/questions/2764269/parsing-an-xsduration-datatype-into-a-python-datetime-timedelta-object\n_duration_parser = re.compile(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -4,7 +4,7 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport tempfile\nimport unittest\n-from pyteomics.mgf import read, write, read_header\n+from pyteomics.mgf import read, write, read_header, MGF\nimport data\nclass MGFTest(unittest.TestCase):\n@@ -25,12 +25,13 @@ class MGFTest(unittest.TestCase):\nself.tmpfile.close()\ndef test_read(self):\n+ for func in [read, MGF]:\n# http://stackoverflow.com/q/14246983/1258041\n- self.assertEqual(data.mgf_spectra_long, list(read(self.path)))\n- self.assertEqual(data.mgf_spectra_short, list(read(self.path, False)))\n- with read(self.path) as reader:\n+ self.assertEqual(data.mgf_spectra_long, list(func(self.path)))\n+ self.assertEqual(data.mgf_spectra_short, list(func(self.path, False)))\n+ with func(self.path) as reader:\nself.assertEqual(data.mgf_spectra_long, list(reader))\n- with read(self.path, False) as reader:\n+ with func(self.path, False) as reader:\nself.assertEqual(data.mgf_spectra_short, list(reader))\ndef test_read_no_charges(self):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add MGF class |
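Typical use of the new class, with a hypothetical file name and title: it iterates like the read function, exposes file-level parameters as a property, and supports random access by TITLE through __getitem__ without losing the read position.

    from pyteomics.mgf import MGF

    with MGF('spectra.mgf') as reader:
        print(reader.header)             # parameters read before BEGIN IONS
        for spectrum in reader:          # sequential access, as before
            print(spectrum['params']['title'])
        spectrum = reader['Spectrum 2']  # lookup by title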
377,522 | 24.11.2017 16:58:56 | -10,800 | b8734610869f06b8d268c0b2d781e605949f5323 | Remove universal import in parser test | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary.py",
"new_path": "pyteomics/auxiliary.py",
"diff": "@@ -488,11 +488,11 @@ class IteratorContextManager(object):\nreturn self\ndef __next__(self):\n- try:\n+ # try:\nreturn next(self._reader)\n- except StopIteration:\n- self.__exit__(None, None, None)\n- raise\n+ # except StopIteration:\n+ # self.__exit__(None, None, None)\n+ # raise\nnext = __next__\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_parser.py",
"new_path": "tests/test_parser.py",
"diff": "@@ -2,7 +2,7 @@ from os import path\nimport pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport unittest\n-from pyteomics.parser import *\n+from pyteomics import parser\nfrom string import ascii_uppercase as uppercase\nimport random\nclass ParserTest(unittest.TestCase):\n@@ -18,41 +18,41 @@ class ParserTest(unittest.TestCase):\ndef test_parse_simple(self):\nfor seq in self.simple_sequences:\n- self.assertEqual(seq, ''.join(parse(seq, labels=uppercase)))\n+ self.assertEqual(seq, ''.join(parser.parse(seq, labels=uppercase)))\ndef test_parse(self):\nself.assertEqual(\n[('P',), ('E',), ('P',), ('T',), ('I',), ('D',), ('E',)],\n- parse('PEPTIDE', split=True))\n+ parser.parse('PEPTIDE', split=True))\nself.assertEqual(['P', 'E', 'P', 'T', 'I', 'D', 'E'],\n- parse('H-PEPTIDE'))\n+ parser.parse('H-PEPTIDE'))\nfor seq in ['PEPTIDE', 'H-PEPTIDE', 'PEPTIDE-OH', 'H-PEPTIDE-OH']:\nself.assertEqual(['H-', 'P', 'E', 'P', 'T', 'I', 'D', 'E', '-OH'],\n- parse(seq, show_unmodified_termini=True))\n+ parser.parse(seq, show_unmodified_termini=True))\nself.assertEqual(['T', 'E', 'pS', 'T', 'oxM'],\n- parse('TEpSToxM', labels=std_labels + ['pS', 'oxM']))\n+ parser.parse('TEpSToxM', labels=parser.std_labels + ['pS', 'oxM']))\nself.assertEqual(\n[('H-', 'z', 'P'), ('E',), ('P',), ('z', 'T'), ('I',), ('D',),\n('z', 'E', '-OH')],\n- parse(\n- 'zPEPzTIDzE', True, True, labels=std_labels+['z']))\n+ parser.parse(\n+ 'zPEPzTIDzE', True, True, labels=parser.std_labels+['z']))\ndef test_tostring(self):\nfor seq in self.simple_sequences:\n- self.assertEqual(seq, tostring(parse(seq, labels=uppercase)))\n- self.assertEqual(seq, tostring(parse(\n+ self.assertEqual(seq, parser.tostring(parser.parse(seq, labels=uppercase)))\n+ self.assertEqual(seq, parser.tostring(parser.parse(\nseq, True, True, labels=uppercase), False))\ndef test_amino_acid_composition_simple(self):\nfor seq in self.simple_sequences:\n- comp = amino_acid_composition(seq, labels=uppercase)\n+ comp = parser.amino_acid_composition(seq, labels=uppercase)\nfor aa in set(seq):\nself.assertEqual(seq.count(aa), comp[aa])\ndef test_amino_acid_composition(self):\nfor seq in self.simple_sequences:\n- comp = amino_acid_composition(seq, term_aa=True, labels=uppercase)\n- comp_default = amino_acid_composition(seq, labels=uppercase)\n+ comp = parser.amino_acid_composition(seq, term_aa=True, labels=uppercase)\n+ comp_default = parser.amino_acid_composition(seq, labels=uppercase)\nself.assertEqual(1, comp['nterm'+seq[0]])\nif len(seq) > 1:\nself.assertEqual(1, comp['cterm'+seq[-1]])\n@@ -61,32 +61,32 @@ class ParserTest(unittest.TestCase):\ndef test_cleave(self):\nfor seq in self.simple_sequences:\n- for elem in cleave(\n- seq, expasy_rules['trypsin'], int(random.uniform(1, 10))):\n+ for elem in parser.cleave(\n+ seq, parser.expasy_rules['trypsin'], int(random.uniform(1, 10))):\nself.assertIn(elem, seq)\nself.assertTrue(any(elem == seq\n- for elem in cleave(seq, expasy_rules['trypsin'], len(seq))))\n+ for elem in parser.cleave(seq, parser.expasy_rules['trypsin'], len(seq))))\ndef test_cleave_min_length(self):\nfor seq in self.simple_sequences:\nml = random.uniform(1, 5)\n- for elem in cleave(\n- seq, expasy_rules['trypsin'], int(random.uniform(1, 10)), ml):\n+ for elem in parser.cleave(\n+ seq, parser.expasy_rules['trypsin'], int(random.uniform(1, 10)), ml):\nself.assertTrue(len(elem) >= ml)\ndef test_num_sites(self):\nself.assertEqual(\n- num_sites('RKCDE', 'K'), 1)\n+ parser.num_sites('RKCDE', 'K'), 
1)\nself.assertEqual(\n- num_sites('RKCDE', 'E'), 0)\n+ parser.num_sites('RKCDE', 'E'), 0)\nself.assertEqual(\n- num_sites('RKCDE', 'R'), 1)\n+ parser.num_sites('RKCDE', 'R'), 1)\nself.assertEqual(\n- num_sites('RKCDE', 'Z'), 0)\n+ parser.num_sites('RKCDE', 'Z'), 0)\ndef test_isoforms_simple(self):\nself.assertEqual(\n- set(isoforms('PEPTIDE',\n+ set(parser.isoforms('PEPTIDE',\nvariable_mods={'xx': ['A', 'B', 'P', 'E']})),\n{'PEPTIDE', 'PEPTIDxxE', 'PExxPTIDE', 'PExxPTIDxxE', 'PxxEPTIDE',\n'PxxEPTIDxxE', 'PxxExxPTIDE', 'PxxExxPTIDxxE', 'xxPEPTIDE',\n@@ -95,22 +95,22 @@ class ParserTest(unittest.TestCase):\ndef test_isoforms_universal(self):\nself.assertEqual(\n- set(isoforms('PEPTIDE',\n+ set(parser.isoforms('PEPTIDE',\nvariable_mods={'xx-': True})),\n{'PEPTIDE', 'xx-PEPTIDE'})\nself.assertEqual(\n- set(isoforms('PEPTIDE',\n+ set(parser.isoforms('PEPTIDE',\nvariable_mods={'-xx': True})),\n{'PEPTIDE', 'PEPTIDE-xx'})\nfor seq in self.simple_sequences:\nself.assertEqual(\n- sum(1 for _ in isoforms(seq,\n+ sum(1 for _ in parser.isoforms(seq,\nvariable_mods={'x': True})),\n2**len(seq))\ndef test_isoforms_terminal(self):\nself.assertEqual(\n- set(isoforms('PEPTIDE',\n+ set(parser.isoforms('PEPTIDE',\nvariable_mods={'xx': ['ntermP'], 'yy-': 'P'})),\n{'PEPTIDE', 'xxPEPTIDE', 'yy-PEPTIDE', 'yy-xxPEPTIDE'})\n@@ -118,16 +118,16 @@ class ParserTest(unittest.TestCase):\nfor j in range(50):\nL = random.randint(1, 10)\npeptide = ''.join([random.choice(self.labels) for _ in range(L)])\n- modseqs = isoforms(peptide, variable_mods=self.potential,\n+ modseqs = parser.isoforms(peptide, variable_mods=self.potential,\nfixed_mods=self.constant, labels=self.labels)\nforms = sum(1 for x in modseqs)\n- pp = parse(peptide, labels=self.extlabels)\n+ pp = parser.parse(peptide, labels=self.extlabels)\nN = 0\nif pp[0] =='N': N += 1\nif pp[-1] == 'C': N += 1\nfor p in modseqs:\nself.assertEqual(len(pp),\n- length(p, labels=self.extlabels))\n+ parser.length(p, labels=self.extlabels))\nself.assertEqual(forms, (3**pp.count('A')) *\n(2**(pp.count('X')+pp.count('C'))) * 2**N)\n@@ -136,10 +136,10 @@ class ParserTest(unittest.TestCase):\nL = random.randint(1, 10)\nM = random.randint(1, 10)\npeptide = ''.join([random.choice(self.labels) for _ in range(L)])\n- modseqs = isoforms(peptide, variable_mods=self.potential,\n+ modseqs = parser.isoforms(peptide, variable_mods=self.potential,\nlabels=self.labels,\nmax_mods=M, format='split')\n- pp = parse(peptide, labels=self.extlabels, split=True)\n+ pp = parser.parse(peptide, labels=self.extlabels, split=True)\nfor ms in modseqs:\nself.assertEqual(len(pp), len(ms))\nself.assertLessEqual(sum(i != j for i, j in zip(pp, ms)), M)\n@@ -148,31 +148,30 @@ class ParserTest(unittest.TestCase):\nfor j in range(50):\nL = random.randint(1, 10)\npeptide = ''.join([random.choice(self.labels) for _ in range(L)])\n- self.assertTrue(fast_valid(peptide, labels=self.labels))\n- self.assertTrue(valid(peptide, labels=self.labels))\n- self.assertTrue(valid(peptide))\n+ self.assertTrue(parser.fast_valid(peptide, labels=self.labels))\n+ self.assertTrue(parser.valid(peptide, labels=self.labels))\n+ self.assertTrue(parser.valid(peptide))\nfor aa in set(peptide):\nbad = peptide.replace(aa, 'Z')\n- self.assertFalse(fast_valid(bad, labels=self.labels))\n- self.assertFalse(valid(bad, labels=self.labels))\n+ self.assertFalse(parser.fast_valid(bad, labels=self.labels))\n+ self.assertFalse(parser.valid(bad, labels=self.labels))\ndef test_valid(self):\nfor j in range(50):\nL = random.randint(1, 10)\npeptide = 
''.join([random.choice(self.labels) for _ in range(L)])\n- modseqs = isoforms(peptide, variable_mods=self.potential,\n+ modseqs = parser.isoforms(peptide, variable_mods=self.potential,\nfixed_mods=self.constant, labels=self.labels)\n- self.assertFalse(valid('H-' + peptide, labels=self.labels))\n+ self.assertFalse(parser.valid('H-' + peptide, labels=self.labels))\nfor s in modseqs:\n- self.assertTrue(valid(s, labels=self.extlabels))\n+ self.assertTrue(parser.valid(s, labels=self.extlabels))\nfor aa in set(peptide):\nbad = s.replace(aa, 'Z')\n- self.assertFalse(fast_valid(bad, labels=self.labels))\n- self.assertFalse(valid(bad, labels=self.labels))\n+ self.assertFalse(parser.fast_valid(bad, labels=self.labels))\n+ self.assertFalse(parser.valid(bad, labels=self.labels))\nif __name__ == '__main__':\nimport doctest\n- from pyteomics import parser\ndoctest.testmod(parser)\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove universal import in parser test |
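The record above switches the parser tests from a star import to module-qualified names. A minimal sketch of the resulting call style (the cleavage example sequence is arbitrary, not taken from the test file; the `parse` output is the one asserted in the diff):

```python
# Namespaced usage of pyteomics.parser, as the updated tests do it.
from pyteomics import parser

# parse() splits a peptide string into residue labels
assert parser.parse('PEPTIDE', show_unmodified_termini=True) == \
    ['H-', 'P', 'E', 'P', 'T', 'I', 'D', 'E', '-OH']

# module-level data such as cleavage rules are now referenced explicitly
peptides = parser.cleave('AKRLCDEK', parser.expasy_rules['trypsin'])
print(peptides)
```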
377,522 | 05.01.2018 02:08:51 | -10,800 | 86c126aa5540aa8a88c552d185f4b73c15f77c11 | Set _default_id_attr on MzXML to "num" | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzxml.py",
"new_path": "pyteomics/mzxml.py",
"diff": "@@ -60,7 +60,6 @@ This module requires :py:mod:`lxml` and :py:mod:`numpy`.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n-from collections import deque, defaultdict\nimport heapq\nfrom . import xml, auxiliary as aux\n@@ -133,6 +132,7 @@ class MzXML(xml.ArrayConversionMixin, xml.IndexSavingXML):\n_indexed_tag_keys = {'scan': 'num'}\n_default_version = None\n_default_schema = xml._mzxml_schema_defaults\n+ _default_id_attr = 'num'\ndef __init__(self, *args, **kwargs):\nself.decode_binary = kwargs.pop('decode_binary', True)\n@@ -185,7 +185,6 @@ class MzXML(xml.ArrayConversionMixin, xml.IndexSavingXML):\nfor item in super(MzXML, self).iterfind(path, **kwargs):\nyield item\n-\ndef read(source, read_schema=False, iterative=True, use_index=False, dtype=None):\n\"\"\"Parse `source` and iterate through spectra.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -31,7 +31,6 @@ import re\nimport warnings\nwarnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\nimport socket\n-from functools import wraps\nfrom traceback import format_exc\nimport operator as op\nimport ast\n@@ -937,6 +936,9 @@ class IndexedXML(XML):\n----------\nelem_id : str\nThe id value of the entity to retrieve.\n+ id_key : str, optional\n+ The name of the XML attribute to use for lookup.\n+ Defaults to :py:attr:`self._default_id_attr`.\nReturns\n-------\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Set _default_id_attr on MzXML to "num" |
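Since this commit only changes a class attribute, here is a hedged sketch of what the new default enables for the user; the file name and scan number are hypothetical:

```python
# With _default_id_attr = 'num', MzXML.get_by_id() keys off the scan's
# "num" attribute by default, so id_key='num' no longer has to be spelled out.
from pyteomics import mzxml

with mzxml.MzXML('run.mzXML', use_index=True) as reader:
    scan = reader.get_by_id('1001')                # new default behavior
    same = reader.get_by_id('1001', id_key='num')  # previous explicit form
    assert scan['num'] == same['num']
```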
377,522 | 02.02.2018 01:37:23 | -10,800 | ce5d6a5dac4f13235e4dee4a591d28d3c62d999c | Add keep_cterm in reverse and shuffle decoy functions | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -169,7 +169,7 @@ def write(entries, output=None):\nreturn output.file\n-def reverse(sequence, keep_nterm=False):\n+def reverse(sequence, keep_nterm=False, keep_cterm=False):\n\"\"\"\nCreate a decoy sequence by reversing the original one.\n@@ -180,17 +180,22 @@ def reverse(sequence, keep_nterm=False):\nkeep_nterm : bool, optional\nIf :py:const:`True`, then the N-terminal residue will be kept.\nDefault is :py:const:`False`.\n+ keep_cterm : bool, optional\n+ If :py:const:`True`, then the C-terminal residue will be kept.\n+ Default is :py:const:`False`.\nReturns\n-------\ndecoy_sequence : str\nThe decoy sequence.\n\"\"\"\n- if keep_nterm and sequence:\n- return sequence[0] + reverse(sequence[1:], False)\n- return sequence[::-1]\n+ start = 1 if keep_nterm else 0\n+ end = len(sequence)-1 if keep_cterm else len(sequence)\n+ if start == end:\n+ return sequence\n+ return sequence[:start] + sequence[start:end][::-1] + sequence[end:]\n-def shuffle(sequence, keep_nterm=False):\n+def shuffle(sequence, keep_nterm=False, keep_cterm=False):\n\"\"\"\nCreate a decoy sequence by shuffling the original one.\n@@ -201,14 +206,22 @@ def shuffle(sequence, keep_nterm=False):\nkeep_nterm : bool, optional\nIf :py:const:`True`, then the N-terminal residue will be kept.\nDefault is :py:const:`False`.\n+ keep_cterm : bool, optional\n+ If :py:const:`True`, then the C-terminal residue will be kept.\n+ Default is :py:const:`False`.\nReturns\n-------\ndecoy_sequence : str\nThe decoy sequence.\n\"\"\"\n- if keep_nterm and sequence:\n- return sequence[0] + shuffle(sequence[1:], False)\n+ start = 1 if keep_nterm else 0\n+ end = len(sequence)-1 if keep_cterm else len(sequence)\n+ if start == end:\n+ return sequence\n+ elif keep_cterm or keep_nterm:\n+ return sequence[:start] + shuffle(sequence[start:end]) + sequence[end:]\n+\nmodified_sequence = list(sequence)\nrandom.shuffle(modified_sequence)\nreturn ''.join(modified_sequence)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_fasta.py",
"new_path": "tests/test_fasta.py",
"diff": "@@ -3,16 +3,15 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport tempfile\nimport unittest\n-from pyteomics.fasta import *\n+from pyteomics import fasta\nimport random\nimport string\nclass FastaTest(unittest.TestCase):\ndef setUp(self):\nself.fasta_file = 'test.fasta'\n- self.fasta_entries_short = list(read(self.fasta_file,\n- ignore_comments=True))\n- self.fasta_entries_long = list(read(self.fasta_file))\n+ self.fasta_entries_short = list(fasta.read(self.fasta_file, ignore_comments=True))\n+ self.fasta_entries_long = list(fasta.read(self.fasta_file))\ndef test_simple_read_long_comments(self):\nself.assertEqual(self.fasta_entries_long,\n@@ -29,8 +28,8 @@ class FastaTest(unittest.TestCase):\ndef test_decoy_sequence_reverse(self):\nsequence = ''.join(random.choice(string.ascii_uppercase)\nfor i in range(random.randint(1, 50)))\n- self.assertEqual(decoy_sequence(sequence, 'reverse'),\n- sequence[::-1])\n+ self.assertEqual(fasta.decoy_sequence(sequence, 'reverse'), sequence[::-1])\n+ self.assertEqual(fasta.reverse(sequence), sequence[::-1])\ndef test_decoy_sequence_shuffle(self):\nsequences = [''.join(random.choice(string.ascii_uppercase)\n@@ -38,7 +37,15 @@ class FastaTest(unittest.TestCase):\nfor j in range(10)]\ntest = True\nfor s in sequences:\n- ss = decoy_sequence(s, 'shuffle')\n+ ss = fasta.decoy_sequence(s, 'shuffle')\n+ self.assertEqual(sorted(list(s)), sorted(list(ss)))\n+ if not all(a == b for a, b in zip(s, ss)):\n+ test = False\n+ self.assertFalse(test)\n+\n+ test = True\n+ for s in sequences:\n+ ss = fasta.shuffle(s)\nself.assertEqual(sorted(list(s)), sorted(list(ss)))\nif not all(a == b for a, b in zip(s, ss)):\ntest = False\n@@ -49,8 +56,9 @@ class FastaTest(unittest.TestCase):\nfor i in range(random.randint(1, 50)))\nfor j in range(10)]\nfor s in sequences:\n- ss = decoy_sequence(s, 'fused')\n+ ss = fasta.decoy_sequence(s, 'fused')\nself.assertEqual(ss, s[::-1] + 'R' + s)\n+ self.assertEqual(ss, fasta.fused_decoy(s))\ndef test_decoy_keep_nterm(self):\nsequences = [''.join(random.choice(string.ascii_uppercase)\n@@ -58,29 +66,43 @@ class FastaTest(unittest.TestCase):\nfor j in range(10)]\nfor mode in ('shuffle', 'reverse'):\nfor seq in sequences:\n- self.assertEqual(seq[0], decoy_sequence(seq, mode, keep_nterm=True)[0])\n+ self.assertEqual(seq[0], fasta.decoy_sequence(seq, mode, keep_nterm=True)[0])\n+\n+ for seq in sequences:\n+ self.assertEqual(seq[1:][::-1], fasta.reverse(seq, keep_nterm=True)[1:])\n+\n+ def test_decoy_keep_cterm(self):\n+ sequences = [''.join(random.choice(string.ascii_uppercase)\n+ for i in range(random.randint(1, 50)))\n+ for j in range(10)]\n+ for mode in ('shuffle', 'reverse'):\n+ for seq in sequences:\n+ self.assertEqual(seq[-1], fasta.decoy_sequence(seq, mode, keep_cterm=True)[-1])\n+\n+ for seq in sequences:\n+ self.assertEqual(seq[:-1][::-1], fasta.reverse(seq, keep_cterm=True)[:-1])\ndef test_read_and_write_fasta_short(self):\nwith tempfile.TemporaryFile(mode='r+') as new_fasta_file:\n- write(read(self.fasta_file, ignore_comments=True),\n+ fasta.write(fasta.read(self.fasta_file, ignore_comments=True),\nnew_fasta_file)\nnew_fasta_file.seek(0)\n- new_entries = list(read(new_fasta_file, ignore_comments=True))\n+ new_entries = list(fasta.read(new_fasta_file, ignore_comments=True))\nself.assertEqual(new_entries, self.fasta_entries_short)\ndef test_read_and_write_long(self):\nwith tempfile.TemporaryFile(mode='r+') as new_fasta_file:\n- write(read(self.fasta_file), 
new_fasta_file)\n+ fasta.write(fasta.read(self.fasta_file), new_fasta_file)\nnew_fasta_file.seek(0)\n- new_entries = list(read(new_fasta_file))\n+ new_entries = list(fasta.read(new_fasta_file))\nself.assertEqual(new_entries, self.fasta_entries_long)\ndef test_write_decoy_db(self):\nwith tempfile.TemporaryFile(mode='r+') as decdb:\n- write_decoy_db(self.fasta_file, decdb,\n+ fasta.write_decoy_db(self.fasta_file, decdb,\ndecoy_only=False, prefix='PREFIX_')\ndecdb.seek(0)\n- all_entries = list(read(decdb, False))\n+ all_entries = list(fasta.read(decdb, False))\nself.assertEqual(all_entries, self.fasta_entries_long +\n[('PREFIX_' + a, b[::-1]) for a, b in self.fasta_entries_long])\n@@ -90,9 +112,9 @@ class FastaTest(unittest.TestCase):\n' GN=acoX PE=4 SV=2')\nsequence = 'SEQUENCE'\nwith tempfile.TemporaryFile(mode='r+') as db:\n- write([(header, sequence)], db)\n+ fasta.write([(header, sequence)], db)\ndb.seek(0)\n- entries = list(decoy_db(db, prefix='PREFIX_', parser=parse, decoy_only=True))\n+ entries = list(fasta.decoy_db(db, prefix='PREFIX_', parser=fasta.parse, decoy_only=True))\nparsed = {'GN': 'acoX',\n'OS': 'Ralstonia eutropha '\n@@ -124,7 +146,7 @@ class FastaTest(unittest.TestCase):\n'gene_id': 'ACOX',\n'name': 'Acetoin catabolism protein X',\n'taxon': 'RALEH'}\n- self.assertEqual(parse(header), parsed)\n+ self.assertEqual(fasta.parse(header), parsed)\ndef test_parser_uniptokb_isoform(self):\nheader = ('sp|Q4R572-2|1433B_MACFA Isoform Short of 14-3-3 protein beta'\n@@ -137,7 +159,7 @@ class FastaTest(unittest.TestCase):\n'id': 'Q4R572-2',\n'name': 'Isoform Short of 14-3-3 protein beta/alpha',\n'taxon': 'MACFA'}\n- self.assertEqual(parse(header), parsed)\n+ self.assertEqual(fasta.parse(header), parsed)\ndef test_parser_uniref(self):\nheader = ('>UniRef100_A5DI11 Elongation factor 2 n=1 '\n@@ -151,12 +173,12 @@ class FastaTest(unittest.TestCase):\n'type': 'UniRef100',\n'accession': 'A5DI11',\n'n': 1}\n- self.assertEqual(parse(header), parsed)\n+ self.assertEqual(fasta.parse(header), parsed)\ndef test_parser_uniparc(self):\nheader = '>UPI0000000005 status=active'\nparsed = {'id': 'UPI0000000005', 'status': 'active'}\n- self.assertEqual(parse(header), parsed)\n+ self.assertEqual(fasta.parse(header), parsed)\ndef test_parser_unimes(self):\nheader = ('MES00000000005 Putative uncharacterized protein GOS_3018412 '\n@@ -166,7 +188,7 @@ class FastaTest(unittest.TestCase):\n'SV': 1,\n'id': 'MES00000000005',\n'name': 'Putative uncharacterized protein GOS_3018412 (Fragment)'}\n- self.assertEqual(parse(header), parsed)\n+ self.assertEqual(fasta.parse(header), parsed)\ndef test_parser_spd(self):\nheader = ('>P31947|1433S_HUMAN| 14-3-3 protein sigma (Stratifin) '\n@@ -177,7 +199,7 @@ class FastaTest(unittest.TestCase):\n'gene_id': '1433S',\n'id': 'P31947',\n'taxon': 'HUMAN'}\n- self.assertEqual(parse(header), parsed)\n+ self.assertEqual(fasta.parse(header), parsed)\ndef test_parser_spd_mult_ids(self):\nheader = ('>P02763 Q8TC16|A1AG1_HUMAN| Alpha-1-acid glycoprotein 1 '\n@@ -188,7 +210,7 @@ class FastaTest(unittest.TestCase):\n'gene_id': 'A1AG1',\n'id': 'P02763 Q8TC16',\n'taxon': 'HUMAN'}\n- self.assertEqual(parse(header), parsed)\n+ self.assertEqual(fasta.parse(header), parsed)\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add keep_cterm in reverse and shuffle decoy functions |
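A quick usage sketch of the new keyword; the sequence is an arbitrary example, and the `reverse` outputs follow directly from the code in the diff:

```python
from pyteomics import fasta

fasta.reverse('PEPTIDE')                    # -> 'EDITPEP'
fasta.reverse('PEPTIDE', keep_nterm=True)   # -> 'PEDITPE', N-terminal P kept
fasta.reverse('PEPTIDE', keep_cterm=True)   # -> 'DITPEPE', C-terminal E kept

# shuffle() pins the requested termini and permutes only the interior
decoy = fasta.shuffle('PEPTIDE', keep_nterm=True, keep_cterm=True)
assert decoy[0] == 'P' and decoy[-1] == 'E'
```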
377,522 | 06.02.2018 00:46:44 | -10,800 | e4d3a370c0d38cc0d7d2812554b55e8c73d30422 | Prevent some exceptions in mzid.DataFrame (fixes | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -342,16 +342,25 @@ def DataFrame(*args, **kwargs):\nif evref:\nprot_descr, accessions, isd, starts, ends, lengths = [], [], [], [], [], []\nfor d in evref:\n- prot_descr.append(d['protein description'])\n- accessions.append(d['accession'])\n+ prot_descr.append(d.get('protein description'))\n+ accessions.append(d.get('accession'))\nisd.append(d.get('isDecoy'))\n- starts.append(d['start'])\n- ends.append(d['end'])\n- lengths.append(d['length'])\n+ starts.append(d.get('start'))\n+ ends.append(d.get('end'))\n+ lengths.append(d.get('length'))\nisd = all(isd)\nif sep is not None:\n+ if all(isinstance(prd, str) for prd in prot_descr):\nprot_descr = sep.join(prot_descr)\n+\n+ if all(isinstance(acc, str) for acc in accessions):\naccessions = sep.join(accessions)\n+\n+ if all(prd is None for prd in prot_descr):\n+ prot_descr = None\n+ if all(acc is None for acc in accessions):\n+ accessions = None\n+\ninfo.update((k, v) for k, v in evref[0].items() if isinstance(v, (str, int, float, list)))\ninfo['protein description'] = prot_descr\ninfo['accession'] = accessions\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Prevent some exceptions in mzid.DataFrame (fixes #21) |
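To illustrate the effect of the defensive `d.get()` lookups added above, a hedged sketch ('search.mzid' is a placeholder file name):

```python
# Before this fix, a PeptideEvidence element missing an optional attribute
# such as 'protein description' made DataFrame() raise KeyError; afterwards
# the corresponding cell is simply left empty (None/NaN).
from pyteomics import mzid

df = mzid.DataFrame('search.mzid', sep=';')
if 'protein description' in df:
    print(df['protein description'].isnull().sum(), 'PSMs lack a description')
```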
377,522 | 09.02.2018 19:09:58 | -10,800 | 7e3ba2faac0cd2e0bbed7f96191c25aff749c8de | Update mzid schema info and move it to a separate file | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -98,13 +98,13 @@ This module requires :py:mod:`lxml`.\nimport warnings\nwarnings.formatwarning = lambda msg, *args: str(msg) + '\\n'\nfrom . import auxiliary as aux\n-from . import xml\n+from . import xml, _schema_defaults\nclass MzIdentML(xml.IndexSavingXML):\n\"\"\"Parser class for MzIdentML files.\"\"\"\nfile_format = 'mzIdentML'\n_root_element = 'MzIdentML'\n- _default_schema = xml._mzid_schema_defaults\n+ _default_schema = _schema_defaults._mzid_schema_defaults\n_default_version = '1.1.0'\n_default_iter_tag = 'SpectrumIdentificationResult'\n_structures_to_flatten = {'Fragmentation'}\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -68,7 +68,7 @@ This module requires :py:mod:`lxml` and :py:mod:`numpy`.\nimport numpy as np\nimport re\nimport warnings\n-from . import xml, auxiliary as aux\n+from . import xml, auxiliary as aux, _schema_defaults\nfrom .xml import etree\nNON_STANDARD_DATA_ARRAY = 'non-standard data array'\n@@ -94,7 +94,7 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\n\"\"\"Parser class for mzML files.\"\"\"\nfile_format = 'mzML'\n_root_element = 'mzML'\n- _default_schema = xml._mzml_schema_defaults\n+ _default_schema = _schema_defaults._mzml_schema_defaults\n_default_version = '1.1.0'\n_default_iter_tag = 'spectrum'\n_structures_to_flatten = {'binaryDataArrayList'}\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzxml.py",
"new_path": "pyteomics/mzxml.py",
"diff": "@@ -62,7 +62,7 @@ This module requires :py:mod:`lxml` and :py:mod:`numpy`.\nimport heapq\n-from . import xml, auxiliary as aux\n+from . import xml, auxiliary as aux, _schema_defaults\nimport numpy as np\n@@ -131,7 +131,7 @@ class MzXML(xml.ArrayConversionMixin, xml.IndexSavingXML):\n_indexed_tags = {'scan'}\n_indexed_tag_keys = {'scan': 'num'}\n_default_version = None\n- _default_schema = xml._mzxml_schema_defaults\n+ _default_schema = _schema_defaults._mzxml_schema_defaults\n_default_id_attr = 'num'\ndef __init__(self, *args, **kwargs):\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/openms/featurexml.py",
"new_path": "pyteomics/openms/featurexml.py",
"diff": "@@ -37,13 +37,13 @@ This module requres :py:mod:`lxml`.\n--------------------------------------------------------------------------------\n\"\"\"\n-from .. import xml, auxiliary as aux\n+from .. import xml, auxiliary as aux, _schema_defaults\nclass FeatureXML(xml.IndexedXML):\n\"\"\"Parser class for featureXML files.\"\"\"\nfile_format = 'featureXML'\n_root_element = 'featureMap'\n- _default_schema = xml._featurexml_schema_defaults\n+ _default_schema = _schema_defaults._featurexml_schema_defaults\n_default_version = '1.6'\n_default_iter_tag = 'feature'\n_structures_to_flatten = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/openms/trafoxml.py",
"new_path": "pyteomics/openms/trafoxml.py",
"diff": "@@ -36,13 +36,13 @@ This module requres :py:mod:`lxml`.\n--------------------------------------------------------------------------------\n\"\"\"\n-from .. import xml, auxiliary as aux\n+from .. import xml, auxiliary as aux, _schema_defaults\nclass TrafoXML(xml.XML):\n\"\"\"Parser class for trafoXML files.\"\"\"\nfile_format = 'trafoXML'\n_root_element = 'TrafoXML'\n- _default_schema = xml._trafoxml_schema_defaults\n+ _default_schema = _schema_defaults._trafoxml_schema_defaults\n_default_version = '1.0'\n_default_iter_tag = 'Pair'\n_schema_location_param = 'noNamespaceSchemaLocation'\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -95,13 +95,13 @@ This module requires :py:mod:`lxml`.\n# limitations under the License.\nfrom lxml import etree\n-from . import xml, auxiliary as aux\n+from . import xml, auxiliary as aux, _schema_defaults\nclass PepXML(xml.XML):\n\"\"\"Parser class for pepXML files.\"\"\"\nfile_format = 'pepXML'\n_root_element = 'msms_pipeline_analysis'\n- _default_schema = xml._pepxml_schema_defaults\n+ _default_schema = _schema_defaults._pepxml_schema_defaults\n_default_version = '1.15'\n_default_iter_tag = 'spectrum_query'\n_structures_to_flatten = {'search_score_summary', 'modification_info'}\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -88,13 +88,13 @@ This module requires :py:mod:`lxml` and :py:mod:`numpy`.\nimport itertools as it\nimport operator\n-from . import xml, auxiliary as aux\n+from . import xml, auxiliary as aux, _schema_defaults\nclass TandemXML(xml.XML):\n\"\"\"Parser class for TandemXML files.\"\"\"\nfile_format = \"TandemXML\"\n_root_element = \"bioml\"\n- _default_schema = xml._tandem_schema_defaults\n+ _default_schema = _schema_defaults._tandem_schema_defaults\n_default_iter_tag = 'group[type=\"model\"]'\n_structures_to_flatten = {'domain'}\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -43,6 +43,7 @@ from .auxiliary import FileReader, PyteomicsError, basestring, _file_obj\nfrom .auxiliary import unitint, unitfloat, unitstr\nfrom .auxiliary import _keepstate_method as _keepstate\nfrom .auxiliary import BinaryDataArrayTransformer\n+\ntry: # Python 2.7\nfrom urllib2 import urlopen, URLError\nexcept ImportError: # Python 3.x\n@@ -1103,347 +1104,3 @@ class ArrayConversionMixin(BinaryDataArrayTransformer):\nkey = record.key\nreturn self._convert_array(key, array)\n-\n-_trafoxml_schema_defaults = {'bools': set(),\n- 'charlists': set(),\n- 'floatlists': set(),\n- 'floats': {('Pair', 'from'), ('Pair', 'to'), ('TrafoXML', 'version')},\n- 'intlists': set(),\n- 'ints': {('Pairs', 'count')},\n- 'lists': {'Pair', 'Param'}}\n-\n-_featurexml_schema_defaults = {'bools': {\n- ('PeptideIdentification', 'higher_score_better'),\n- ('ProteinIdentification', 'higher_score_better'),\n- ('UnassignedPeptideIdentification', 'higher_score_better')},\n- 'charlists': set(),\n- 'floatlists': set(),\n- 'floats': {('PeptideHit', 'score'),\n- ('PeptideIdentification', 'MZ'),\n- ('PeptideIdentification', 'RT'),\n- ('PeptideIdentification', 'significance_threshold'),\n- ('ProteinHit', 'score'),\n- ('ProteinIdentification', 'significance_threshold'),\n- ('SearchParameters', 'peak_mass_tolerance'),\n- ('SearchParameters', 'precursor_peak_tolerance'),\n- ('UnassignedPeptideIdentification', 'MZ'),\n- ('UnassignedPeptideIdentification', 'RT'),\n- ('UnassignedPeptideIdentification', 'significance_threshold'),\n- ('pt', 'x'), ('pt', 'y'), ('position', 'position'),\n- ('feature', 'overallquality'), ('feature', 'intensity')\n- },\n- 'intlists': set(),\n- 'ints': {('PeptideHit', 'charge'),\n- ('PeptideIdentification', 'spectrum_reference'),\n- ('SearchParameters', 'missed_cleavages'),\n- ('UnassignedPeptideIdentification', 'spectrum_reference'),\n- ('featureList', 'count'), ('convexhull', 'nr'),\n- ('position', 'dim'), ('feature', 'spectrum_index'),\n- ('feature', 'charge'), ('quality', 'dim'), ('quality', 'quality')},\n- 'lists': {'FixedModification', 'IdentificationRun',\n- 'PeptideHit', 'PeptideIdentification', 'ProteinHit',\n- 'UnassignedPeptideIdentification', 'VariableModification',\n- 'convexhull', 'dataProcessing', 'feature', 'hposition',\n- 'hullpoint', 'param', 'position', 'processingAction',\n- 'pt', 'quality', 'userParam'}}\n-\n-_mzid_schema_defaults = {\n- 'ints': {('DBSequence', 'length'),\n- ('IonType', 'charge'),\n- ('BibliographicReference', 'year'),\n- ('SubstitutionModification', 'location'),\n- ('PeptideEvidence', 'end'),\n- ('Enzyme', 'missedCleavages'),\n- ('PeptideEvidence', 'start'),\n- ('Modification', 'location'),\n- ('SpectrumIdentificationItem', 'rank'),\n- ('SpectrumIdentificationItem', 'chargeState'),\n- ('SearchDatabase', 'numDatabaseSequences')},\n- 'floats': {('SubstitutionModification', 'monoisotopicMassDelta'),\n- ('SpectrumIdentificationItem', 'experimentalMassToCharge'),\n- ('Residue', 'mass'),\n- ('SpectrumIdentificationItem', 'calculatedPI'),\n- ('Modification', 'avgMassDelta'),\n- ('SearchModification', 'massDelta'),\n- ('Modification', 'monoisotopicMassDelta'),\n- ('SubstitutionModification', 'avgMassDelta'),\n- ('SpectrumIdentificationItem', 'calculatedMassToCharge')},\n- 'bools': {('PeptideEvidence', 'isDecoy'),\n- ('SearchModification', 'fixedMod'),\n- ('Enzymes', 'independent'),\n- ('Enzyme', 'semiSpecific'),\n- ('SpectrumIdentificationItem', 'passThreshold'),\n- ('ProteinDetectionHypothesis', 'passThreshold')},\n- 'lists': {'SourceFile', 
'SpectrumIdentificationProtocol',\n- 'ProteinDetectionHypothesis', 'SpectraData', 'Enzyme',\n- 'Modification', 'MassTable', 'DBSequence',\n- 'InputSpectra', 'cv', 'IonType', 'SearchDatabaseRef',\n- 'Peptide', 'SearchDatabase', 'ContactRole', 'cvParam',\n- 'ProteinAmbiguityGroup', 'SubSample',\n- 'SpectrumIdentificationItem', 'TranslationTable',\n- 'AmbiguousResidue', 'SearchModification',\n- 'SubstitutionModification', 'PeptideEvidenceRef',\n- 'PeptideEvidence', 'SpecificityRules',\n- 'SpectrumIdentificationResult', 'Filter', 'FragmentArray',\n- 'InputSpectrumIdentifications', 'BibliographicReference',\n- 'SpectrumIdentification', 'Sample', 'Affiliation',\n- 'PeptideHypothesis',\n- 'Measure', 'SpectrumIdentificationItemRef'},\n- 'intlists': {('IonType', 'index'), ('MassTable', 'msLevel')},\n- 'floatlists': {('FragmentArray', 'values')},\n- 'charlists': {('Modification', 'residues'),\n- ('SearchModification', 'residues')}}\n-\n-_tandem_schema_defaults = {'ints': {\n- ('group', 'z'), ('aa', 'at')} | {('domain', k) for k in [\n- 'missed_cleavages', 'start', 'end', 'y_ions', 'b_ions',\n- 'a_ions', 'x_ions', 'c_ions', 'z_ions']},\n-\n- 'floats': {('group', k) for k in [\n- 'fI', 'sumI', 'maxI', 'mh', 'expect', 'rt']} | {\n- ('domain', k) for k in [\n- 'expect', 'hyperscore', 'b_score', 'y_score',\n- 'a_score', 'x_score', 'c_score', 'z_score',\n- 'nextscore', 'delta', 'mh']} | {\n- ('protein', 'expect'), ('protein', 'sumI'),\n- ('aa', 'modified')},\n-\n- 'bools': set(),\n- 'lists': {'group', 'trace', 'attribute', 'protein', 'aa', 'note'},\n- 'floatlists': {('values', 'values')},\n- 'intlists': set(), 'charlists': set()}\n-\n-_mzxml_schema_defaults = {'bools': {('dataProcessing', 'centroided'),\n- ('dataProcessing', 'chargeDeconvoluted'),\n- ('dataProcessing', 'deisotoped'),\n- ('dataProcessing', 'spotIntegration'),\n- ('maldi', 'collisionGas'),\n- ('scan', 'centroided'),\n- ('scan', 'chargeDeconvoluted'),\n- ('scan', 'deisotoped')},\n- 'charlists': set(),\n- 'floatlists': set(),\n- 'floats': {('dataProcessing', 'intensityCutoff'),\n- ('precursorMz', 'precursorIntensity'),\n- ('precursorMz', 'windowWideness'),\n- ('precursorMz', 'precursorMz'),\n- ('scan', 'basePeakIntensity'),\n- ('scan', 'basePeakMz'),\n- ('scan', 'cidGasPressure'),\n- ('scan', 'collisionEnergy'),\n- ('scan', 'compensationVoltage'),\n- ('scan', 'endMz'),\n- ('scan', 'highMz'),\n- ('scan', 'ionisationEnergy'),\n- ('scan', 'lowMz'),\n- ('scan', 'startMz'),\n- ('scan', 'totIonCurrent')},\n- 'duration': {(\"scan\", \"retentionTime\")\n- },\n- 'intlists': set(),\n- 'ints': {('msInstrument', 'msInstrumentID'),\n- ('peaks', 'compressedLen'),\n- ('precursorMz', 'precursorCharge'),\n- ('robot', 'deadVolume'),\n- ('scan', 'msInstrumentID'),\n- ('scan', 'peaksCount'),\n- ('scanOrigin', 'num'),\n- ('scan', 'msLevel')},\n- 'lists': {'dataProcessing',\n- 'msInstrument',\n- 'parentFile',\n- 'peaks',\n- 'plate',\n- 'precursorMz',\n- 'scanOrigin',\n- 'spot'}}\n-\n-_mzml_schema_defaults = {'ints': {\n- ('spectrum', 'index'),\n- ('instrumentConfigurationList', 'count'),\n- ('binaryDataArray', 'encodedLength'),\n- ('cvList', 'count'),\n- ('binaryDataArray', 'arrayLength'),\n- ('scanWindowList', 'count'),\n- ('componentList', 'count'),\n- ('sourceFileList', 'count'),\n- ('productList', 'count'),\n- ('referenceableParamGroupList', 'count'),\n- ('scanList', 'count'),\n- ('spectrum', 'defaultArrayLength'),\n- ('dataProcessingList', 'count'),\n- ('sourceFileRefList', 'count'),\n- ('scanSettingsList', 'count'),\n- ('selectedIonList', 'count'),\n- 
('chromatogram', 'defaultArrayLength'),\n- ('precursorList', 'count'),\n- ('chromatogram', 'index'),\n- ('processingMethod', 'order'),\n- ('targetList', 'count'),\n- ('sampleList', 'count'),\n- ('softwareList', 'count'),\n- ('binaryDataArrayList', 'count'),\n- ('spectrumList', 'count'),\n- ('chromatogramList', 'count')},\n- 'floats': {},\n- 'bools': {},\n- 'lists': {'scan', 'spectrum', 'sample', 'cv', 'dataProcessing',\n- 'cvParam', 'source', 'userParam', 'detector', 'product',\n- 'referenceableParamGroupRef', 'selectedIon', 'sourceFileRef',\n- 'binaryDataArray', 'analyzer', 'scanSettings',\n- 'instrumentConfiguration', 'chromatogram', 'target',\n- 'processingMethod', 'precursor', 'sourceFile',\n- 'referenceableParamGroup', 'contact', 'scanWindow', 'software'},\n- 'intlists': {},\n- 'floatlists': {},\n- 'charlists': {}}\n-\n-_pepxml_schema_defaults = {'ints':\n- {('xpressratio_summary', 'xpress_light'),\n- ('distribution_point', 'obs_5_distr'),\n- ('distribution_point', 'obs_2_distr'),\n- ('enzymatic_search_constraint', 'max_num_internal_cleavages'),\n- ('asapratio_lc_heavypeak', 'right_valley'),\n- ('libra_summary', 'output_type'),\n- ('distribution_point', 'obs_7_distr'),\n- ('spectrum_query', 'index'),\n- ('data_filter', 'number'),\n- ('roc_data_point', 'num_incorr'),\n- ('search_hit', 'num_tol_term'),\n- ('search_hit', 'num_missed_cleavages'),\n- ('asapratio_lc_lightpeak', 'right_valley'),\n- ('libra_summary', 'normalization'),\n- ('specificity', 'min_spacing'),\n- ('database_refresh_timestamp', 'min_num_enz_term'),\n- ('enzymatic_search_constraint', 'min_number_termini'),\n- ('xpressratio_result', 'light_lastscan'),\n- ('distribution_point', 'obs_3_distr'),\n- ('spectrum_query', 'end_scan'),\n- ('analysis_result', 'id'),\n- ('search_database', 'size_in_db_entries'),\n- ('search_hit', 'hit_rank'),\n- ('alternative_protein', 'num_tol_term'),\n- ('search_hit', 'num_tot_proteins'),\n- ('asapratio_summary', 'elution'),\n- ('search_hit', 'tot_num_ions'),\n- ('error_point', 'num_incorr'),\n- ('mixture_model', 'precursor_ion_charge'),\n- ('roc_data_point', 'num_corr'),\n- ('search_hit', 'num_matched_ions'),\n- ('dataset_derivation', 'generation_no'),\n- ('xpressratio_result', 'heavy_firstscan'),\n- ('xpressratio_result', 'heavy_lastscan'),\n- ('error_point', 'num_corr'),\n- ('spectrum_query', 'assumed_charge'),\n- ('analysis_timestamp', 'id'),\n- ('xpressratio_result', 'light_firstscan'),\n- ('distribution_point', 'obs_4_distr'),\n- ('asapratio_lc_heavypeak', 'left_valley'),\n- ('fragment_masses', 'channel'),\n- ('distribution_point', 'obs_6_distr'),\n- ('affected_channel', 'channel'),\n- ('search_result', 'search_id'),\n- ('contributing_channel', 'channel'),\n- ('asapratio_lc_lightpeak', 'left_valley'),\n- ('asapratio_peptide_data', 'area_flag'),\n- ('search_database', 'size_of_residues'),\n- ('asapratio_peptide_data', 'cidIndex'),\n- ('mixture_model', 'num_iterations'),\n- ('mod_aminoacid_mass', 'position'),\n- ('spectrum_query', 'start_scan'),\n- ('asapratio_summary', 'area_flag'),\n- ('mixture_model', 'tot_num_spectra'),\n- ('search_summary', 'search_id'),\n- ('xpressratio_timestamp', 'xpress_light'),\n- ('distribution_point', 'obs_1_distr'),\n- ('intensity', 'channel'),\n- ('asapratio_contribution', 'charge'),\n- ('libra_summary', 'centroiding_preference')},\n- 'floats':\n- {('asapratio_contribution', 'error'),\n- ('asapratio_lc_heavypeak', 'area_error'),\n- ('modification_info', 'mod_nterm_mass'),\n- ('distribution_point', 'model_4_neg_distr'),\n- ('distribution_point', 
'model_5_pos_distr'),\n- ('spectrum_query', 'precursor_neutral_mass'),\n- ('asapratio_lc_heavypeak', 'time_width'),\n- ('xpressratio_summary', 'masstol'),\n- ('affected_channel', 'correction'),\n- ('distribution_point', 'model_7_neg_distr'),\n- ('error_point', 'error'),\n- ('intensity', 'target_mass'),\n- ('roc_data_point', 'sensitivity'),\n- ('distribution_point', 'model_4_pos_distr'),\n- ('distribution_point', 'model_2_neg_distr'),\n- ('distribution_point', 'model_3_pos_distr'),\n- ('mixture_model', 'prior_probability'),\n- ('roc_data_point', 'error'),\n- ('intensity', 'normalized'),\n- ('modification_info', 'mod_cterm_mass'),\n- ('asapratio_lc_lightpeak', 'area_error'),\n- ('distribution_point', 'fvalue'),\n- ('distribution_point', 'model_1_neg_distr'),\n- ('peptideprophet_summary', 'min_prob'),\n- ('asapratio_result', 'mean'),\n- ('point', 'pos_dens'),\n- ('fragment_masses', 'mz'),\n- ('mod_aminoacid_mass', 'mass'),\n- ('distribution_point', 'model_6_neg_distr'),\n- ('asapratio_lc_lightpeak', 'time_width'),\n- ('asapratio_result', 'heavy2light_error'),\n- ('peptideprophet_result', 'probability'),\n- ('error_point', 'min_prob'),\n- ('peptideprophet_summary', 'est_tot_num_correct'),\n- ('roc_data_point', 'min_prob'),\n- ('asapratio_result', 'heavy2light_mean'),\n- ('distribution_point', 'model_5_neg_distr'),\n- ('mixturemodel', 'neg_bandwidth'),\n- ('asapratio_result', 'error'),\n- ('xpressratio_result', 'light_mass'),\n- ('point', 'neg_dens'),\n- ('asapratio_lc_lightpeak', 'area'),\n- ('distribution_point', 'model_1_pos_distr'),\n- ('xpressratio_result', 'mass_tol'),\n- ('mixturemodel', 'pos_bandwidth'),\n- ('xpressratio_result', 'light_area'),\n- ('asapratio_peptide_data', 'heavy_mass'),\n- ('distribution_point', 'model_2_pos_distr'),\n- ('search_hit', 'calc_neutral_pep_mass'),\n- ('intensity', 'absolute'),\n- ('asapratio_peptide_data', 'light_mass'),\n- ('distribution_point', 'model_3_neg_distr'),\n- ('aminoacid_modification', 'mass'),\n- ('asapratio_lc_heavypeak', 'time'),\n- ('asapratio_lc_lightpeak', 'time'),\n- ('asapratio_lc_lightpeak', 'background'),\n- ('mixture_model', 'est_tot_correct'),\n- ('point', 'value'),\n- ('asapratio_lc_heavypeak', 'background'),\n- ('terminal_modification', 'mass'),\n- ('fragment_masses', 'offset'),\n- ('xpressratio_result', 'heavy_mass'),\n- ('search_hit', 'protein_mw'),\n- ('libra_summary', 'mass_tolerance'),\n- ('spectrum_query', 'retention_time_sec'),\n- ('distribution_point', 'model_7_pos_distr'),\n- ('asapratio_lc_heavypeak', 'area'),\n- ('alternative_protein', 'protein_mw'),\n- ('asapratio_contribution', 'ratio'),\n- ('xpressratio_result', 'heavy_area'),\n- ('distribution_point', 'model_6_pos_distr')},\n- 'bools':\n- {('sample_enzyme', 'independent'),\n- ('intensity', 'reject'),\n- ('libra_result', 'is_rejected')},\n- 'intlists': set(),\n- 'floatlists': set(),\n- 'charlists': set(),\n- 'lists': {'point', 'aminoacid_modification', 'msms_run_summary',\n- 'mixturemodel', 'search_hit', 'mixturemodel_distribution',\n- 'sequence_search_constraint', 'specificity', 'alternative_protein',\n- 'analysis_result', 'data_filter', 'fragment_masses', 'error_point',\n- 'parameter', 'spectrum_query', 'search_result', 'affected_channel',\n- 'analysis_summary', 'roc_data_point', 'distribution_point',\n- 'search_summary', 'mod_aminoacid_mass', 'search_score', 'intensity',\n- 'analysis_timestamp', 'mixture_model', 'terminal_modification',\n- 'contributing_channel', 'inputfile'}}\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Update mzid schema info and move it to a separate file |
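This record relocates the per-format typing hints into `pyteomics._schema_defaults`. A hypothetical parser subclass illustrating the convention (the format name, tags, and dict contents below are invented for the example):

```python
from pyteomics import xml

# Each defaults dict maps a value kind to (element, attribute) pairs or to
# element names; parsers fall back to it when the real XSD is unavailable.
_myxml_schema_defaults = {
    'ints': {('entry', 'index')},
    'floats': {('entry', 'score')},
    'bools': set(),
    'intlists': set(), 'floatlists': set(), 'charlists': set(),
    'lists': {'entry'},
}

class MyXML(xml.XML):
    file_format = 'myXML'
    _root_element = 'myRoot'
    _default_iter_tag = 'entry'
    _default_schema = _myxml_schema_defaults
```

In the library itself these dicts now live in `pyteomics/_schema_defaults.py` (added in the next record) and are referenced as, e.g., `_schema_defaults._mzid_schema_defaults`.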
377,522 | 09.02.2018 19:18:02 | -10,800 | 9ca7f0d4f7a5f494cfa510a360c9140c25d4d046 | Add new schema defaults file | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "pyteomics/_schema_defaults.py",
"diff": "+_mzid_schema_defaults = {'bools': {('Enzyme', 'semiSpecific'),\n+ ('Enzymes', 'independent'),\n+ ('PeptideEvidence', 'isDecoy'),\n+ ('ProteinDetectionHypothesis', 'passThreshold'),\n+ ('SearchModification', 'fixedMod'),\n+ ('SpectrumIdentificationItem', 'passThreshold')},\n+ 'charlists': {('Modification', 'residues'),\n+ ('SearchModification', 'residues')},\n+ 'floatlists': {('FragmentArray', 'values')},\n+ 'floats': {('Modification', 'avgMassDelta'),\n+ ('Modification', 'monoisotopicMassDelta'),\n+ ('Residue', 'mass'),\n+ ('SearchModification', 'massDelta'),\n+ ('SpectrumIdentificationItem', 'calculatedMassToCharge'),\n+ ('SpectrumIdentificationItem', 'calculatedPI'),\n+ ('SpectrumIdentificationItem', 'experimentalMassToCharge'),\n+ ('SubstitutionModification', 'avgMassDelta'),\n+ ('SubstitutionModification', 'monoisotopicMassDelta')},\n+ 'intlists': {('IonType', 'index'), ('MassTable', 'msLevel')},\n+ 'ints': {('BibliographicReference', 'year'),\n+ ('DBSequence', 'length'),\n+ ('Enzyme', 'missedCleavages'),\n+ ('IonType', 'charge'),\n+ ('Modification', 'location'),\n+ ('PeptideEvidence', 'end'),\n+ ('PeptideEvidence', 'start'),\n+ ('SearchDatabase', 'numDatabaseSequences'),\n+ ('SearchDatabase', 'numResidues'),\n+ ('SpectrumIdentificationItem', 'chargeState'),\n+ ('SpectrumIdentificationItem', 'rank'),\n+ ('SpectrumIdentificationList', 'numSequencesSearched'),\n+ ('SubstitutionModification', 'location')},\n+ 'lists': {'Affiliation',\n+ 'AmbiguousResidue',\n+ 'AnalysisSoftware',\n+ 'BibliographicReference',\n+ 'ContactRole',\n+ 'DBSequence',\n+ 'Enzyme',\n+ 'Filter',\n+ 'FragmentArray',\n+ 'InputSpectra',\n+ 'InputSpectrumIdentifications',\n+ 'IonType',\n+ 'MassTable',\n+ 'Measure',\n+ 'Modification',\n+ 'Peptide',\n+ 'PeptideEvidence',\n+ 'PeptideEvidenceRef',\n+ 'PeptideHypothesis',\n+ 'ProteinAmbiguityGroup',\n+ 'ProteinDetectionHypothesis',\n+ 'Residue',\n+ 'Sample',\n+ 'SearchDatabase',\n+ 'SearchDatabaseRef',\n+ 'SearchModification',\n+ 'SourceFile',\n+ 'SpecificityRules',\n+ 'SpectraData',\n+ 'SpectrumIdentification',\n+ 'SpectrumIdentificationItem',\n+ 'SpectrumIdentificationItemRef',\n+ 'SpectrumIdentificationList',\n+ 'SpectrumIdentificationProtocol',\n+ 'SpectrumIdentificationResult',\n+ 'SubSample',\n+ 'SubstitutionModification',\n+ 'TranslationTable',\n+ 'cv',\n+ 'cvParam'}}\n+\n+_trafoxml_schema_defaults = {'bools': set(),\n+ 'charlists': set(),\n+ 'floatlists': set(),\n+ 'floats': {('Pair', 'from'), ('Pair', 'to'), ('TrafoXML', 'version')},\n+ 'intlists': set(),\n+ 'ints': {('Pairs', 'count')},\n+ 'lists': {'Pair', 'Param'}}\n+\n+_featurexml_schema_defaults = {'bools': {\n+ ('PeptideIdentification', 'higher_score_better'),\n+ ('ProteinIdentification', 'higher_score_better'),\n+ ('UnassignedPeptideIdentification', 'higher_score_better')},\n+ 'charlists': set(),\n+ 'floatlists': set(),\n+ 'floats': {('PeptideHit', 'score'),\n+ ('PeptideIdentification', 'MZ'),\n+ ('PeptideIdentification', 'RT'),\n+ ('PeptideIdentification', 'significance_threshold'),\n+ ('ProteinHit', 'score'),\n+ ('ProteinIdentification', 'significance_threshold'),\n+ ('SearchParameters', 'peak_mass_tolerance'),\n+ ('SearchParameters', 'precursor_peak_tolerance'),\n+ ('UnassignedPeptideIdentification', 'MZ'),\n+ ('UnassignedPeptideIdentification', 'RT'),\n+ ('UnassignedPeptideIdentification', 'significance_threshold'),\n+ ('pt', 'x'), ('pt', 'y'), ('position', 'position'),\n+ ('feature', 'overallquality'), ('feature', 'intensity')\n+ },\n+ 'intlists': set(),\n+ 'ints': {('PeptideHit', 'charge'),\n+ 
('PeptideIdentification', 'spectrum_reference'),\n+ ('SearchParameters', 'missed_cleavages'),\n+ ('UnassignedPeptideIdentification', 'spectrum_reference'),\n+ ('featureList', 'count'), ('convexhull', 'nr'),\n+ ('position', 'dim'), ('feature', 'spectrum_index'),\n+ ('feature', 'charge'), ('quality', 'dim'), ('quality', 'quality')},\n+ 'lists': {'FixedModification', 'IdentificationRun',\n+ 'PeptideHit', 'PeptideIdentification', 'ProteinHit',\n+ 'UnassignedPeptideIdentification', 'VariableModification',\n+ 'convexhull', 'dataProcessing', 'feature', 'hposition',\n+ 'hullpoint', 'param', 'position', 'processingAction',\n+ 'pt', 'quality', 'userParam'}}\n+\n+_tandem_schema_defaults = {'ints': {\n+ ('group', 'z'), ('aa', 'at')} | {('domain', k) for k in [\n+ 'missed_cleavages', 'start', 'end', 'y_ions', 'b_ions',\n+ 'a_ions', 'x_ions', 'c_ions', 'z_ions']},\n+\n+ 'floats': {('group', k) for k in [\n+ 'fI', 'sumI', 'maxI', 'mh', 'expect', 'rt']} | {\n+ ('domain', k) for k in [\n+ 'expect', 'hyperscore', 'b_score', 'y_score',\n+ 'a_score', 'x_score', 'c_score', 'z_score',\n+ 'nextscore', 'delta', 'mh']} | {\n+ ('protein', 'expect'), ('protein', 'sumI'),\n+ ('aa', 'modified')},\n+\n+ 'bools': set(),\n+ 'lists': {'group', 'trace', 'attribute', 'protein', 'aa', 'note'},\n+ 'floatlists': {('values', 'values')},\n+ 'intlists': set(), 'charlists': set()}\n+\n+_mzxml_schema_defaults = {'bools': {('dataProcessing', 'centroided'),\n+ ('dataProcessing', 'chargeDeconvoluted'),\n+ ('dataProcessing', 'deisotoped'),\n+ ('dataProcessing', 'spotIntegration'),\n+ ('maldi', 'collisionGas'),\n+ ('scan', 'centroided'),\n+ ('scan', 'chargeDeconvoluted'),\n+ ('scan', 'deisotoped')},\n+ 'charlists': set(),\n+ 'floatlists': set(),\n+ 'floats': {('dataProcessing', 'intensityCutoff'),\n+ ('precursorMz', 'precursorIntensity'),\n+ ('precursorMz', 'windowWideness'),\n+ ('precursorMz', 'precursorMz'),\n+ ('scan', 'basePeakIntensity'),\n+ ('scan', 'basePeakMz'),\n+ ('scan', 'cidGasPressure'),\n+ ('scan', 'collisionEnergy'),\n+ ('scan', 'compensationVoltage'),\n+ ('scan', 'endMz'),\n+ ('scan', 'highMz'),\n+ ('scan', 'ionisationEnergy'),\n+ ('scan', 'lowMz'),\n+ ('scan', 'startMz'),\n+ ('scan', 'totIonCurrent')},\n+ 'duration': {(\"scan\", \"retentionTime\")\n+ },\n+ 'intlists': set(),\n+ 'ints': {('msInstrument', 'msInstrumentID'),\n+ ('peaks', 'compressedLen'),\n+ ('precursorMz', 'precursorCharge'),\n+ ('robot', 'deadVolume'),\n+ ('scan', 'msInstrumentID'),\n+ ('scan', 'peaksCount'),\n+ ('scanOrigin', 'num'),\n+ ('scan', 'msLevel')},\n+ 'lists': {'dataProcessing',\n+ 'msInstrument',\n+ 'parentFile',\n+ 'peaks',\n+ 'plate',\n+ 'precursorMz',\n+ 'scanOrigin',\n+ 'spot'}}\n+\n+_mzml_schema_defaults = {'ints': {\n+ ('spectrum', 'index'),\n+ ('instrumentConfigurationList', 'count'),\n+ ('binaryDataArray', 'encodedLength'),\n+ ('cvList', 'count'),\n+ ('binaryDataArray', 'arrayLength'),\n+ ('scanWindowList', 'count'),\n+ ('componentList', 'count'),\n+ ('sourceFileList', 'count'),\n+ ('productList', 'count'),\n+ ('referenceableParamGroupList', 'count'),\n+ ('scanList', 'count'),\n+ ('spectrum', 'defaultArrayLength'),\n+ ('dataProcessingList', 'count'),\n+ ('sourceFileRefList', 'count'),\n+ ('scanSettingsList', 'count'),\n+ ('selectedIonList', 'count'),\n+ ('chromatogram', 'defaultArrayLength'),\n+ ('precursorList', 'count'),\n+ ('chromatogram', 'index'),\n+ ('processingMethod', 'order'),\n+ ('targetList', 'count'),\n+ ('sampleList', 'count'),\n+ ('softwareList', 'count'),\n+ ('binaryDataArrayList', 'count'),\n+ ('spectrumList', 
'count'),\n+ ('chromatogramList', 'count')},\n+ 'floats': {},\n+ 'bools': {},\n+ 'lists': {'scan', 'spectrum', 'sample', 'cv', 'dataProcessing',\n+ 'cvParam', 'source', 'userParam', 'detector', 'product',\n+ 'referenceableParamGroupRef', 'selectedIon', 'sourceFileRef',\n+ 'binaryDataArray', 'analyzer', 'scanSettings',\n+ 'instrumentConfiguration', 'chromatogram', 'target',\n+ 'processingMethod', 'precursor', 'sourceFile',\n+ 'referenceableParamGroup', 'contact', 'scanWindow', 'software'},\n+ 'intlists': {},\n+ 'floatlists': {},\n+ 'charlists': {}}\n+\n+_pepxml_schema_defaults = {'ints':\n+ {('xpressratio_summary', 'xpress_light'),\n+ ('distribution_point', 'obs_5_distr'),\n+ ('distribution_point', 'obs_2_distr'),\n+ ('enzymatic_search_constraint', 'max_num_internal_cleavages'),\n+ ('asapratio_lc_heavypeak', 'right_valley'),\n+ ('libra_summary', 'output_type'),\n+ ('distribution_point', 'obs_7_distr'),\n+ ('spectrum_query', 'index'),\n+ ('data_filter', 'number'),\n+ ('roc_data_point', 'num_incorr'),\n+ ('search_hit', 'num_tol_term'),\n+ ('search_hit', 'num_missed_cleavages'),\n+ ('asapratio_lc_lightpeak', 'right_valley'),\n+ ('libra_summary', 'normalization'),\n+ ('specificity', 'min_spacing'),\n+ ('database_refresh_timestamp', 'min_num_enz_term'),\n+ ('enzymatic_search_constraint', 'min_number_termini'),\n+ ('xpressratio_result', 'light_lastscan'),\n+ ('distribution_point', 'obs_3_distr'),\n+ ('spectrum_query', 'end_scan'),\n+ ('analysis_result', 'id'),\n+ ('search_database', 'size_in_db_entries'),\n+ ('search_hit', 'hit_rank'),\n+ ('alternative_protein', 'num_tol_term'),\n+ ('search_hit', 'num_tot_proteins'),\n+ ('asapratio_summary', 'elution'),\n+ ('search_hit', 'tot_num_ions'),\n+ ('error_point', 'num_incorr'),\n+ ('mixture_model', 'precursor_ion_charge'),\n+ ('roc_data_point', 'num_corr'),\n+ ('search_hit', 'num_matched_ions'),\n+ ('dataset_derivation', 'generation_no'),\n+ ('xpressratio_result', 'heavy_firstscan'),\n+ ('xpressratio_result', 'heavy_lastscan'),\n+ ('error_point', 'num_corr'),\n+ ('spectrum_query', 'assumed_charge'),\n+ ('analysis_timestamp', 'id'),\n+ ('xpressratio_result', 'light_firstscan'),\n+ ('distribution_point', 'obs_4_distr'),\n+ ('asapratio_lc_heavypeak', 'left_valley'),\n+ ('fragment_masses', 'channel'),\n+ ('distribution_point', 'obs_6_distr'),\n+ ('affected_channel', 'channel'),\n+ ('search_result', 'search_id'),\n+ ('contributing_channel', 'channel'),\n+ ('asapratio_lc_lightpeak', 'left_valley'),\n+ ('asapratio_peptide_data', 'area_flag'),\n+ ('search_database', 'size_of_residues'),\n+ ('asapratio_peptide_data', 'cidIndex'),\n+ ('mixture_model', 'num_iterations'),\n+ ('mod_aminoacid_mass', 'position'),\n+ ('spectrum_query', 'start_scan'),\n+ ('asapratio_summary', 'area_flag'),\n+ ('mixture_model', 'tot_num_spectra'),\n+ ('search_summary', 'search_id'),\n+ ('xpressratio_timestamp', 'xpress_light'),\n+ ('distribution_point', 'obs_1_distr'),\n+ ('intensity', 'channel'),\n+ ('asapratio_contribution', 'charge'),\n+ ('libra_summary', 'centroiding_preference')},\n+ 'floats':\n+ {('asapratio_contribution', 'error'),\n+ ('asapratio_lc_heavypeak', 'area_error'),\n+ ('modification_info', 'mod_nterm_mass'),\n+ ('distribution_point', 'model_4_neg_distr'),\n+ ('distribution_point', 'model_5_pos_distr'),\n+ ('spectrum_query', 'precursor_neutral_mass'),\n+ ('asapratio_lc_heavypeak', 'time_width'),\n+ ('xpressratio_summary', 'masstol'),\n+ ('affected_channel', 'correction'),\n+ ('distribution_point', 'model_7_neg_distr'),\n+ ('error_point', 'error'),\n+ ('intensity', 
'target_mass'),\n+ ('roc_data_point', 'sensitivity'),\n+ ('distribution_point', 'model_4_pos_distr'),\n+ ('distribution_point', 'model_2_neg_distr'),\n+ ('distribution_point', 'model_3_pos_distr'),\n+ ('mixture_model', 'prior_probability'),\n+ ('roc_data_point', 'error'),\n+ ('intensity', 'normalized'),\n+ ('modification_info', 'mod_cterm_mass'),\n+ ('asapratio_lc_lightpeak', 'area_error'),\n+ ('distribution_point', 'fvalue'),\n+ ('distribution_point', 'model_1_neg_distr'),\n+ ('peptideprophet_summary', 'min_prob'),\n+ ('asapratio_result', 'mean'),\n+ ('point', 'pos_dens'),\n+ ('fragment_masses', 'mz'),\n+ ('mod_aminoacid_mass', 'mass'),\n+ ('distribution_point', 'model_6_neg_distr'),\n+ ('asapratio_lc_lightpeak', 'time_width'),\n+ ('asapratio_result', 'heavy2light_error'),\n+ ('peptideprophet_result', 'probability'),\n+ ('error_point', 'min_prob'),\n+ ('peptideprophet_summary', 'est_tot_num_correct'),\n+ ('roc_data_point', 'min_prob'),\n+ ('asapratio_result', 'heavy2light_mean'),\n+ ('distribution_point', 'model_5_neg_distr'),\n+ ('mixturemodel', 'neg_bandwidth'),\n+ ('asapratio_result', 'error'),\n+ ('xpressratio_result', 'light_mass'),\n+ ('point', 'neg_dens'),\n+ ('asapratio_lc_lightpeak', 'area'),\n+ ('distribution_point', 'model_1_pos_distr'),\n+ ('xpressratio_result', 'mass_tol'),\n+ ('mixturemodel', 'pos_bandwidth'),\n+ ('xpressratio_result', 'light_area'),\n+ ('asapratio_peptide_data', 'heavy_mass'),\n+ ('distribution_point', 'model_2_pos_distr'),\n+ ('search_hit', 'calc_neutral_pep_mass'),\n+ ('intensity', 'absolute'),\n+ ('asapratio_peptide_data', 'light_mass'),\n+ ('distribution_point', 'model_3_neg_distr'),\n+ ('aminoacid_modification', 'mass'),\n+ ('asapratio_lc_heavypeak', 'time'),\n+ ('asapratio_lc_lightpeak', 'time'),\n+ ('asapratio_lc_lightpeak', 'background'),\n+ ('mixture_model', 'est_tot_correct'),\n+ ('point', 'value'),\n+ ('asapratio_lc_heavypeak', 'background'),\n+ ('terminal_modification', 'mass'),\n+ ('fragment_masses', 'offset'),\n+ ('xpressratio_result', 'heavy_mass'),\n+ ('search_hit', 'protein_mw'),\n+ ('libra_summary', 'mass_tolerance'),\n+ ('spectrum_query', 'retention_time_sec'),\n+ ('distribution_point', 'model_7_pos_distr'),\n+ ('asapratio_lc_heavypeak', 'area'),\n+ ('alternative_protein', 'protein_mw'),\n+ ('asapratio_contribution', 'ratio'),\n+ ('xpressratio_result', 'heavy_area'),\n+ ('distribution_point', 'model_6_pos_distr')},\n+ 'bools':\n+ {('sample_enzyme', 'independent'),\n+ ('intensity', 'reject'),\n+ ('libra_result', 'is_rejected')},\n+ 'intlists': set(),\n+ 'floatlists': set(),\n+ 'charlists': set(),\n+ 'lists': {'point', 'aminoacid_modification', 'msms_run_summary',\n+ 'mixturemodel', 'search_hit', 'mixturemodel_distribution',\n+ 'sequence_search_constraint', 'specificity', 'alternative_protein',\n+ 'analysis_result', 'data_filter', 'fragment_masses', 'error_point',\n+ 'parameter', 'spectrum_query', 'search_result', 'affected_channel',\n+ 'analysis_summary', 'roc_data_point', 'distribution_point',\n+ 'search_summary', 'mod_aminoacid_mass', 'search_score', 'intensity',\n+ 'analysis_timestamp', 'mixture_model', 'terminal_modification',\n+ 'contributing_channel', 'inputfile'}}\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add new schema defaults file |
377,522 | 24.02.2018 02:24:47 | -10,800 | 347e42d48f8b5951b88765211b6525273bcdedb4 | Fix (strip composition building kwargs inside mass.isotopologues) | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mass/mass.py",
"new_path": "pyteomics/mass/mass.py",
"diff": "@@ -152,6 +152,7 @@ class Composition(BasicComposition):\nThe main improvement over dict is that Composition objects allow\nadding and subtraction.\n\"\"\"\n+ _kw_sources = {'formula', 'sequence', 'parsed_sequence', 'split_sequence'}\ndef _from_parsed_sequence(self, parsed_sequence, aa_comp):\nself.clear()\n@@ -283,12 +284,11 @@ class Composition(BasicComposition):\naa_comp = kwargs.get('aa_comp', std_aa_comp)\nmass_data = kwargs.get('mass_data', nist_mass)\n- kw_sources = {'formula', 'sequence', 'parsed_sequence',\n- 'split_sequence'}\n- kw_given = kw_sources.intersection(kwargs)\n+\n+ kw_given = self._kw_sources.intersection(kwargs)\nif len(kw_given) > 1:\nraise PyteomicsError('Only one of {} can be specified!\\n'\n- 'Given: {}'.format(', '.join(kw_sources),\n+ 'Given: {}'.format(', '.join(self._kw_sources),\n', '.join(kw_given)))\nelif kw_given:\nkwa = kw_given.pop()\n@@ -685,6 +685,9 @@ def isotopologues(*args, **kwargs):\nelements_with_isotopes = kwargs.get('elements_with_isotopes')\nreport_abundance = kwargs.get('report_abundance', False)\ncomposition = Composition(kwargs['composition']) if 'composition' in kwargs else Composition(*args, **kwargs)\n+ other_kw = kwargs.copy()\n+ for k in Composition._kw_sources:\n+ other_kw.pop(k, None)\ndict_elem_isotopes = {}\nfor element in composition:\n@@ -702,10 +705,11 @@ def isotopologues(*args, **kwargs):\nfor elementXn in combinations_with_replacement(list_isotopes, n):\nlist_comb_element_n.append(elementXn)\nall_isotoplogues.append(list_comb_element_n)\n+\nfor isotopologue in product(*all_isotoplogues):\n- ic = Composition(formula=''.join(atom for el in isotopologue for atom in el), **kwargs)\n+ ic = Composition(formula=''.join(atom for el in isotopologue for atom in el), **other_kw)\nif report_abundance or overall_threshold > 0.0:\n- abundance = isotopic_composition_abundance(composition=ic, **kwargs)\n+ abundance = isotopic_composition_abundance(composition=ic, **other_kw)\nif abundance > overall_threshold:\nif report_abundance:\nyield (ic, abundance)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mass.py",
"new_path": "tests/test_mass.py",
"diff": "@@ -38,26 +38,21 @@ class MassTest(unittest.TestCase):\n''.join([random.choice('XYZ') for i in range(20)])\nfor i in range(10)]\n- self.aa_comp = {'X': mass.Composition({'A': 1},\n- mass_data=self.mass_data),\n- 'Y': mass.Composition({'B': 1},\n- mass_data=self.mass_data),\n- 'Z': mass.Composition({'C': 1},\n- mass_data=self.mass_data),\n- 'F': mass.Composition({'F': 1},\n- mass_data=self.mass_data),\n- 'H-': mass.Composition({'D': 1},\n- mass_data=self.mass_data),\n- '-OH': mass.Composition({'E': 1},\n- mass_data=self.mass_data),\n+ self.aa_comp = {\n+ 'X': mass.Composition({'A': 1}, mass_data=self.mass_data),\n+ 'Y': mass.Composition({'B': 1}, mass_data=self.mass_data),\n+ 'Z': mass.Composition({'C': 1}, mass_data=self.mass_data),\n+ 'F': mass.Composition({'F': 1}, mass_data=self.mass_data),\n+ 'H-': mass.Composition({'D': 1}, mass_data=self.mass_data),\n+ '-OH': mass.Composition({'E': 1}, mass_data=self.mass_data),\n}\n- self.ion_comp = {'M': mass.Composition({},\n- mass_data=self.mass_data),\n- 'a': mass.Composition({'A': -1},\n- mass_data=self.mass_data)}\n- self.mods = {'a': mass.Composition(A=1),\n- 'b': mass.Composition(B=1)}\n+ self.ion_comp = {\n+ 'M': mass.Composition({}, mass_data=self.mass_data),\n+ 'a': mass.Composition({'A': -1}, mass_data=self.mass_data)\n+ }\n+\n+ self.mods = {'a': mass.Composition(A=1), 'b': mass.Composition(B=1)}\nself.d = {atom: 1 for atom in 'ABCDE'}\ndef test_fast_mass(self):\n@@ -290,13 +285,26 @@ class MassTest(unittest.TestCase):\npeptide = 'XYF'\nstates = [{'F[6]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}, {'F[7]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}]\nabundances = [0.7, 0.3]\n- for state in mass.isotopologues(peptide, elements_with_isotopes='F',\n- aa_comp=self.aa_comp, mass_data=self.mass_data):\n+ kw_common = dict(elements_with_isotopes='F', aa_comp=self.aa_comp, mass_data=self.mass_data)\n+ kwlist = [\n+ {},\n+ {'sequence': 'XYF'},\n+ {'parsed_sequence': parser.parse('XYF', show_unmodified_termini=True)},\n+ {'split_sequence': parser.parse('XYF', show_unmodified_termini=True, split=True)},\n+ {'formula': 'ABDEF'},\n+ {'composition': mass.Composition(sequence='XYF', aa_comp=self.aa_comp)}]\n+ arglist = [(peptide,), (), (), (), (), ()]\n+ for args, kw in zip(arglist, kwlist):\n+ kwargs = kw_common.copy()\n+ kwargs.update(kw)\n+ isotopologues = mass.isotopologues(*args, **kwargs)\n+ for state in isotopologues:\ni = states.index(state)\nself.assertNotEqual(i, -1)\nself.assertAlmostEqual(abundances[i], mass.isotopic_composition_abundance(state,\naa_comp=self.aa_comp, mass_data=self.mass_data))\n+\ndef test_isotopologues_with_abundances(self):\npeptide = 'XYF'\nstates = [{'F[6]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}, {'F[7]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}]\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix #2 (strip composition building kwargs inside mass.isotopologues) |
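A usage sketch of the code path this commit repairs (the peptide is an arbitrary example):

```python
# Passing a composition source keyword directly to isotopologues(). Before
# the fix, 'sequence' was also forwarded into the inner
# Composition(formula=...) call, which raises a PyteomicsError about
# conflicting composition sources; the fix pops such keys first.
from pyteomics import mass

for iso, abundance in mass.isotopologues(sequence='PEPTIDE',
                                         elements_with_isotopes=['C'],
                                         report_abundance=True,
                                         overall_threshold=1e-3):
    print(iso, abundance)
```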
377,522 | 03.03.2018 16:19:02 | -10,800 | 73824d7401857e43bdf8a881426f85f6ed659d0b | Enable huge_tree everywhere in xml and mzml | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -384,7 +384,8 @@ class PreIndexedMzML(MzML):\nindex = {}\nself._source.seek(offset)\ntry:\n- for event, elem in etree.iterparse(self._source, events=('start', 'end'), remove_comments=True):\n+ for event, elem in etree.iterparse(self._source, events=('start', 'end'), remove_comments=True,\n+ huge_tree=True):\nif event == 'start':\nif elem.tag == 'index':\nindex = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -182,7 +182,7 @@ class XML(FileReader):\nA (version, schema URL) tuple, both elements are strings or None.\n\"\"\"\nfor _, elem in etree.iterparse(\n- self._source, events=('start',), remove_comments=True):\n+ self._source, events=('start',), remove_comments=True, huge_tree=True):\nif _local_name(elem) == self._root_element:\nreturn (elem.attrib.get('version'),\nelem.attrib.get(('{{{}}}'.format(elem.nsmap['xsi'])\n@@ -458,7 +458,7 @@ class XML(FileReader):\nstack = 0\nid_dict = {}\nfor event, elem in etree.iterparse(self._source, events=('start', 'end'),\n- remove_comments=True):\n+ remove_comments=True, huge_tree=True):\nif event == 'start':\nif 'id' in elem.attrib:\nstack += 1\n@@ -492,7 +492,7 @@ class XML(FileReader):\nif id_key is None:\nid_key = self._default_id_attr\nfor event, elem in etree.iterparse(\n- self._source, events=('start', 'end'), remove_comments=True):\n+ self._source, events=('start', 'end'), remove_comments=True, huge_tree=True):\nif event == 'start':\nif elem.attrib.get(id_key) == elem_id:\nfound = True\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Enable huge_tree everywhere in xml and mzml |
377,522 | 04.03.2018 00:40:08 | -10,800 | b4f26d7908d7ee01878c003f04235f9bc8b049bc | Add huge_tree to keyword arguments (fixes #23) | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -270,7 +270,7 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\ninfo[k] = int(info[k])\nreturn info\n-def read(source, read_schema=False, iterative=True, use_index=False, dtype=None):\n+def read(source, read_schema=False, iterative=True, use_index=False, dtype=None, huge_tree=False):\n\"\"\"Parse `source` and iterate through spectra.\nParameters\n@@ -302,6 +302,12 @@ def read(source, read_schema=False, iterative=True, use_index=False, dtype=None)\n(under \"m/z array\", \"intensity array\", etc.).\nDefault is :py:const:`True`.\n+ huge_tree : bool, optional\n+ This option is passed to the `lxml` parser and defines whether\n+ security checks for XML tree depth and node size should be disabled.\n+ Default is :py:const:`False`.\n+ Enable this option for trusted files to avoid XMLSyntaxError exception.\n+\nReturns\n-------\nout : iterator\n@@ -309,7 +315,7 @@ def read(source, read_schema=False, iterative=True, use_index=False, dtype=None)\n\"\"\"\nreturn MzML(source, read_schema=read_schema, iterative=iterative,\n- use_index=use_index, dtype=dtype)\n+ use_index=use_index, dtype=dtype, huge_tree=huge_tree)\ndef iterfind(source, path, **kwargs):\n\"\"\"Parse `source` and yield info on elements with specified local\n@@ -384,8 +390,7 @@ class PreIndexedMzML(MzML):\nindex = {}\nself._source.seek(offset)\ntry:\n- for event, elem in etree.iterparse(self._source, events=('start', 'end'), remove_comments=True,\n- huge_tree=True):\n+ for event, elem in etree.iterparse(self._source, events=('start', 'end'), remove_comments=True):\nif event == 'start':\nif elem.tag == 'index':\nindex = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -121,6 +121,7 @@ class XML(FileReader):\n_structures_to_flatten = []\n_schema_location_param = 'schemaLocation'\n_default_id_attr = 'id'\n+ _huge_tree = False\n# Configurable plugin logic\n_converters = XMLValueConverter.converters()\n@@ -151,6 +152,12 @@ class XML(FileReader):\nshould be built and stored on the instance. It is used in\n:py:meth:`XML.get_by_id`, e.g. when using\n:py:class:`pyteomics.mzid.MzIdentML` with ``retrieve_refs=True``.\n+ huge_tree : bool, optional\n+ This option is passed to the `lxml` parser and defines whether\n+ security checks for XML tree depth and node size should be disabled.\n+ Default is :py:const:`False`.\n+ Enable this option for trusted files to avoid XMLSyntaxError exceptions\n+ (e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).\n\"\"\"\nsuper(XML, self).__init__(source, 'rb', self.iterfind, False,\n@@ -170,6 +177,7 @@ class XML(FileReader):\nself.schema_info = self._get_schema_info(read_schema)\nself._converters_items = self._converters.items()\n+ self._huge_tree = kwargs.get('huge_tree', self._huge_tree)\n@_keepstate\ndef _get_version_info(self):\n@@ -182,7 +190,7 @@ class XML(FileReader):\nA (version, schema URL) tuple, both elements are strings or None.\n\"\"\"\nfor _, elem in etree.iterparse(\n- self._source, events=('start',), remove_comments=True, huge_tree=True):\n+ self._source, events=('start',), remove_comments=True, huge_tree=self._huge_tree):\nif _local_name(elem) == self._root_element:\nreturn (elem.attrib.get('version'),\nelem.attrib.get(('{{{}}}'.format(elem.nsmap['xsi'])\n@@ -427,7 +435,7 @@ class XML(FileReader):\nlocalname = nodes[0].lower()\nfound = False\nfor ev, elem in etree.iterparse(self, events=('start', 'end'),\n- remove_comments=True, huge_tree=True):\n+ remove_comments=True, huge_tree=self._huge_tree):\nname_lc = _local_name(elem).lower()\nif ev == 'start':\nif name_lc == localname or localname == '*':\n@@ -458,7 +466,7 @@ class XML(FileReader):\nstack = 0\nid_dict = {}\nfor event, elem in etree.iterparse(self._source, events=('start', 'end'),\n- remove_comments=True, huge_tree=True):\n+ remove_comments=True, huge_tree=self._huge_tree):\nif event == 'start':\nif 'id' in elem.attrib:\nstack += 1\n@@ -492,7 +500,7 @@ class XML(FileReader):\nif id_key is None:\nid_key = self._default_id_attr\nfor event, elem in etree.iterparse(\n- self._source, events=('start', 'end'), remove_comments=True, huge_tree=True):\n+ self._source, events=('start', 'end'), remove_comments=True, huge_tree=self._huge_tree):\nif event == 'start':\nif elem.attrib.get(id_key) == elem_id:\nfound = True\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add huge_tree to keyword arguments (fixes #23) |
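A short usage sketch for the new keyword (file name hypothetical); the flag is simply forwarded to the underlying lxml calls:

    from pyteomics import mzml

    # For trusted files that trip lxml's default safety limits
    # ("XMLSyntaxError: xmlSAX2Characters: huge text node").
    with mzml.read('big_run.mzML', huge_tree=True) as reader:
        for spectrum in reader:
            pass  # process each spectrum dict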
377,522 | 04.03.2018 20:22:20 | -10,800 | 18d9dda6799d80dd2b3ca6461af7a7bb27e616ba | Add `composition` to Composition._kw_sources, rename _from_dict to _from_composition | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mass/mass.py",
"new_path": "pyteomics/mass/mass.py",
"diff": "@@ -152,7 +152,7 @@ class Composition(BasicComposition):\nThe main improvement over dict is that Composition objects allow\nadding and subtraction.\n\"\"\"\n- _kw_sources = {'formula', 'sequence', 'parsed_sequence', 'split_sequence'}\n+ _kw_sources = {'formula', 'sequence', 'parsed_sequence', 'split_sequence', 'composition'}\ndef _from_parsed_sequence(self, parsed_sequence, aa_comp):\nself.clear()\n@@ -171,7 +171,7 @@ class Composition(BasicComposition):\nexcept (PyteomicsError, KeyError):\nraise PyteomicsError(\n'No information for %s in `aa_comp`' % aa)\n- self._from_dict(comp)\n+ self._from_composition(comp)\ndef _from_split_sequence(self, split_sequence, aa_comp):\nself.clear()\n@@ -190,9 +190,8 @@ class Composition(BasicComposition):\ni = j\nbreak\nif j == 0:\n- raise PyteomicsError(\"Invalid group starting from \"\n- \"position %d: %s\" % (i+1, group))\n- self._from_dict(comp)\n+ raise PyteomicsError(\"Invalid group starting from position %d: %s\" % (i+1, group))\n+ self._from_composition(comp)\ndef _from_sequence(self, sequence, aa_comp):\nparsed_sequence = parser.parse(\n@@ -210,14 +209,13 @@ class Composition(BasicComposition):\nself[_make_isotope_string(elem, int(isotope) if isotope else 0)\n] += int(number) if number else 1\n- def _from_dict(self, comp):\n+ def _from_composition(self, comp):\nfor isotope_string, num_atoms in comp.items():\nelement_name, isotope_num = _parse_isotope_string(\nisotope_string)\n# Remove explicitly undefined isotopes (e.g. X[0]).\n- self[_make_isotope_string(element_name, isotope_num)] = (\n- num_atoms)\n+ self[_make_isotope_string(element_name, isotope_num)] = num_atoms\ndef __init__(self, *args, **kwargs):\n\"\"\"\n@@ -298,7 +296,7 @@ class Composition(BasicComposition):\n# can't build from kwargs\nelif args:\nif isinstance(args[0], dict):\n- self._from_dict(args[0])\n+ self._from_composition(args[0])\nelif isinstance(args[0], str):\ntry:\nself._from_sequence(args[0], aa_comp)\n@@ -312,15 +310,14 @@ class Composition(BasicComposition):\n'formula'.format(args[0]))\nelse:\ntry:\n- self._from_sequence(parser.tostring(args[0], True),\n- aa_comp)\n+ self._from_sequence(parser.tostring(args[0], True), aa_comp)\nexcept:\nraise PyteomicsError('Could not create a Composition object'\n' from `{}`. A Composition object must be '\n'specified by sequence, parsed or split sequence,'\n' formula or dict.'.format(args[0]))\nelse:\n- self._from_dict(kwargs)\n+ self._from_composition(kwargs)\nion_comp = kwargs.get('ion_comp', std_ion_comp)\nif 'ion_type' in kwargs:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add `composition` to Composition._kw_sources, rename _from_dict to _from_composition |
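A sketch of two equivalent construction paths covered by this dispatch; the dict form is the one handled by the renamed _from_composition:

    from pyteomics import mass

    water = mass.Composition(formula='H2O')
    same = mass.Composition({'H': 2, 'O': 1})  # dict input goes through _from_composition
    assert water == same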
377,522 | 05.03.2018 13:44:25 | -10,800 | 71560608878b7574dfb7e9241f310146c9229329 | Add huge_tree in mzxml, update changelog | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-3.4.3\n------\n+3.5\n+---\n- - Change the default value for `read_schema` to :py:const:`False` in\n- XML parsing modules.\n+ - Preserve accession information on cvParam elements in mzML parser.\n+ Dictionaries produced by the parser can now be queried by accession using\n+ :py:func:`pyteomics.auxiliary.cvquery`.\n+ (Contributed by J. Klein)\n- Add optional `decode_binary` argument in\n:py:class:`pyteomics.mzml.MzML` and :py:class:`pyteomics.mzxml.MzXML`.\n+ When set to `False`, the parsers provide binary records suitable for decoding on demand.\n+ (Contributed by J. Klein)\n- Add method :py:meth:`write_byte_offsets` in :py:class:`pyteomics.mzml.MzML`,\n:py:class:`pyteomics.mzxml.MzXML` and :py:class:`pyteomics.mzid.MzIdentML`.\n+ Byte offsets can be loaded later to speed up random access.\n+ (Contributed by J. Klein)\n+\n+ - Random access to MGF spectrum entries.\n- Add function :py:func:`pyteomics.mgf.get_spectrum`.\n- Add class :py:class:`pyteomics.mgf.MGF`. :py:func:`mgf.read` is now an alias to the class.\nThe class can be used for indexing using spectrum titles.\n- - Minor fixes.\n+ This functionality will be changed in upcoming versions.\n+\n+ - New parameter `huge_tree` in XML parser constructors and :py:func:`read` functions.\n+ It is passed to the underlying :py:mod:`lxml` calls. Default value is `False`.\n+ Set to `True` to overcome errors such as: `XMLSyntaxError: xmlSAX2Characters: huge text node`.\n+\n+ - Change the default value for `read_schema` to :py:const:`False` in XML parsing modules.\n+\n+ - New parameter `keep_cterm` in decoy generation functions in :py:mod:`pyteomics.fasta`.\n+\n+ - Bugfixes.\n3.4.2\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.4.3b0\n\\ No newline at end of file\n+3.5b0\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -306,7 +306,8 @@ def read(source, read_schema=False, iterative=True, use_index=False, dtype=None,\nThis option is passed to the `lxml` parser and defines whether\nsecurity checks for XML tree depth and node size should be disabled.\nDefault is :py:const:`False`.\n- Enable this option for trusted files to avoid XMLSyntaxError exception.\n+ Enable this option for trusted files to avoid XMLSyntaxError exceptions\n+ (e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).\nReturns\n-------\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzxml.py",
"new_path": "pyteomics/mzxml.py",
"diff": "@@ -211,7 +211,7 @@ class MzXML(xml.ArrayConversionMixin, xml.IndexSavingXML):\nfor item in super(MzXML, self).iterfind(path, **kwargs):\nyield item\n-def read(source, read_schema=False, iterative=True, use_index=False, dtype=None):\n+def read(source, read_schema=False, iterative=True, use_index=False, dtype=None, huge_tree=False):\n\"\"\"Parse `source` and iterate through spectra.\nParameters\n@@ -239,6 +239,13 @@ def read(source, read_schema=False, iterative=True, use_index=False, dtype=None)\n(under \"m/z array\", \"intensity array\", etc.).\nDefault is :py:const:`True`.\n+ huge_tree : bool, optional\n+ This option is passed to the `lxml` parser and defines whether\n+ security checks for XML tree depth and node size should be disabled.\n+ Default is :py:const:`False`.\n+ Enable this option for trusted files to avoid XMLSyntaxError exceptions\n+ (e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).\n+\nReturns\n-------\nout : iterator\n@@ -246,7 +253,7 @@ def read(source, read_schema=False, iterative=True, use_index=False, dtype=None)\n\"\"\"\nreturn MzXML(source, read_schema=read_schema, iterative=iterative,\n- use_index=use_index, dtype=dtype)\n+ use_index=use_index, dtype=dtype, huge_tree=huge_tree)\ndef iterfind(source, path, **kwargs):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add huge_tree in mzxml, update changelog |
377,522 | 05.03.2018 16:41:00 | -10,800 | 3d1168a7822fb3d516c0bb1b77ecc73a1bcae982 | Remove local sphinx exts | [
{
"change_type": "MODIFY",
"old_path": "doc/source/conf.py",
"new_path": "doc/source/conf.py",
"diff": "@@ -22,7 +22,7 @@ def add_directive_header(self, sig):\nDataDocumenter.add_directive_header = add_directive_header\n-sys.path.insert(0, os.path.abspath('exts'))\n+#sys.path.insert(0, os.path.abspath('exts'))\npyteomics_path = os.path.abspath('../../')\nsys.path.insert(0, pyteomics_path)\n@@ -51,7 +51,7 @@ extensions = [\n'matplotlib.sphinxext.only_directives',\n'matplotlib.sphinxext.mathmpl',\n'numpydoc',\n- 'googleanalytics'\n+ 'sphinxcontrib.googleanalytics'\n]\n# Enable Google Analytics via a script from\n"
},
{
"change_type": "DELETE",
"old_path": "doc/source/exts/googleanalytics.py",
"new_path": null,
"diff": "-#!/usr/bin/env python\n-# -*- coding: utf-8 -*-\n-\n-from sphinx.application import ExtensionError\n-\n-def add_ga_javascript(app, pagename, templatename, context, doctree):\n- if not app.config.googleanalytics_enabled:\n- return\n-\n- metatags = context.get('metatags', '')\n- metatags += \"\"\"<script type=\"text/javascript\">\n-\n- var _gaq = _gaq || [];\n- _gaq.push(['_setAccount', '%s']);\n- _gaq.push(['_trackPageview']);\n-\n- (function() {\n- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;\n- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';\n- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);\n- })();\n- </script>\"\"\" % app.config.googleanalytics_id\n- context['metatags'] = metatags\n-\n-def check_config(app):\n- if not app.config.googleanalytics_id:\n- raise ExtensionError(\"'googleanalytics_id' config value must be set for ga statistics to function properly.\")\n-\n-def setup(app):\n- app.add_config_value('googleanalytics_id', '', 'html')\n- app.add_config_value('googleanalytics_enabled', True, 'html')\n- app.connect('html-page-context', add_ga_javascript)\n- app.connect('builder-inited', check_config)\n- return {'version': '0.1'}\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove local sphinx exts |
377,522 | 05.03.2018 17:59:51 | -10,800 | 41d794cd9883145622890dd742c1646f40e73106 | Try using bitbucket version of googleanalytics in rtd requirements | [
{
"change_type": "MODIFY",
"old_path": "doc/build-requirements.txt",
"new_path": "doc/build-requirements.txt",
"diff": "numpydoc\nmatplotlib\n-sphinxcontrib-googleanalytics\n\\ No newline at end of file\n+hg+https://bitbucket.org/birkenfeld/sphinx-contrib#egg=googleanalytics&subdirectory=googleanalytics\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Try using bitbucket version of googleanalytics in rtd requirements |
377,522 | 05.03.2018 18:53:18 | -10,800 | 43dbd0808bce53c9b2da5edc55613ee494912280 | Try adding badge to readme | [
{
"change_type": "MODIFY",
"old_path": "README",
"new_path": "README",
"diff": "+.. image:: http://readthedocs.org/projects/pyteomics/badge/?version=latest\n+ :target: http://pyteomics.readthedocs.io/en/latest/?badge=latest\n+ :alt: Documentation Status\n+\n+\nWhat is Pyteomics?\n------------------\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Try adding badge to readme |
377,522 | 06.03.2018 14:05:26 | -10,800 | 90435728e634d069032da53cc0101ef59c38655b | Add cvquery to auxiliary doc page | [
{
"change_type": "MODIFY",
"old_path": "doc/source/api/auxiliary.rst",
"new_path": "doc/source/api/auxiliary.rst",
"diff": ".. automodule:: pyteomics.auxiliary\n:exclude-members: Charge, ChargeList, BasicComposition, FileReader\n+\n+ .. autofunction:: cvquery\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary.py",
"new_path": "pyteomics/auxiliary.py",
"diff": "@@ -44,6 +44,10 @@ Helpers\n`memoization <http://stackoverflow.com/a/1988826/1258041>`_\n`function decorator <http://stackoverflow.com/a/1594484/1258041>`_.\n+ :py:func:`cvquery` - traverse an arbitrarily nested dictionary looking\n+ for keys which are :py:class:`cvstr` instances, or objects\n+ with an attribute called ``accession``.\n+\n-------------------------------------------------------------------------------\n\"\"\"\n@@ -1576,7 +1580,7 @@ class cvstr(str):\nclass CVQueryEngine(object):\n'''Traverse an arbitrarily nested dictionary looking\nfor keys which are :class:`cvstr` instances, or objects\n- with an attribute called ``accession``\n+ with an attribute called ``accession``.\n'''\ndef _accession(self, key):\n@@ -1636,7 +1640,7 @@ class CVQueryEngine(object):\ndef index(self, data):\n'''Construct a flat :class:`dict` whose keys are the\naccession numbers for all qualified keys in ``data``\n- and whose values are the mapped values from ``data``\n+ and whose values are the mapped values from ``data``.\n'''\nindex = self._walk_dict(data, {})\nreturn index\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add cvquery to auxiliary doc page |
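A compact illustration of the documented helper on an mzML spectrum ('MS:1000285' is the accession for total ion current):

    from pyteomics import mzml
    from pyteomics.auxiliary import cvquery

    with mzml.MzML('tests/test.mzML') as reader:
        spectrum = next(reader)
        print(cvquery(spectrum))                # flat {accession: value} index
        print(cvquery(spectrum, 'MS:1000285'))  # single value by accession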
377,522 | 10.03.2018 19:18:23 | -10,800 | 2fed6d2017d7cb0ddbd7a844bd8ec1aa32aa3e41 | Mention Controlled Vocabularies in docs | [
{
"change_type": "MODIFY",
"old_path": "doc/source/data.rst",
"new_path": "doc/source/data.rst",
"diff": "@@ -404,6 +404,7 @@ but it uses byte offset information found at the end of the file.\nUnlike the rest of the functions and classes, :py:class:`pyteomics.mzml.PreIndexedMzML`\ndoes not have a counterpart in :py:mod:`pyteomics.mzxml`.\n+\npepXML\n------\n@@ -710,6 +711,32 @@ of `(from; to)` pairs corresponding to original and transformed retention times:\nAs always, :py:func:`pyteomics.openms.trafoxml.read`\nand :py:class:`pyteomics.openms.trafoxml.TrafoXML` are interchangeable.\n+Controlled Vocabularies\n+=======================\n+\n+`Controlled Vocabularies <http://www.psidev.info/controlled-vocabularies>`_\n+are the universal annotation system used in the PSI formats, including\n+**mzML** and **mzIdentML**. :py:class:`pyteomics.mzml.MzML` and :py:class:`pyteomics.mzid.MzIdentML`\n+retain the annotation information. It can be accessed using the helper function, :py:func:`pyteomics.auxiliary.cvquery`:\n+\n+ >>> from pyteomics import auxiliary as aux, mzid, mzml\n+ >>> f = mzid.MzIdentML('tests/test.mzid')\n+ >>> s = next(f)\n+ >>> s\n+ {'SpectrumIdentificationItem': [{'ProteinScape:SequestMetaScore': 7.59488518903425, 'calculatedMassToCharge': 1507.695, 'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec1_pep1'}], 'chargeState': 1, 'passThreshold': True, 'peptide_ref': 'prot1_pep1', 'rank': 1, 'id': 'SEQ_spec1_pep1', 'ProteinScape:IntensityCoverage': 0.3919545603809718, 'experimentalMassToCharge': 1507.696}], 'spectrumID': 'databasekey=1', 'id': 'SEQ_spec1', 'spectraData_ref': 'LCMALDI_spectra'}\n+ >>> aux.cvquery(s)\n+ {'MS:1001506': 7.59488518903425, 'MS:1001505': 0.3919545603809718}\n+ >>> f.close()\n+\n+ >>> f = mzml.MzML('tests/test.mzML')\n+ >>> s = next(f)\n+ >>> s\n+ {'defaultArrayLength': 19914, 'intensity array': array([ 0., 0., 0., ..., 0., 0., 0.], dtype=float32), 'base peak m/z': 810.415283203125, 'highest observed m/z': 2000.0099466203771, 'index': 0, 'total ion current': 15245068.0, 'id': 'controllerType=0 controllerNumber=1 scan=1', 'count': 2, 'm/z array': array([ 200.00018817, 200.00043034, 200.00067252, ..., 1999.96151259,\n+ 1999.98572931, 2000.00994662]), 'ms level': 1, 'base peak intensity': 1471973.875, 'lowest observed m/z': 200.00018816645022, 'MSn spectrum': '', 'positive scan': '', 'scanList': {'count': 1, 'scan': [{'preset scan configuration': 1.0, 'scanWindowList': {'count': 1, 'scanWindow': [{'scan window lower limit': 200.0, 'scan window upper limit': 2000.0}]}, 'instrumentConfigurationRef': 'IC1', 'filter string': 'FTMS + p ESI Full ms [200.00-2000.00]', 'scan start time': 0.004935, '[Thermo Trailer Extra]Monoisotopic M/Z:': 810.4152221679688}], 'no combination': ''}, 'profile spectrum': ''}\n+ >>> aux.cvquery(s, 'MS:1000285')\n+ 15245068.0\n+ >>> f.close()\n+\nFDR estimation and filtering\n============================\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Mention Controlled Vocabularies in docs |
377,522 | 14.03.2018 23:36:37 | -10,800 | 1318094ad7c14d544cf53df812571adf11b14df8 | Avoid numpy warning in cvquery | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary.py",
"new_path": "pyteomics/auxiliary.py",
"diff": "@@ -1589,7 +1589,7 @@ class CVQueryEngine(object):\ndef _query_dict(self, data, accession):\nfor key, value in data.items():\nif self._accession(key) == accession:\n- if value != '':\n+ if not isinstance(value, str) or value != '':\nreturn value\nelse:\nreturn key\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -6,7 +6,8 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nfrom itertools import product\nimport unittest\n-from pyteomics.mzml import *\n+from pyteomics.mzml import MzML, PreIndexedMzML, read, chain\n+from pyteomics import auxiliary as aux, xml\nfrom data import mzml_spectra\nimport numpy as np\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Avoid numpy warning in cvquery |
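The guard matters because values in parsed spectra can be NumPy arrays, and comparing an array to '' makes NumPy attempt an element-wise comparison. A minimal sketch of the short-circuit:

    import numpy as np

    value = np.zeros(3)
    # `value != ''` alone would trigger NumPy's element-wise comparison
    # machinery (with a FutureWarning); the isinstance check short-circuits,
    # so the string comparison is only evaluated for actual strings.
    if not isinstance(value, str) or value != '':
        print('treated as a meaningful value')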
377,522 | 15.03.2018 01:19:36 | -10,800 | 43db9a8c57b9cac224e3331c1a3f039f3fb9cae8 | Add skip_empty_cvparam_values kwarg in _handle_param | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -296,7 +296,8 @@ class XML(FileReader):\nunit_name = attribs.get(\"unitName\", unit_accesssion)\nunit_info = unit_name\naccession = attribs.get(\"accession\")\n- if 'value' in attribs:\n+ if 'value' in attribs and (kwargs.get('skip_empty_cvparam_values', False) or\n+ attribs['value'] != ''):\ntry:\nif attribs.get('type') in types:\nvalue = types[attribs['type']](attribs['value'], unit_info)\n@@ -317,7 +318,7 @@ class XML(FileReader):\nname = _local_name(element)\nschema_info = self.schema_info\nif name in {'cvParam', 'userParam'}:\n- return self._handle_param(element)\n+ return self._handle_param(element, **kwargs)\ninfo = dict(element.attrib)\n# process subelements\n@@ -903,7 +904,7 @@ class IndexedXML(XML):\nuse_index = kwargs.get('use_index', True)\nif tags is not None:\n- self._indexed_tags = (tags)\n+ self._indexed_tags = tags\nif tag_index_keys is not None:\nself._indexed_tag_keys = tag_index_keys\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add skip_empty_cvparam_values kwarg in _handle_param |
377,522 | 11.04.2018 00:08:37 | -10,800 | ce47fdd42f098892a6aaca787fe1a8b3f008c148 | Try creating df from generator in pepxml.DataFrame | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -348,8 +348,8 @@ def DataFrame(*args, **kwargs):\nout : pandas.DataFrame\n\"\"\"\nimport pandas as pd\n- data = []\nsep = kwargs.pop('sep', None)\n+ def gen_items():\nwith chain(*args, **kwargs) as f:\nfor item in f:\ninfo = {}\n@@ -377,8 +377,8 @@ def DataFrame(*args, **kwargs):\nfor k, v in sh.items():\nif isinstance(v, (str, int, float)):\ninfo[k] = v\n- data.append(info)\n- return pd.DataFrame(data)\n+ yield info\n+ return pd.DataFrame(gen_items())\ndef filter_df(*args, **kwargs):\n\"\"\"Read pepXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Try creating df from generator in pepxml.DataFrame |
377,522 | 11.04.2018 16:31:50 | -10,800 | 670568dbe583c6fa899d6432fc716652adb6ac14 | Pass kwargs to dataframe constructor in pepxml | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.5b0\n\\ No newline at end of file\n+3.5b1\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -343,12 +343,17 @@ def DataFrame(*args, **kwargs):\nthis delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is\n:py:const:`None`.\n+ pd_kwargs : dict, optional\n+ Keyword arguments passed to the :py:class:`pandas.DataFrame` constructor.\n+\nReturns\n-------\nout : pandas.DataFrame\n\"\"\"\nimport pandas as pd\n+ kwargs = kwargs.copy()\nsep = kwargs.pop('sep', None)\n+ pd_kwargs = kwargs.pop('pd_kwargs', {})\ndef gen_items():\nwith chain(*args, **kwargs) as f:\nfor item in f:\n@@ -378,7 +383,7 @@ def DataFrame(*args, **kwargs):\nif isinstance(v, (str, int, float)):\ninfo[k] = v\nyield info\n- return pd.DataFrame(gen_items())\n+ return pd.DataFrame(gen_items(), **pd_kwargs)\ndef filter_df(*args, **kwargs):\n\"\"\"Read pepXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Pass kwargs to dataframe constructor in pepxml |
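Hypothetical usage of the two forwarded options (file and column names depend on the data and the search engine):

    from pyteomics import pepxml

    # Rows are produced lazily by a generator; pd_kwargs goes straight to
    # the pandas.DataFrame constructor, here selecting and ordering columns.
    df = pepxml.DataFrame('results.pep.xml', sep=';',
                          pd_kwargs={'columns': ['spectrum', 'peptide', 'assumed_charge']})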
377,522 | 26.04.2018 19:26:03 | -10,800 | 9b652bed505ef7f8d8d95c552caa1ee83c23debe | Draft protxml parser | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/_schema_defaults.py",
"new_path": "pyteomics/_schema_defaults.py",
"diff": "+_protxml_schema_defaults = {'bools': set(),\n+ 'charlists': set(),\n+ 'floatlists': set(),\n+ 'floats': {('ASAPRatio', 'heavy2light_ratio_mean'),\n+ ('ASAPRatio', 'heavy2light_ratio_standard_dev'),\n+ ('ASAPRatio', 'ratio_mean'),\n+ ('ASAPRatio', 'ratio_standard_dev'),\n+ ('ASAPRatio_pvalue', 'adj_ratio_mean'),\n+ ('ASAPRatio_pvalue', 'adj_ratio_standard_dev'),\n+ ('ASAPRatio_pvalue', 'decimal_pvalue'),\n+ ('ASAPRatio_pvalue', 'heavy2light_adj_ratio_mean'),\n+ ('ASAPRatio_pvalue', 'heavy2light_adj_ratio_standard_dev'),\n+ ('ASAPRatio_pvalue', 'pvalue'),\n+ ('ASAP_Peak', 'heavy2light_ratio_mean'),\n+ ('ASAP_Peak', 'heavy2light_ratio_standard_dev'),\n+ ('ASAP_Peak', 'ratio_mean'),\n+ ('ASAP_Peak', 'ratio_standard_dev'),\n+ ('ASAP_Peak', 'weight'),\n+ ('ASAP_Seq', 'heavy2light_ratio_mean'),\n+ ('ASAP_Seq', 'heavy2light_ratio_standard_dev'),\n+ ('ASAP_Seq', 'ratio_mean'),\n+ ('ASAP_Seq', 'ratio_standard_dev'),\n+ ('ASAP_Seq', 'weight'),\n+ ('ASAP_prot_analysis_summary', 'min_peptide_probability'),\n+ ('ASAP_prot_analysis_summary', 'min_peptide_weight'),\n+ ('ASAP_prot_analysis_summary', 'min_protein_probability'),\n+ ('ASAP_pvalue_analysis_summary', 'background_fitting_error'),\n+ ('ASAP_pvalue_analysis_summary', 'background_ratio_mean'),\n+ ('ASAP_pvalue_analysis_summary', 'background_ratio_stdev'),\n+ ('StPeterQuant', 'SIn'),\n+ ('StPeterQuant', 'ng'),\n+ ('StPeterQuant_peptide', 'spectralIndex'),\n+ ('StPeter_analysis_summary', 'FDR'),\n+ ('StPeter_analysis_summary', 'probability'),\n+ ('StPeter_analysis_summary', 'sampleLoad'),\n+ ('StPeter_analysis_summary', 'tolerance'),\n+ ('XPress_analysis_summary', 'min_peptide_probability'),\n+ ('XPress_analysis_summary', 'min_peptide_weight'),\n+ ('XPress_analysis_summary', 'min_protein_probability'),\n+ ('affected_channel', 'correction'),\n+ ('decoy_analysis_summary', 'decoy_ratio'),\n+ ('error_point', 'error'),\n+ ('error_point', 'min_prob'),\n+ ('fpkm_distribution', 'alt_pos_to_neg_ratio'),\n+ ('fpkm_distribution', 'fpkm_lower_bound_excl'),\n+ ('fpkm_distribution', 'fpkm_lower_bound_incl'),\n+ ('fpkm_distribution', 'neg_freq'),\n+ ('fpkm_distribution', 'pos_freq'),\n+ ('fpkm_distribution', 'pos_to_neg_ratio'),\n+ ('fragment_masses', 'mz'),\n+ ('indistinguishable_peptide', 'calc_neutral_pep_mass'),\n+ ('intensity', 'error'),\n+ ('intensity', 'mz'),\n+ ('intensity', 'ratio'),\n+ ('libra_summary', 'mass_tolerance'),\n+ ('libra_summary', 'min_pep_prob'),\n+ ('libra_summary', 'min_pep_wt'),\n+ ('libra_summary', 'min_prot_prob'),\n+ ('ni_distribution', 'alt_pos_to_neg_ratio'),\n+ ('ni_distribution', 'neg_freq'),\n+ ('ni_distribution', 'ni_lower_bound_excl'),\n+ ('ni_distribution', 'ni_lower_bound_incl'),\n+ ('ni_distribution', 'pos_freq'),\n+ ('ni_distribution', 'pos_to_neg_ratio'),\n+ ('nsp_distribution', 'alt_pos_to_neg_ratio'),\n+ ('nsp_distribution', 'neg_freq'),\n+ ('nsp_distribution', 'nsp_lower_bound_excl'),\n+ ('nsp_distribution', 'nsp_lower_bound_incl'),\n+ ('nsp_distribution', 'pos_freq'),\n+ ('nsp_distribution', 'pos_to_neg_ratio'),\n+ ('peptide', 'calc_neutral_pep_mass'),\n+ ('peptide', 'exp_sibling_ion_bin'),\n+ ('peptide', 'exp_sibling_ion_instances'),\n+ ('peptide', 'exp_tot_instances'),\n+ ('peptide', 'fpkm_adjusted_probability'),\n+ ('peptide', 'initial_probability'),\n+ ('peptide', 'max_fpkm'),\n+ ('peptide', 'n_sibling_peptides'),\n+ ('peptide', 'ni_adjusted_probability'),\n+ ('peptide', 'nsp_adjusted_probability'),\n+ ('peptide', 'weight'),\n+ ('point', 'fdr_pp'),\n+ ('point', 'fdr_pp_decoy'),\n+ ('point', 'logratio'),\n+ 
('point', 'model_distr'),\n+ ('point', 'num_corr_pp'),\n+ ('point', 'num_corr_pp_decoy'),\n+ ('point', 'obs_distr'),\n+ ('point', 'pp_decoy_uncert'),\n+ ('point', 'pp_uncert'),\n+ ('point', 'prob_cutoff'),\n+ ('protein', 'confidence'),\n+ ('protein', 'percent_coverage'),\n+ ('protein', 'probability'),\n+ ('protein_group', 'probability'),\n+ ('protein_summary_data_filter', 'false_positive_error_rate'),\n+ ('protein_summary_data_filter', 'min_probability'),\n+ ('protein_summary_data_filter', 'predicted_num_correct'),\n+ ('protein_summary_data_filter', 'predicted_num_incorrect'),\n+ ('protein_summary_data_filter', 'sensitivity'),\n+ ('protein_summary_header', 'initial_min_peptide_prob'),\n+ ('protein_summary_header', 'min_peptide_probability'),\n+ ('protein_summary_header', 'min_peptide_weight'),\n+ ('protein_summary_header', 'num_predicted_correct_prots'),\n+ ('protein_summary_header', 'total_no_spectrum_ids')},\n+ 'intlists': set(),\n+ 'ints': {('ASAPRatio', 'ratio_number_peptides'),\n+ ('ASAP_Peak', 'datanum'),\n+ ('ASAP_Seq', 'datanum'),\n+ ('ASAP_pvalue_analysis_summary', 'asap_prot_id'),\n+ ('ASAP_pvalue_analysis_summary', 'asapratio_id'),\n+ ('StPeterQuant_peptide', 'charge'),\n+ ('affected_channel', 'channel'),\n+ ('analysis_result', 'id'),\n+ ('analysis_summary', 'id'),\n+ ('contributing_channel', 'channel'),\n+ ('error_point', 'num_corr'),\n+ ('error_point', 'num_incorr'),\n+ ('fpkm_distribution', 'bin_no'),\n+ ('fragment_masses', 'channel'),\n+ ('intensity', 'channel'),\n+ ('libra_result', 'number'),\n+ ('libra_summary', 'centroiding_preference'),\n+ ('libra_summary', 'normalization'),\n+ ('libra_summary', 'output_type'),\n+ ('ni_distribution', 'bin_no'),\n+ ('nsp_distribution', 'bin_no'),\n+ ('peptide', 'charge'),\n+ ('peptide', 'fpkm_bin'),\n+ ('peptide', 'n_enzymatic_termini'),\n+ ('peptide', 'n_instances'),\n+ ('peptide', 'n_sibling_peptides_bin'),\n+ ('protein', 'n_indistinguishable_proteins'),\n+ ('protein', 'total_number_distinct_peptides'),\n+ ('protein', 'total_number_peptides'),\n+ ('protein_summary_header', 'num_input_1_spectra'),\n+ ('protein_summary_header', 'num_input_2_spectra'),\n+ ('protein_summary_header', 'num_input_3_spectra'),\n+ ('protein_summary_header', 'num_input_4_spectra'),\n+ ('protein_summary_header', 'num_input_5_spectra')},\n+ 'lists': {'ASAP_Dta',\n+ 'ASAP_Peak',\n+ 'ASAP_Seq',\n+ 'StPeterQuant_peptide',\n+ 'affected_channel',\n+ 'analysis_result',\n+ 'analysis_summary',\n+ 'contributing_channel',\n+ 'error_point',\n+ 'fpkm_distribution',\n+ 'fpkm_information',\n+ 'fragment_masses',\n+ 'indistinguishable_peptide',\n+ 'indistinguishable_protein',\n+ 'intensity',\n+ 'mod_aminoacid_mass',\n+ 'modification_info',\n+ 'ni_distribution',\n+ 'ni_information',\n+ 'nsp_distribution',\n+ 'parameter',\n+ 'peptide',\n+ 'peptide_parent_protein',\n+ 'point',\n+ 'protein',\n+ 'protein_group',\n+ 'protein_summary_data_filter'}}\n+\n_mzid_schema_defaults = {'bools': {('Enzyme', 'semiSpecific'),\n('Enzymes', 'independent'),\n('PeptideEvidence', 'isDecoy'),\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -118,7 +118,7 @@ class PepXML(xml.XML):\nexcept KeyError:\nname = xml._local_name(element)\nrec = kwargs.pop('recursive', None)\n- if name == 'msms_pipeline_analysis':\n+ if name == self._root_element:\ninfo = self._get_info(element, ename=name,\nrecursive=(rec if rec is not None else False),\n**kwargs)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pyteomics/protxml.py",
"diff": "+from . import xml, auxiliary as aux, _schema_defaults\n+\n+class ProtXML(xml.XML):\n+ \"\"\"Parser class for pepXML files.\"\"\"\n+ file_format = 'protXML'\n+ _root_element = 'protein_summary'\n+ _default_schema = _schema_defaults._protxml_schema_defaults\n+ # _default_version = None\n+ _default_iter_tag = 'protein_group'\n+ _structures_to_flatten = {'modification_info', 'annotation'}\n+ # attributes which contain unconverted values\n+ _convert_items = {'float': {'pct_spectrum_ids'},\n+ 'int': {'group_number', 'prot_length'},\n+ 'bool': {'is_contributing_evidence', 'is_nondegenerate_evidence'}\n+ }.items()\n+\n+ def _get_info_smart(self, element, **kwargs):\n+ \"\"\"Extract the info in a smart way depending on the element type\"\"\"\n+ try:\n+ name = kwargs.pop('ename')\n+ except KeyError:\n+ name = xml._local_name(element)\n+ rec = kwargs.pop('recursive', None)\n+ if name == self._root_element:\n+ info = self._get_info(element, ename=name,\n+ recursive=(rec if rec is not None else False),\n+ **kwargs)\n+ else:\n+ info = self._get_info(element, ename=name,\n+ recursive=(rec if rec is not None else True),\n+ **kwargs)\n+\n+ converters = {'float': float, 'int': int,\n+ 'bool': lambda x: x.lower() in {'1', 'true', 'y'}}\n+ for k, v in dict(info).items():\n+ for t, s in self._convert_items:\n+ if k in s:\n+ del info[k]\n+ info[k] = converters[t](v)\n+ p = info.get('parameter')\n+ if isinstance(p, list) and len(p) == 1 and isinstance(p[0], dict):\n+ info.update(info.pop('parameter')[0])\n+ return info\n+\n+def read(source, read_schema=False, iterative=True, **kwargs):\n+ \"\"\"Parse `source` and iterate through protein groups.\n+\n+ Parameters\n+ ----------\n+ source : str or file\n+ A path to a target protXML file or the file object itself.\n+\n+ read_schema : bool, optional\n+ If :py:const:`True`, attempt to extract information from the XML schema\n+ mentioned in the protXML header. Otherwise, use default parameters.\n+ Not recommended without Internet connection or\n+ if you don't like to get the related warnings.\n+\n+ iterative : bool, optional\n+ Defines whether iterative parsing should be used. It helps reduce\n+ memory usage at almost the same parsing speed. Default is\n+ :py:const:`True`.\n+\n+ Returns\n+ -------\n+ out : ProtXML\n+ An iterator over dicts with protein group properties.\n+ \"\"\"\n+\n+ return ProtXML(source, read_schema=read_schema, iterative=iterative)\n+\n+chain = aux._make_chain(read, 'read')\n+\n+def DataFrame(*args, **kwargs):\n+ \"\"\"Read protXML output files into a :py:class:`pandas.DataFrame`.\n+\n+ Requires :py:mod:`pandas`.\n+\n+ Parameters\n+ ----------\n+ *args, **kwargs : passed to :py:func:`chain`\n+\n+ sep : str or None, optional\n+ Some values related to protein groups are variable-length lists.\n+ If `sep` is a :py:class:`str`, they will be packed into single string using\n+ this delimiter. If `sep` is :py:const:`None`, they are kept as lists. 
Default is\n+ :py:const:`None`.\n+\n+ pd_kwargs : dict, optional\n+ Keyword arguments passed to the :py:class:`pandas.DataFrame` constructor.\n+\n+ Returns\n+ -------\n+ out : pandas.DataFrame\n+ \"\"\"\n+ import pandas as pd\n+ kwargs = kwargs.copy()\n+ sep = kwargs.pop('sep', None)\n+ pd_kwargs = kwargs.pop('pd_kwargs', {})\n+ def gen_items():\n+ with chain(*args, **kwargs) as f:\n+ for item in f:\n+ info = {}\n+ for k, v in item.items():\n+ if isinstance(v, (str, int, float)):\n+ info[k] = v\n+ if 'protein' in item:\n+ for prot in item['protein']:\n+ out = dict(info)\n+ out.update(prot)\n+ if 'unique_stripped_peptides' in out:\n+ if sep is None:\n+ out['unique_stripped_peptides'] = out['unique_stripped_peptides'].split('+')\n+ else:\n+ out['unique_stripped_peptides'] = sep.join(out['unique_stripped_peptides'].split('+'))\n+ if 'indistinguishable_protein' in out:\n+ if sep is None:\n+ out['indistinguishable_protein'] = [p['protein_name'] for p in out['indistinguishable_protein']]\n+ else:\n+ out['indistinguishable_protein'] = sep.join(p['protein_name'] for p in out['indistinguishable_protein'])\n+ yield out\n+ return pd.DataFrame(gen_items(), **pd_kwargs)\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -81,9 +81,9 @@ class XMLValueConverter(object):\n@classmethod\ndef str_to_bool(cls, s):\n- if s.lower() in {'true', '1'}:\n+ if s.lower() in {'true', '1', 'y'}:\nreturn True\n- if s.lower() in {'false', '0'}:\n+ if s.lower() in {'false', '0', 'n'}:\nreturn False\nraise PyteomicsError('Cannot convert string to bool: ' + s)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Draft protxml parser |
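A first-use sketch of the draft module (file name hypothetical):

    from pyteomics import protxml

    # iterate over protein_group dicts...
    with protxml.read('interact.prot.xml') as groups:
        for group in groups:
            print(group['group_number'], group['probability'])

    # ...or flatten everything into a table, one row per protein
    df = protxml.DataFrame('interact.prot.xml', sep=';')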
377,522 | 08.05.2018 16:42:17 | -10,800 | 657d55f5bba565adffc415da1a1095524338a982 | Fix modification_info flattening in protxml | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/protxml.py",
"new_path": "pyteomics/protxml.py",
"diff": "@@ -7,7 +7,7 @@ class ProtXML(xml.XML):\n_default_schema = _schema_defaults._protxml_schema_defaults\n# _default_version = None\n_default_iter_tag = 'protein_group'\n- _structures_to_flatten = {'modification_info', 'annotation'}\n+ _structures_to_flatten = {'annotation'}\n# attributes which contain unconverted values\n_convert_items = {'float': {'pct_spectrum_ids'},\n'int': {'group_number', 'prot_length'},\n@@ -40,6 +40,10 @@ class ProtXML(xml.XML):\np = info.get('parameter')\nif isinstance(p, list) and len(p) == 1 and isinstance(p[0], dict):\ninfo.update(info.pop('parameter')[0])\n+\n+ if 'modification_info' in info:\n+ # this is a list with one element\n+ info.update(info.pop('modification_info')[0])\nreturn info\ndef read(source, read_schema=False, iterative=True, **kwargs):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix modification_info flattening in protxml |
377,522 | 09.05.2018 21:04:54 | -10,800 | 392f8ff67d49f471afda830e40e80f1ea9f8410a | Fix args[0] error in _file_reader | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -178,7 +178,10 @@ def _file_reader(_mode='r'):\n\"\"\"\n@wraps(_func)\ndef helper(*args, **kwargs):\n+ if args:\nreturn FileReader(args[0], _mode, _func, True, args[1:], kwargs, kwargs.pop('encoding', None))\n+ source = kwargs.pop('source', None)\n+ return FileReader(source, _mode, _func, True, (), kwargs, kwargs.pop('encoding', None))\nreturn helper\nreturn decorator\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix args[0] error in _file_reader |
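Before this fix, calling a _file_reader-decorated function with keyword arguments only raised an IndexError on args[0]. A sketch with fasta.read, one of the decorated readers (file name hypothetical):

    from pyteomics import fasta

    # both call styles now construct the FileReader correctly
    r1 = fasta.read('proteins.fasta')
    r2 = fasta.read(source='proteins.fasta')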
377,522 | 09.05.2018 21:14:10 | -10,800 | 6b43cb1a397efa07d5264658ae301dee2671e6c1 | Fix some consequences of auxiliary split | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "try:\n- basestring\n+ basestring = basestring\nexcept NameError:\nbasestring = (str, bytes)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/target_decoy.py",
"new_path": "pyteomics/auxiliary/target_decoy.py",
"diff": "+from __future__ import absolute_import\nimport re\nimport operator as op\nimport math\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix some consequences of auxiliary split |
377,522 | 10.05.2018 00:07:27 | -10,800 | 74001d17e5692c4022bbe6c8a23ecd3e1171145e | Add project_urls in setup.py | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -25,6 +25,12 @@ setup(\nauthor_email = 'pyteomics@googlegroups.com',\nurl = 'http://hg.theorchromo.ru/pyteomics',\npackages = ['pyteomics', 'pyteomics.mass', 'pyteomics.openms'],\n+ project_urls = {\n+ 'Documentation': 'http://pythonhosted.org/pyteomics/',\n+ 'Source Code' : 'https://bitbucket.org/levitsky/pyteomics',\n+ 'Issue Tracker': 'http://bitbucket.org/levitsky/pyteomics/issues',\n+ 'Mailing List' : 'https://groups.google.com/group/pyteomics',\n+ },\nnamespace_packages = ['pyteomics'],\nextras_require = {'XML': ['lxml', 'numpy'],\n'FDR': ['numpy'],\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add project_urls in setup.py |
377,522 | 11.05.2018 19:26:58 | -10,800 | 74e1462f839bb56d4c968693969fc884cb972433 | Add auxiliary package in setup.py | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -24,7 +24,7 @@ setup(\nauthor = 'Anton Goloborodko & Lev Levitsky',\nauthor_email = 'pyteomics@googlegroups.com',\nurl = 'http://hg.theorchromo.ru/pyteomics',\n- packages = ['pyteomics', 'pyteomics.mass', 'pyteomics.openms'],\n+ packages = ['pyteomics', 'pyteomics.mass', 'pyteomics.openms', 'pyteomics.auxiliary'],\nproject_urls = {\n'Documentation': 'http://pythonhosted.org/pyteomics/',\n'Source Code' : 'https://bitbucket.org/levitsky/pyteomics',\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add auxiliary package in setup.py |
377,522 | 14.05.2018 00:22:02 | -10,800 | 43f739347b7acea29ea0bdec8172f36ccc628dfd | Start protxml docs | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "This functionality will be changed in upcoming versions.\n+ - New module :py:mod:`pyteomics.protxml` for parsing of ProteinProphet output files.\n+\n- New parameter `huge_tree` in XML parser constructors and :py:func:`read` functions.\nIt is passed to the underlying :py:mod:`lxml` calls. Default value is `False`.\nSet to `True` to overcome errors such as: `XMLSyntaxError: xmlSAX2Characters: huge text node`.\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/api.rst",
"new_path": "doc/source/api.rst",
"diff": "@@ -20,6 +20,7 @@ Contents:\napi/mgf\napi/ms1\napi/pepxml\n+ api/protxml\napi/tandem\napi/mzid\napi/featurexml\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "doc/source/api/protxml.rst",
"diff": "+.. automodule:: pyteomics.protxml\n+\n+ .. autofunction:: chain\n+ .. py:function :: chain.from_iterable(files, **kwargs)\n+\n+ Chain :py:func:`read` for several files.\n+ Keyword arguments are passed to the :py:func:`read` function.\n+\n+ Parameters\n+ ----------\n+ files : iterable\n+ Iterable of file names or file objects.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/protxml.py",
"new_path": "pyteomics/protxml.py",
"diff": "+\"\"\"\n+protxml - parsing of ProteinProphet output files\n+================================================\n+\n+Summary\n+-------\n+\n+**protXML** is the output format of the `ProteinProphet software <http://proteinprophet.sourceforge.net/>`_.\n+It contains information about identified proteins and their statistical significance.\n+\n+This module provides minimalistic infrastructure for access to data stored in\n+protXML files. The central class is :py:class:`ProtXML`, which\n+reads protein entries and related information and saves them into\n+Python dicts.\n+\n+Data access\n+-----------\n+\n+ :py:class:`ProtXML` - a class representing a single protXML file.\n+ Other data access functions use this class internally.\n+\n+ :py:func:`read` - iterate through peptide-spectrum matches in a protXML\n+ file. Calling the function is synonymous to instantiating the :py:class:`ProtXML` class.\n+\n+ :py:func:`chain` - read multiple files at once.\n+\n+ :py:func:`chain.from_iterable` - read multiple files at once, using an\n+ iterable of files.\n+\n+ :py:func:`DataFrame` - read protXML files into a :py:class:`pandas.DataFrame`.\n+\n+Target-decoy approach\n+---------------------\n+\n+ :py:func:`filter` - filter protein groups from a chain of protXML files to a specific FDR\n+ using TDA.\n+\n+ :py:func:`filter.chain` - chain a series of filters applied independently to\n+ several files.\n+\n+ :py:func:`filter.chain.from_iterable` - chain a series of filters applied\n+ independently to an iterable of files.\n+\n+ :py:func:`filter_df` - filter protXML files and return a :py:class:`pandas.DataFrame`.\n+\n+ :py:func:`fdr` - estimate the false discovery rate of a PSM set using the\n+ target-decoy approach.\n+\n+ :py:func:`qvalues` - get an array of scores and local FDR values for protein groups using the target-decoy approach.\n+\n+ :py:func:`is_decoy` - determine whether a protein group is decoy or not.\n+\n+Dependencies\n+------------\n+\n+This module requres :py:mod:`lxml`.\n+\n+--------------------------------------------------------------------------------\n+\"\"\"\n+\n+# Copyright 2018 Lev Levitsky\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\nfrom . import xml, auxiliary as aux, _schema_defaults\nclass ProtXML(xml.XML):\n- \"\"\"Parser class for pepXML files.\"\"\"\n+ \"\"\"Parser class for protXML files.\"\"\"\nfile_format = 'protXML'\n_root_element = 'protein_summary'\n_default_schema = _schema_defaults._protxml_schema_defaults\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Start protxml docs |
377,522 | 14.05.2018 01:48:19 | -10,800 | 8bfaf20233e13d499f5327929bc22f22acb850b4 | Make skip_empty_cvparam_values a class attr, add kwarg in mzid init | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -112,6 +112,43 @@ class MzIdentML(xml.IndexSavingXML):\n'PeptideEvidence', 'SpectrumIdentificationItem', 'SearchDatabase',\n'DBSequence', 'SpectraData', 'Peptide'}\n+ def __init__(self, *args, **kwargs):\n+ \"\"\"Create an MzIdentML parser object.\n+\n+ Parameters\n+ ----------\n+ source : str or file\n+ File name or file-like object corresponding to an XML file.\n+ read_schema : bool, optional\n+ Defines whether schema file referenced in the file header\n+ should be used to extract information about value conversion.\n+ Default is :py:const:`False`.\n+ iterative : bool, optional\n+ Defines whether an :py:class:`ElementTree` object should be\n+ constructed and stored on the instance or if iterative parsing\n+ should be used instead. Iterative parsing keeps the memory usage\n+ low for large XML files. Default is :py:const:`True`.\n+ use_index : bool, optional\n+ Defines whether an index of byte offsets needs to be created for\n+ elements listed in `indexed_tags`.\n+ This is useful for random access to spectra in mzML or elements of mzIdentML files,\n+ or for iterative parsing of mzIdentML with ``retrieve_refs=True``.\n+ If :py:const:`True`, `build_id_cache` is ignored.\n+ If :py:const:`False`, the object acts exactly like :py:class:`XML`.\n+ Default is :py:const:`True`.\n+ indexed_tags : container of bytes, optional\n+ If `use_index` is :py:const:`True`, elements listed in this parameter\n+ will be indexed. Empty set by default.\n+ skip_empty_cvparam_values : bool, optional\n+ .. warning :: This parameter affects the format of the produced dictionaries.\n+ By default, when parsing cvParam elements, \"value\" attributes with empty values are not\n+ treated differently from others. When this parameter is set to :py:const:`True`,\n+ these empty values are flattened. You can enable this to obtain the same output structure\n+ regardless of the presence of an empty \"value\". Default is :py:const:`False`.\n+ \"\"\"\n+ self._skip_empty_cvparam_values = kwargs.get('skip_empty_cvparam_values', False)\n+ super(MzIdentML, self).__init__(*args, **kwargs)\n+\ndef _get_info_smart(self, element, **kwargs):\n\"\"\"Extract the info in a smart way depending on the element type\"\"\"\nname = xml._local_name(element)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -122,6 +122,7 @@ class XML(FileReader):\n_schema_location_param = 'schemaLocation'\n_default_id_attr = 'id'\n_huge_tree = False\n+ _skip_empty_cvparam_values = False\n# Configurable plugin logic\n_converters = XMLValueConverter.converters()\n@@ -296,7 +297,7 @@ class XML(FileReader):\nunit_name = attribs.get(\"unitName\", unit_accesssion)\nunit_info = unit_name\naccession = attribs.get(\"accession\")\n- if 'value' in attribs and (not kwargs.get('skip_empty_cvparam_values', False) or\n+ if 'value' in attribs and (not self._skip_empty_cvparam_values or\nattribs['value'] != ''):\ntry:\nif attribs.get('type') in types:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make skip_empty_cvparam_values a class attr, add kwarg in mzid init |
377,522 | 14.05.2018 01:52:46 | -10,800 | e6185cebcc1eb29c64cfd8803e730b497ed9d8e3 | Move the init kwarg to xml | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -112,43 +112,6 @@ class MzIdentML(xml.IndexSavingXML):\n'PeptideEvidence', 'SpectrumIdentificationItem', 'SearchDatabase',\n'DBSequence', 'SpectraData', 'Peptide'}\n- def __init__(self, *args, **kwargs):\n- \"\"\"Create an MzIdentML parser object.\n-\n- Parameters\n- ----------\n- source : str or file\n- File name or file-like object corresponding to an XML file.\n- read_schema : bool, optional\n- Defines whether schema file referenced in the file header\n- should be used to extract information about value conversion.\n- Default is :py:const:`False`.\n- iterative : bool, optional\n- Defines whether an :py:class:`ElementTree` object should be\n- constructed and stored on the instance or if iterative parsing\n- should be used instead. Iterative parsing keeps the memory usage\n- low for large XML files. Default is :py:const:`True`.\n- use_index : bool, optional\n- Defines whether an index of byte offsets needs to be created for\n- elements listed in `indexed_tags`.\n- This is useful for random access to spectra in mzML or elements of mzIdentML files,\n- or for iterative parsing of mzIdentML with ``retrieve_refs=True``.\n- If :py:const:`True`, `build_id_cache` is ignored.\n- If :py:const:`False`, the object acts exactly like :py:class:`XML`.\n- Default is :py:const:`True`.\n- indexed_tags : container of bytes, optional\n- If `use_index` is :py:const:`True`, elements listed in this parameter\n- will be indexed. Empty set by default.\n- skip_empty_cvparam_values : bool, optional\n- .. warning :: This parameter affects the format of the produced dictionaries.\n- By default, when parsing cvParam elements, \"value\" attributes with empty values are not\n- treated differently from others. When this parameter is set to :py:const:`True`,\n- these empty values are flattened. You can enable this to obtain the same output structure\n- regardless of the presence of an empty \"value\". Default is :py:const:`False`.\n- \"\"\"\n- self._skip_empty_cvparam_values = kwargs.get('skip_empty_cvparam_values', False)\n- super(MzIdentML, self).__init__(*args, **kwargs)\n-\ndef _get_info_smart(self, element, **kwargs):\n\"\"\"Extract the info in a smart way depending on the element type\"\"\"\nname = xml._local_name(element)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -179,6 +179,7 @@ class XML(FileReader):\nself._converters_items = self._converters.items()\nself._huge_tree = kwargs.get('huge_tree', self._huge_tree)\n+ self._skip_empty_cvparam_values = kwargs.get('skip_empty_cvparam_values', False)\n@_keepstate\ndef _get_version_info(self):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Move the init kwarg to xml |
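Usage sketch for the relocated keyword; as the docstring warns, it changes the shape of the produced dictionaries:

    from pyteomics import mzid

    # cvParams with an empty "value" attribute are flattened
    # the same way as cvParams without a "value" at all
    with mzid.MzIdentML('tests/test.mzid', skip_empty_cvparam_values=True) as reader:
        psm = next(reader)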
377,522 | 14.05.2018 19:06:19 | -10,800 | 9a749e46ed257a4955219367b2c97152cb80de2e | Tweak mzml for skip_empty_cvparam_values | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.5b1\n\\ No newline at end of file\n+3.5b2\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzml.py",
"new_path": "pyteomics/mzml.py",
"diff": "@@ -204,6 +204,7 @@ class MzML(xml.ArrayConversionMixin, xml.IndexSavingXML):\nif found_compression_types:\nfound_compression_types = tuple(found_compression_types)\nif len(found_compression_types) == 1:\n+ if not self._skip_empty_cvparam_values:\ndel info['name'][found_compression_types[0]]\nreturn found_compression_types[0]\nelse:\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -159,6 +159,14 @@ class XML(FileReader):\nDefault is :py:const:`False`.\nEnable this option for trusted files to avoid XMLSyntaxError exceptions\n(e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).\n+ skip_empty_cvparam_values : bool, optional\n+ .. warning ::\n+ This parameter affects the format of the produced dictionaries.\n+\n+ By default, when parsing cvParam elements, \"value\" attributes with empty values are not\n+ treated differently from others. When this parameter is set to :py:const:`True`,\n+ these empty values are flattened. You can enable this to obtain the same output structure\n+ regardless of the presence of an empty \"value\". Default is :py:const:`False`.\n\"\"\"\nsuper(XML, self).__init__(source, 'rb', self.iterfind, False,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1268,6 +1268,53 @@ mzml_spectra = [{'MSn spectrum': '',\n'scan window upper limit': 2000.0}]}}]},\n'total ion current': 15245068.0}]\n+mzml_spectra_skip_empty_values = [{'base peak intensity': 1471973.875,\n+ 'base peak m/z': 810.415283203125,\n+ 'count': 2,\n+ 'defaultArrayLength': 19914,\n+ 'highest observed m/z': 2000.0099466203771,\n+ 'id': 'controllerType=0 controllerNumber=1 scan=1',\n+ 'index': 0,\n+ 'intensity array': makeCA(mzml_int_array),\n+ 'lowest observed m/z': 200.00018816645022,\n+ 'm/z array': makeCA(mzml_mz_array),\n+ 'ms level': 1,\n+ 'name': ['MSn spectrum', 'positive scan', 'profile spectrum'],\n+ 'scanList': {'count': 1,\n+ 'name': 'no combination',\n+ 'scan': [{'[Thermo Trailer Extra]Monoisotopic M/Z:': 810.4152221679688,\n+ 'filter string': 'FTMS + p ESI Full ms [200.00-2000.00]',\n+ 'instrumentConfigurationRef': 'IC1',\n+ 'preset scan configuration': 1.0,\n+ 'scan start time': 0.004935,\n+ 'scanWindowList': {'count': 1,\n+ 'scanWindow': [{'scan window lower limit': 200.0,\n+ 'scan window upper limit': 2000.0}]}}]},\n+ 'total ion current': 15245068.0},\n+ {'base peak intensity': 1471973.875,\n+ 'base peak m/z': 810.415283203125,\n+ 'count': 2,\n+ 'defaultArrayLength': 19914,\n+ 'highest observed m/z': 2000.0099466203771,\n+ 'id': 'controllerType=0 controllerNumber=1 scan=1',\n+ 'index': 1,\n+ 'intensity array': makeCA(mzml_int_array),\n+ 'lowest observed m/z': 200.00018816645022,\n+ 'm/z array': makeCA(mzml_mz_array),\n+ 'ms level': 1,\n+ 'name': ['MSn spectrum', 'positive scan', 'profile spectrum'],\n+ 'scanList': {'count': 1,\n+ 'name': 'no combination',\n+ 'scan': [{'[Thermo Trailer Extra]Monoisotopic M/Z:': 810.4152221679688,\n+ 'filter string': 'FTMS + p ESI Full ms [200.00-2000.00]',\n+ 'instrumentConfigurationRef': 'IC1',\n+ 'preset scan configuration': 1.0,\n+ 'scan start time': 0.004935,\n+ 'scanWindowList': {'count': 1,\n+ 'scanWindow': [{'scan window lower limit': 200.0,\n+ 'scan window upper limit': 2000.0}]}}]},\n+ 'total ion current': 15245068.0}]\n+\nmgf_int = [np.array([ 73., 44., 67., 291., 54., 49.]),\nnp.array([ 237., 128., 108., 1007., 974., 79.])]\nmgf_mz = [np.array([ 846.6, 846.8, 847.6, 1640.1, 1640.6, 1895.5]),\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -8,14 +8,14 @@ from itertools import product\nimport unittest\nfrom pyteomics.mzml import MzML, PreIndexedMzML, read, chain\nfrom pyteomics import auxiliary as aux, xml\n-from data import mzml_spectra\n+from data import mzml_spectra, mzml_spectra_skip_empty_values\nimport numpy as np\nclass MzmlTest(unittest.TestCase):\nmaxDiff = None\npath = 'test.mzML'\n- def testReadSpectrum(self):\n+ def test_read(self):\nfor rs, it, ui in product([True, False], repeat=3):\nfor func in [MzML, read, chain,\nlambda x, **kw: chain.from_iterable([x], **kw), PreIndexedMzML]:\n@@ -23,6 +23,10 @@ class MzmlTest(unittest.TestCase):\n# http://stackoverflow.com/q/14246983/1258041\nself.assertEqual(mzml_spectra, list(r))\n+ def test_read_skip_empty_values(self):\n+ with MzML(self.path, skip_empty_cvparam_values=True) as r:\n+ self.assertEqual(mzml_spectra_skip_empty_values, list(r))\n+\ndef test_decoding(self):\nwith MzML(self.path, decode_binary=True) as reader:\nspectrum = next(reader)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Tweak mzml for skip_empty_cvparam_values |
377,522 | 14.05.2018 19:08:00 | -10,800 | d351c6f4f97dae94b56bd03749462cc929c7d309 | Fix dtype leak via ArrayConversionMixin | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -1106,7 +1106,7 @@ class ArrayConversionMixin(BinaryDataArrayTransformer):\n_array_keys = ['m/z array', 'intensity array']\ndef __init__(self, *args, **kwargs):\n- self._dtype_dict.setdefault(None, None)\n+ self._dtype_dict = {None: None}\ndtype = kwargs.pop('dtype', None)\nif isinstance(dtype, dict):\nself._dtype_dict.update(dtype)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix dtype leak via ArrayConversionMixin |
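The one-line fix above rebinds a fresh per-instance dict instead of calling setdefault on what was effectively a class-level dict. A self-contained sketch of the leak being closed (illustrative classes, not the pyteomics source):

    class Leaky:
        _dtype_dict = {}  # class attribute, shared by every instance

        def __init__(self, dtype=None):
            # BUG: setdefault mutates the shared class dict, so a dtype
            # passed to one reader silently applies to all later readers
            self._dtype_dict.setdefault(None, None)
            if dtype is not None:
                self._dtype_dict['m/z array'] = dtype

    class Fixed:
        def __init__(self, dtype=None):
            # FIX: rebind a fresh dict on each instance
            self._dtype_dict = {None: None}
            if dtype is not None:
                self._dtype_dict['m/z array'] = dtype

    Leaky(dtype=float)
    print(Leaky()._dtype_dict)  # {None: None, 'm/z array': <class 'float'>} -- leaked
    print(Fixed()._dtype_dict)  # {None: None}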
377,522 | 14.05.2018 19:24:10 | -10,800 | 770bf48699ad1d1e8fe59d992519cb336e6c5ad0 | Make retrieve_refs true by default in MzIdentML init | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -112,6 +112,10 @@ class MzIdentML(xml.IndexSavingXML):\n'PeptideEvidence', 'SpectrumIdentificationItem', 'SearchDatabase',\n'DBSequence', 'SpectraData', 'Peptide'}\n+ def __init__(self, *args, **kwargs):\n+ kwargs.setdefault('retrieve_refs', True)\n+ super(MzIdentML, self).__init__(*args, **kwargs)\n+\ndef _get_info_smart(self, element, **kwargs):\n\"\"\"Extract the info in a smart way depending on the element type\"\"\"\nname = xml._local_name(element)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mzid.py",
"new_path": "tests/test_mzid.py",
"diff": "@@ -2,25 +2,29 @@ from os import path\nimport pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport unittest\n-from pyteomics.mzid import *\n+from pyteomics.mzid import MzIdentML, read, chain\nfrom pyteomics import auxiliary as aux\nfrom data import mzid_spectra\nfrom itertools import product\n-from io import BytesIO\nclass MzidTest(unittest.TestCase):\nmaxDiff = None\n+ path = 'test.mzid'\ndef testReadPSM(self):\nfor rec, refs, rs, it, ui in product((True, False), repeat=5):\nfor func in [MzIdentML, read, chain,\nlambda x, **kw: chain.from_iterable([x], **kw)]:\n- with func('test.mzid', recursive=rec, retrieve_refs=refs,\n+ with func(self.path, recursive=rec, retrieve_refs=refs,\nread_schema=rs, iterative=it, use_index=ui) as reader:\npsms = list(reader)\nself.assertEqual(psms, mzid_spectra[(rec, refs)])\n+ def test_skip_empty_values(self):\n+ with MzIdentML(self.path, skip_empty_cvparam_values=True, recursive=True, retrieve_refs=True) as f:\n+ self.assertEqual(list(f), mzid_spectra[(True, True)])\n+\ndef test_unit_info(self):\n- with MzIdentML('test.mzid') as handle:\n+ with MzIdentML(self.path) as handle:\nfor protocol in handle.iterfind(\"SpectrumIdentificationProtocol\"):\nfragment_tolerance = protocol['FragmentTolerance']\nself.assertEqual(fragment_tolerance['search tolerance minus value'].unit_info, 'dalton')\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make retrieve_refs true by default in MzIdentML init |
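With this change, MzIdentML resolves references out of the box. A hedged sketch against the test.mzid fixture used above:

    from pyteomics import mzid

    # retrieve_refs now defaults to True: PeptideEvidence, Peptide and
    # similar referenced elements arrive already dereferenced
    with mzid.MzIdentML('test.mzid') as reader:
        psm = next(reader)

    # the old behaviour is one keyword away
    with mzid.MzIdentML('test.mzid', retrieve_refs=False) as reader:
        raw_psm = next(reader)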
377,522 | 15.05.2018 17:48:51 | -10,800 | e11a77d3264f5b4871a9034d4714318e139e72f0 | Tweak pepxml test | [
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -18,7 +18,7 @@ def makeCA(arr):\narr = np.array(arr)\nreturn ComparableArray(arr.shape, arr.dtype, arr)\n-pepxml_spectra = [\n+pepxml_results = [\n{'spectrum': 'pps_sl20060731_18mix_25ul_r1_1154456409.0100.0100.1',\n'end_scan': 100,\n'start_scan': 100,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_pepxml.py",
"new_path": "tests/test_pepxml.py",
"diff": "@@ -3,8 +3,8 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nfrom itertools import product\nimport unittest\n-from pyteomics.pepxml import *\n-from data import pepxml_spectra\n+from pyteomics.pepxml import PepXML, read, chain, filter\n+from data import pepxml_results\nclass PepxmlTest(unittest.TestCase):\nmaxDiff = None\n@@ -21,7 +21,7 @@ class PepxmlTest(unittest.TestCase):\nlambda x, **kw: filter.chain(x, **PepxmlTest._kw),\nlambda x, **kw: filter.chain.from_iterable([x], **PepxmlTest._kw)]:\nwith func('test.pep.xml', read_schema=rs, iterative=it) as r:\n- self.assertEqual(list(r), pepxml_spectra)\n+ self.assertEqual(list(r), pepxml_results)\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Tweak pepxml test |
377,522 | 15.05.2018 18:10:01 | -10,800 | 2f10d935522f2dfbbb2a1b7f741169dbe2e7ce64 | Start protxml test | [
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1655,3 +1655,533 @@ ms1_header = {'CreationDate': 'Sat Jun 03 15:25:10 2017',\n'Extractor version': 'Xcalibur',\n'Extractor': 'ProteoWizard',\n'Source file': 'Set 1. B2 at 193 nm RT.RAW'}\n+\n+protxml_results =[{'group_number': 1,\n+ 'probability': 1.0,\n+ 'protein': [{'confidence': 1.0,\n+ 'group_sibling_id': 'a',\n+ 'n_indistinguishable_proteins': 1,\n+ 'pct_spectrum_ids': 0.018,\n+ 'peptide': [{'calc_neutral_pep_mass': 2094.0307,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9995,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 4.38,\n+ 'n_sibling_peptides_bin': 3,\n+ 'nsp_adjusted_probability': 0.9995,\n+ 'peptide_group_designator': 'a',\n+ 'peptide_sequence': 'NIPIMSTASVEIDDAIYSR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1538.794,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9995,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'mod_aminoacid_mass': [{'mass': '111.032030', 'position': '1'}],\n+ 'modified_peptide': 'Q[111]DVIITAIDNVEAR',\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 4.38,\n+ 'n_sibling_peptides_bin': 3,\n+ 'nsp_adjusted_probability': 0.9995,\n+ 'peptide_sequence': 'QDVIITAIDNVEAR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2094.0303,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9995,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 4.38,\n+ 'n_sibling_peptides_bin': 3,\n+ 'nsp_adjusted_probability': 0.9995,\n+ 'peptide_group_designator': 'a',\n+ 'peptide_sequence': 'NIPIMSTASVEIDDAIYSR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2212.2752,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9981,\n+ 'initial_probability': 0.996,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 4.38,\n+ 'n_sibling_peptides_bin': 3,\n+ 'nsp_adjusted_probability': 0.9981,\n+ 'peptide_sequence': 'IIPAIATTTATVSGIVAIEMIK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1126.5658,\n+ 'charge': 2,\n+ 'exp_tot_instances': 0.66,\n+ 'fpkm_adjusted_probability': 0.8017,\n+ 'initial_probability': 0.6598,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 4.71,\n+ 'n_sibling_peptides_bin': 3,\n+ 'nsp_adjusted_probability': 0.8017,\n+ 'peptide_sequence': 'TVFFESIER',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 961.5233,\n+ 'charge': 2,\n+ 'exp_tot_instances': 0.47,\n+ 'fpkm_adjusted_probability': 0.695,\n+ 'initial_probability': 0.4723,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 4.9,\n+ 'n_sibling_peptides_bin': 4,\n+ 'nsp_adjusted_probability': 0.695,\n+ 'peptide_sequence': 'NAIFQIEK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 945.5131,\n+ 'charge': 2,\n+ 'exp_tot_instances': 0.25,\n+ 'fpkm_adjusted_probability': 0.249,\n+ 'initial_probability': 0.249,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 
1,\n+ 'n_sibling_peptides': 5.13,\n+ 'n_sibling_peptides_bin': 4,\n+ 'nsp_adjusted_probability': 0.249,\n+ 'peptide_sequence': 'AIISNEATK',\n+ 'weight': 1.0}],\n+ 'percent_coverage': 7.7,\n+ 'probability': 1.0,\n+ 'prot_length': 1052,\n+ 'protein_description': 'Ubiquitin-like modifier-activating enzyme 6 OS=Homo sapiens GN=UBA6 PE=1 SV=1',\n+ 'protein_name': 'sp|A0AVT1|UBA6_HUMAN',\n+ 'raw_intensity': '0.000',\n+ 'total_number_distinct_peptides': 7,\n+ 'total_number_peptides': 7,\n+ 'unique_stripped_peptides': 'AIISNEATK+IIPAIATTTATVSGIVAIEMIK+NAIFQIEK+NIPIMSTASVEIDDAIYSR+QDVIITAIDNVEAR+TVFFESIER'}]},\n+ {'group_number': 2,\n+ 'probability': 1.0,\n+ 'protein': [{'confidence': 1.0,\n+ 'group_sibling_id': 'a',\n+ 'n_indistinguishable_proteins': 1,\n+ 'pct_spectrum_ids': 0.093,\n+ 'peptide': [{'calc_neutral_pep_mass': 1519.9086,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_group_designator': 'a',\n+ 'peptide_sequence': 'AVPIAIAIISVSNPR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1166.5316,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_sequence': 'FGGSGSQVDSAR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1958.9486,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_group_designator': 'b',\n+ 'peptide_sequence': 'IVGSQEEIASWGHEYVR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2116.0047,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_group_designator': 'c',\n+ 'peptide_sequence': 'MNIASSFVNGFVNAAFGQDK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1451.8096,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.9989,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_group_designator': 'd',\n+ 'peptide_sequence': 'VGQAVDVVGQAGKPK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2456.3566,\n+ 'charge': 3,\n+ 'exp_tot_instances': 2.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 2,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 
0.9997,\n+ 'peptide_group_designator': 'e',\n+ 'peptide_sequence': 'AEIATEEFIPVTPIIEGFVIIR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2217.1027,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_sequence': 'APVQPQQSPAAAPGGTDEKPSGK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1519.9086,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_group_designator': 'a',\n+ 'peptide_sequence': 'AVPIAIAIISVSNPR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2460.2245,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.9989,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_sequence': 'DKAPVQPQQSPAAAPGGTDEKPSGK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1486.6874,\n+ 'charge': 3,\n+ 'exp_tot_instances': 2.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'mod_aminoacid_mass': [{'mass': '228.056870', 'position': '6'}],\n+ 'modified_peptide': 'GTITICPYHSDR',\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 2,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_sequence': 'GTITICPYHSDR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1958.9486,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_group_designator': 'b',\n+ 'peptide_sequence': 'IVGSQEEIASWGHEYVR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2116.0047,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_group_designator': 'c',\n+ 'peptide_sequence': 'MNIASSFVNGFVNAAFGQDK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2078.0909,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_sequence': 'TITGFQTHTTPVIIAHGER',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1451.8096,\n+ 'charge': 3,\n+ 
'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_group_designator': 'd',\n+ 'peptide_sequence': 'VGQAVDVVGQAGKPK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1712.8477,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9997,\n+ 'initial_probability': 0.999,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9997,\n+ 'peptide_sequence': 'VPDDIYKTHIENNR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 834.4235,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9996,\n+ 'initial_probability': 0.9988,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9996,\n+ 'peptide_sequence': 'YGEPTIR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2000.0765,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9996,\n+ 'initial_probability': 0.9986,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.8,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9996,\n+ 'peptide_sequence': 'MIVTFDEEIRPIPVSVR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2584.4516,\n+ 'charge': 3,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9994,\n+ 'initial_probability': 0.9979,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.81,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9994,\n+ 'peptide_sequence': 'AEIATEEFIPVTPIIEGFVIIRK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1540.8031,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9992,\n+ 'initial_probability': 0.9973,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'mod_aminoacid_mass': [{'mass': '228.056870', 'position': '7'}],\n+ 'modified_peptide': 'SGAIIACGIVNSGVR',\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.81,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9992,\n+ 'peptide_sequence': 'SGAIIACGIVNSGVR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1279.5972,\n+ 'charge': 2,\n+ 'exp_tot_instances': 1.0,\n+ 'fpkm_adjusted_probability': 0.9988,\n+ 'initial_probability': 0.9959,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.81,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9988,\n+ 'peptide_sequence': 'YIYSSEDYIK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2520.3227,\n+ 'charge': 3,\n+ 'exp_tot_instances': 0.99,\n+ 'fpkm_adjusted_probability': 0.9975,\n+ 'initial_probability': 0.9917,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'mod_aminoacid_mass': [{'mass': '111.032030', 'position': '1'}],\n+ 'modified_peptide': 
'E[111]WQEIDDAEKVQREPIITIVK',\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.81,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9975,\n+ 'peptide_sequence': 'EWQEIDDAEKVQREPIITIVK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 2456.3566,\n+ 'charge': 2,\n+ 'exp_tot_instances': 0.99,\n+ 'fpkm_adjusted_probability': 0.9969,\n+ 'initial_probability': 0.9896,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.81,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9969,\n+ 'peptide_group_designator': 'e',\n+ 'peptide_sequence': 'AEIATEEFIPVTPIIEGFVIIR',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1294.7972,\n+ 'charge': 3,\n+ 'exp_tot_instances': 0.98,\n+ 'fpkm_adjusted_probability': 0.995,\n+ 'initial_probability': 0.9832,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.82,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.995,\n+ 'peptide_sequence': 'VQREPIITIVK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 1015.5913,\n+ 'charge': 2,\n+ 'exp_tot_instances': 0.86,\n+ 'fpkm_adjusted_probability': 0.9544,\n+ 'initial_probability': 0.8603,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.94,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9544,\n+ 'peptide_sequence': 'INIIDTISK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 911.5691,\n+ 'charge': 2,\n+ 'exp_tot_instances': 0.86,\n+ 'fpkm_adjusted_probability': 0.9526,\n+ 'initial_probability': 0.8555,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 24.95,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9526,\n+ 'peptide_sequence': 'EPIITIVK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 973.479,\n+ 'charge': 2,\n+ 'exp_tot_instances': 0.8,\n+ 'fpkm_adjusted_probability': 0.9297,\n+ 'initial_probability': 0.7956,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': True,\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 25.01,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.9297,\n+ 'peptide_sequence': 'EIDIMEPK',\n+ 'weight': 1.0},\n+ {'calc_neutral_pep_mass': 889.448,\n+ 'charge': 2,\n+ 'exp_tot_instances': 0.65,\n+ 'fpkm_adjusted_probability': 0.8644,\n+ 'initial_probability': 0.6523,\n+ 'is_contributing_evidence': True,\n+ 'is_nondegenerate_evidence': False,\n+ 'mod_aminoacid_mass': [{'mass': '228.056870', 'position': '1'}],\n+ 'modified_peptide': 'CAIGVFR',\n+ 'n_enzymatic_termini': 2,\n+ 'n_instances': 1,\n+ 'n_sibling_peptides': 25.45,\n+ 'n_sibling_peptides_bin': 8,\n+ 'nsp_adjusted_probability': 0.8644,\n+ 'peptide_parent_protein': [{'protein_name': 'DECOY_sp|A0A5B9|TRBC2_HUMAN'}],\n+ 'peptide_sequence': 'CAIGVFR',\n+ 'weight': 0.54}],\n+ 'percent_coverage': 29.3,\n+ 'probability': 1.0,\n+ 'prot_length': 908,\n+ 'protein_description': '26S proteasome non-ATPase regulatory subunit 2 OS=Homo sapiens GN=PSMD2 PE=1 SV=3',\n+ 'protein_name': 'sp|Q13200|PSMD2_HUMAN',\n+ 'raw_intensity': '0.000',\n+ 'total_number_distinct_peptides': 29,\n+ 'total_number_peptides': 29,\n+ 'unique_stripped_peptides': 
'AEIATEEFIPVTPIIEGFVIIR+AEIATEEFIPVTPIIEGFVIIRK+APVQPQQSPAAAPGGTDEKPSGK+AVPIAIAIISVSNPR+CAIGVFR+DKAPVQPQQSPAAAPGGTDEKPSGK+EIDIMEPK+EPIITIVK+EWQEIDDAEKVQREPIITIVK+FGGSGSQVDSAR+GTITICPYHSDR+INIIDTISK+IVGSQEEIASWGHEYVR+MIVTFDEEIRPIPVSVR+MNIASSFVNGFVNAAFGQDK+SGAIIACGIVNSGVR+TITGFQTHTTPVIIAHGER+VGQAVDVVGQAGKPK+VPDDIYKTHIENNR+VQREPIITIVK+YGEPTIR+YIYSSEDYIK'}]}]\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test.prot.xml",
"new_path": "tests/test.prot.xml",
"diff": "</peptide>\n</protein>\n</protein_group>\n+</protein_summary>\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/test_protxml.py",
"diff": "+from os import path\n+import pyteomics\n+pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\n+from itertools import product\n+import unittest\n+from pyteomics.protxml import ProtXML, read, chain#, filter\n+from data import protxml_results\n+import operator as op\n+\n+class ProtXMLTest(unittest.TestCase):\n+ maxDiff = None\n+ _kw = {'full_output': False, 'fdr': 1,\n+ 'key': op.itemgetter('probability'),\n+ 'reverse': True\n+ }\n+ path = 'test.prot.xml'\n+\n+ def test_read(self):\n+ for rs, it in product([True, False], repeat=2):\n+ for func in [ProtXML, read, chain,\n+ lambda x, **kw: chain.from_iterable([x], **kw),\n+ # lambda x, **kw: filter(x, **ProtXMLTest._kw),\n+ # lambda x, **kw: filter.chain(x, **ProtXMLTest._kw),\n+ # lambda x, **kw: filter.chain.from_iterable([x], **ProtXMLTest._kw)\n+ ]:\n+ with func(self.path, read_schema=rs, iterative=it) as r:\n+ self.assertEqual(list(r), protxml_results)\n+\n+if __name__ == '__main__':\n+ unittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Start protxml test |
377,522 | 21.05.2018 00:48:36 | -10,800 | 9dd17952e094adb28e53661507bc4d7745aea53a | Add fdr, qvalues and filter for protxml | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.5b2\n\\ No newline at end of file\n+3.5b3\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/protxml.py",
"new_path": "pyteomics/protxml.py",
"diff": "@@ -73,6 +73,7 @@ This module requres :py:mod:`lxml`.\n# limitations under the License.\nfrom . import xml, auxiliary as aux, _schema_defaults\n+import operator as op\nclass ProtXML(xml.XML):\n\"\"\"Parser class for protXML files.\"\"\"\n@@ -118,6 +119,9 @@ class ProtXML(xml.XML):\nif 'modification_info' in info:\n# this is a list with one element\ninfo.update(info.pop('modification_info')[0])\n+\n+ if 'unique_stripped_peptides' in info:\n+ info['unique_stripped_peptides'] = info['unique_stripped_peptides'].split('+')\nreturn info\ndef read(source, read_schema=False, iterative=True, **kwargs):\n@@ -148,6 +152,12 @@ def read(source, read_schema=False, iterative=True, **kwargs):\nreturn ProtXML(source, read_schema=read_schema, iterative=iterative)\nchain = aux._make_chain(read, 'read')\n+is_decoy = lambda pg: all(p['protein_name'].startswith('DECOY_') for p in pg['protein'])\n+fdr = aux._make_fdr(is_decoy)\n+_key = op.itemgetter('probability')\n+qvalues = aux._make_qvalues(chain, is_decoy, _key)\n+filter = aux._make_filter(chain, is_decoy, _key, qvalues)\n+filter.chain = aux._make_chain(filter, 'filter', True)\ndef DataFrame(*args, **kwargs):\n\"\"\"Read protXML output files into a :py:class:`pandas.DataFrame`.\n@@ -187,9 +197,7 @@ def DataFrame(*args, **kwargs):\nout = dict(info)\nout.update(prot)\nif 'unique_stripped_peptides' in out:\n- if sep is None:\n- out['unique_stripped_peptides'] = out['unique_stripped_peptides'].split('+')\n- else:\n+ if sep is not None:\nout['unique_stripped_peptides'] = sep.join(out['unique_stripped_peptides'].split('+'))\nif 'indistinguishable_protein' in out:\nif sep is None:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -1772,9 +1772,9 @@ protxml_results =[{'group_number': 1,\n'raw_intensity': '0.000',\n'total_number_distinct_peptides': 7,\n'total_number_peptides': 7,\n- 'unique_stripped_peptides': 'AIISNEATK+IIPAIATTTATVSGIVAIEMIK+NAIFQIEK+NIPIMSTASVEIDDAIYSR+QDVIITAIDNVEAR+TVFFESIER'}]},\n+ 'unique_stripped_peptides': ['AIISNEATK', 'IIPAIATTTATVSGIVAIEMIK', 'NAIFQIEK', 'NIPIMSTASVEIDDAIYSR', 'QDVIITAIDNVEAR', 'TVFFESIER']}]},\n{'group_number': 2,\n- 'probability': 1.0,\n+ 'probability': 0.999,\n'protein': [{'confidence': 1.0,\n'group_sibling_id': 'a',\n'n_indistinguishable_proteins': 1,\n@@ -2180,8 +2180,8 @@ protxml_results =[{'group_number': 1,\n'probability': 1.0,\n'prot_length': 908,\n'protein_description': '26S proteasome non-ATPase regulatory subunit 2 OS=Homo sapiens GN=PSMD2 PE=1 SV=3',\n- 'protein_name': 'sp|Q13200|PSMD2_HUMAN',\n+ 'protein_name': 'DECOY_sp|Q13200|PSMD2_HUMAN',\n'raw_intensity': '0.000',\n'total_number_distinct_peptides': 29,\n'total_number_peptides': 29,\n- 'unique_stripped_peptides': 'AEIATEEFIPVTPIIEGFVIIR+AEIATEEFIPVTPIIEGFVIIRK+APVQPQQSPAAAPGGTDEKPSGK+AVPIAIAIISVSNPR+CAIGVFR+DKAPVQPQQSPAAAPGGTDEKPSGK+EIDIMEPK+EPIITIVK+EWQEIDDAEKVQREPIITIVK+FGGSGSQVDSAR+GTITICPYHSDR+INIIDTISK+IVGSQEEIASWGHEYVR+MIVTFDEEIRPIPVSVR+MNIASSFVNGFVNAAFGQDK+SGAIIACGIVNSGVR+TITGFQTHTTPVIIAHGER+VGQAVDVVGQAGKPK+VPDDIYKTHIENNR+VQREPIITIVK+YGEPTIR+YIYSSEDYIK'}]}]\n\\ No newline at end of file\n+ 'unique_stripped_peptides': ['AEIATEEFIPVTPIIEGFVIIR', 'AEIATEEFIPVTPIIEGFVIIRK', 'APVQPQQSPAAAPGGTDEKPSGK', 'AVPIAIAIISVSNPR', 'CAIGVFR', 'DKAPVQPQQSPAAAPGGTDEKPSGK', 'EIDIMEPK', 'EPIITIVK', 'EWQEIDDAEKVQREPIITIVK', 'FGGSGSQVDSAR', 'GTITICPYHSDR', 'INIIDTISK', 'IVGSQEEIASWGHEYVR', 'MIVTFDEEIRPIPVSVR', 'MNIASSFVNGFVNAAFGQDK', 'SGAIIACGIVNSGVR', 'TITGFQTHTTPVIIAHGER', 'VGQAVDVVGQAGKPK', 'VPDDIYKTHIENNR', 'VQREPIITIVK', 'YGEPTIR', 'YIYSSEDYIK']}]}]\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test.prot.xml",
"new_path": "tests/test.prot.xml",
"diff": "</peptide>\n</protein>\n</protein_group>\n-<protein_group group_number=\"2\" probability=\"1.0000\">\n- <protein protein_name=\"sp|Q13200|PSMD2_HUMAN\" n_indistinguishable_proteins=\"1\" probability=\"1.0000\" percent_coverage=\"29.3\" unique_stripped_peptides=\"AEIATEEFIPVTPIIEGFVIIR+AEIATEEFIPVTPIIEGFVIIRK+APVQPQQSPAAAPGGTDEKPSGK+AVPIAIAIISVSNPR+CAIGVFR+DKAPVQPQQSPAAAPGGTDEKPSGK+EIDIMEPK+EPIITIVK+EWQEIDDAEKVQREPIITIVK+FGGSGSQVDSAR+GTITICPYHSDR+INIIDTISK+IVGSQEEIASWGHEYVR+MIVTFDEEIRPIPVSVR+MNIASSFVNGFVNAAFGQDK+SGAIIACGIVNSGVR+TITGFQTHTTPVIIAHGER+VGQAVDVVGQAGKPK+VPDDIYKTHIENNR+VQREPIITIVK+YGEPTIR+YIYSSEDYIK\" group_sibling_id=\"a\" total_number_peptides=\"29\" total_number_distinct_peptides=\"29\" pct_spectrum_ids=\"0.093\" raw_intensity=\"0.000\" confidence=\"1.000\">\n+<protein_group group_number=\"2\" probability=\"0.999\">\n+ <protein protein_name=\"DECOY_sp|Q13200|PSMD2_HUMAN\" n_indistinguishable_proteins=\"1\" probability=\"1.0000\" percent_coverage=\"29.3\" unique_stripped_peptides=\"AEIATEEFIPVTPIIEGFVIIR+AEIATEEFIPVTPIIEGFVIIRK+APVQPQQSPAAAPGGTDEKPSGK+AVPIAIAIISVSNPR+CAIGVFR+DKAPVQPQQSPAAAPGGTDEKPSGK+EIDIMEPK+EPIITIVK+EWQEIDDAEKVQREPIITIVK+FGGSGSQVDSAR+GTITICPYHSDR+INIIDTISK+IVGSQEEIASWGHEYVR+MIVTFDEEIRPIPVSVR+MNIASSFVNGFVNAAFGQDK+SGAIIACGIVNSGVR+TITGFQTHTTPVIIAHGER+VGQAVDVVGQAGKPK+VPDDIYKTHIENNR+VQREPIITIVK+YGEPTIR+YIYSSEDYIK\" group_sibling_id=\"a\" total_number_peptides=\"29\" total_number_distinct_peptides=\"29\" pct_spectrum_ids=\"0.093\" raw_intensity=\"0.000\" confidence=\"1.000\">\n<parameter name=\"prot_length\" value=\"908\"/>\n<annotation protein_description=\"26S proteasome non-ATPase regulatory subunit 2 OS=Homo sapiens GN=PSMD2 PE=1 SV=3\"/>\n<peptide peptide_sequence=\"AVPIAIAIISVSNPR\" charge=\"2\" initial_probability=\"0.9990\" nsp_adjusted_probability=\"0.9997\" fpkm_adjusted_probability=\"0.9997\" peptide_group_designator=\"a\" weight=\"1.00\" is_nondegenerate_evidence=\"Y\" n_enzymatic_termini=\"2\" n_sibling_peptides=\"24.80\" n_sibling_peptides_bin=\"8\" n_instances=\"1\" exp_tot_instances=\"1.00\" is_contributing_evidence=\"Y\" calc_neutral_pep_mass=\"1519.9086\">\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_protxml.py",
"new_path": "tests/test_protxml.py",
"diff": "@@ -3,15 +3,16 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nfrom itertools import product\nimport unittest\n-from pyteomics.protxml import ProtXML, read, chain#, filter\n+from pyteomics.protxml import ProtXML, read, chain, filter, fdr, qvalues\nfrom data import protxml_results\nimport operator as op\nclass ProtXMLTest(unittest.TestCase):\nmaxDiff = None\n- _kw = {'full_output': False, 'fdr': 1,\n+ _kw = {'full_output': False, 'fdr': 1.1,\n'key': op.itemgetter('probability'),\n- 'reverse': True\n+ 'reverse': True,\n+ 'remove_decoy': False,\n}\npath = 'test.prot.xml'\n@@ -19,12 +20,26 @@ class ProtXMLTest(unittest.TestCase):\nfor rs, it in product([True, False], repeat=2):\nfor func in [ProtXML, read, chain,\nlambda x, **kw: chain.from_iterable([x], **kw),\n- # lambda x, **kw: filter(x, **ProtXMLTest._kw),\n- # lambda x, **kw: filter.chain(x, **ProtXMLTest._kw),\n- # lambda x, **kw: filter.chain.from_iterable([x], **ProtXMLTest._kw)\n+ lambda x, **kw: filter(x, **ProtXMLTest._kw),\n+ lambda x, **kw: filter.chain(x, **ProtXMLTest._kw),\n+ lambda x, **kw: filter.chain.from_iterable([x], **ProtXMLTest._kw)\n]:\nwith func(self.path, read_schema=rs, iterative=it) as r:\nself.assertEqual(list(r), protxml_results)\n+ def test_fdr(self):\n+ with ProtXML(self.path) as f:\n+ self.assertEqual(fdr(f), 1.0)\n+\n+ def test_filter(self):\n+ kw = self._kw.copy()\n+ kw['remove_decoy'] = True\n+ x = filter(self.path, **kw)\n+ self.assertEqual(list(x), [protxml_results[0]])\n+\n+ def test_qvalues(self):\n+ q = qvalues(self.path, **self._kw)\n+ self.assertEqual(list(q['q']), [0, 1])\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add fdr, qvalues and filter for protxml |
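The new protxml helpers follow the target-decoy API of the other parsers. A sketch of filtering protein groups by ProteinProphet probability (keyword choices mirror the test above; test.prot.xml is the fixture):

    import operator as op
    from pyteomics import protxml

    # probability is "higher is better", hence reverse=True;
    # full_output=False keeps the result a lazy iterator
    with protxml.filter('test.prot.xml', fdr=0.05, full_output=False,
                        key=op.itemgetter('probability'), reverse=True) as fg:
        for group in fg:
            print(group['group_number'], group['probability'])

    # file-wide FDR under the DECOY_ protein-name convention
    with protxml.ProtXML('test.prot.xml') as f:
        print(protxml.fdr(f))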
377,522 | 21.05.2018 01:53:16 | -10,800 | fe6f539b576aeaaf3abf18a5613f6bb2c5dd8606 | ProtXML doc update | [
{
"change_type": "MODIFY",
"old_path": "doc/source/api/protxml.rst",
"new_path": "doc/source/api/protxml.rst",
"diff": "----------\nfiles : iterable\nIterable of file names or file objects.\n+\n+ .. autofunction:: filter\n+\n+ .. py:function :: filter.chain(*files, **kwargs)\n+\n+ Chain :py:func:`filter` for several files.\n+ Positional arguments should be file names or file objects.\n+ Keyword arguments are passed to the :py:func:`filter` function.\n+\n+ .. py:function :: filter.chain.from_iterable(*files, **kwargs)\n+\n+ Chain :py:func:`filter` for several files.\n+ Keyword arguments are passed to the :py:func:`filter` function.\n+\n+ Parameters\n+ ----------\n+ files : iterable\n+ Iterable of file names or file objects.\n+\n+ .. autofunction:: fdr\n+ .. autofunction:: qvalues\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/protxml.py",
"new_path": "pyteomics/protxml.py",
"diff": "@@ -43,12 +43,12 @@ Target-decoy approach\n:py:func:`filter_df` - filter protXML files and return a :py:class:`pandas.DataFrame`.\n- :py:func:`fdr` - estimate the false discovery rate of a PSM set using the\n+ :py:func:`fdr` - estimate the false discovery rate of a set of protein groups using the\ntarget-decoy approach.\n- :py:func:`qvalues` - get an array of scores and local FDR values for protein groups using the target-decoy approach.\n+ :py:func:`qvalues` - get an array of scores and *q* values for protein groups using the target-decoy approach.\n- :py:func:`is_decoy` - determine whether a protein group is decoy or not.\n+ :py:func:`is_decoy` - determine whether a protein group is decoy or not. This function may not suit your use case.\nDependencies\n------------\n@@ -152,7 +152,25 @@ def read(source, read_schema=False, iterative=True, **kwargs):\nreturn ProtXML(source, read_schema=read_schema, iterative=iterative)\nchain = aux._make_chain(read, 'read')\n-is_decoy = lambda pg: all(p['protein_name'].startswith('DECOY_') for p in pg['protein'])\n+def is_decoy(pg):\n+ \"\"\"Determine if a protein group should be considered decoy.\n+\n+ This function checks that all protein names in a group start with \"DECOY_\".\n+ You may need to provide your own function for correct filtering and FDR estimation.\n+\n+ Parameters\n+ ----------\n+\n+ pg : dict\n+ A protein group dict produced by the :py:class:`ProtXML` parser.\n+\n+\n+ Returns\n+ -------\n+\n+ out : bool\n+ \"\"\"\n+ return all(p['protein_name'].startswith('DECOY_') for p in pg['protein'])\nfdr = aux._make_fdr(is_decoy)\n_key = op.itemgetter('probability')\nqvalues = aux._make_qvalues(chain, is_decoy, _key)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | ProtXML doc update |
377,522 | 21.05.2018 14:36:17 | -10,800 | af34b56df5caac345c1508302b705ccabcec5490 | Add prefix to is_decoy, add filter_df | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/protxml.py",
"new_path": "pyteomics/protxml.py",
"diff": "@@ -152,10 +152,11 @@ def read(source, read_schema=False, iterative=True, **kwargs):\nreturn ProtXML(source, read_schema=read_schema, iterative=iterative)\nchain = aux._make_chain(read, 'read')\n-def is_decoy(pg):\n+\n+def is_decoy(pg, prefix='DECOY_'):\n\"\"\"Determine if a protein group should be considered decoy.\n- This function checks that all protein names in a group start with \"DECOY_\".\n+ This function checks that all protein names in a group start with `prefix`.\nYou may need to provide your own function for correct filtering and FDR estimation.\nParameters\n@@ -163,14 +164,17 @@ def is_decoy(pg):\npg : dict\nA protein group dict produced by the :py:class:`ProtXML` parser.\n-\n+ prefix : str, optional\n+ A prefix used to mark decoy proteins. Default is `'DECOY_'`.\nReturns\n-------\nout : bool\n\"\"\"\n- return all(p['protein_name'].startswith('DECOY_') for p in pg['protein'])\n+ return all(p['protein_name'].startswith(prefix) for p in pg['protein'])\n+\n+\nfdr = aux._make_fdr(is_decoy)\n_key = op.itemgetter('probability')\nqvalues = aux._make_qvalues(chain, is_decoy, _key)\n@@ -180,6 +184,8 @@ filter.chain = aux._make_chain(filter, 'filter', True)\ndef DataFrame(*args, **kwargs):\n\"\"\"Read protXML output files into a :py:class:`pandas.DataFrame`.\n+ .. note :: Rows in the DataFrame correspond to individual proteins, not protein groups.\n+\nRequires :py:mod:`pandas`.\nParameters\n@@ -224,3 +230,41 @@ def DataFrame(*args, **kwargs):\nout['indistinguishable_protein'] = sep.join(p['protein_name'] for p in out['indistinguishable_protein'])\nyield out\nreturn pd.DataFrame(gen_items(), **pd_kwargs)\n+\n+def filter_df(*args, **kwargs):\n+ \"\"\"Read protXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.\n+ Positional arguments can be protXML files or DataFrames.\n+\n+ .. note :: Rows in the DataFrame correspond to individual proteins, not protein groups.\n+\n+ Requires :py:mod:`pandas`.\n+\n+ Parameters\n+ ----------\n+ key : str / iterable / callable, optional\n+ Default is 'probability'.\n+ is_decoy : str / iterable / callable, optional\n+ Default is to check that \"protein_name\" starts with `'DECOY_'`.\n+ reverse : bool, optional\n+ Should be :py:const:`True` if higher score is better.\n+ Default is :py:const:`True` (because the default key is 'probability').\n+\n+ *args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.\n+\n+ Returns\n+ -------\n+ out : pandas.DataFrame\n+ \"\"\"\n+ import pandas as pd\n+ kwargs.setdefault('key', 'probability')\n+ kwargs.setdefault('reverse', True)\n+ if all(isinstance(arg, pd.DataFrame) for arg in args):\n+ if len(args) > 1:\n+ df = pd.concat(args)\n+ else:\n+ df = args[0]\n+ else:\n+ read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep'] if k in kwargs}\n+ df = DataFrame(*args, **read_kw)\n+ kwargs.setdefault('is_decoy', df['protein_name'].str.startswith('DECOY_'))\n+ return aux.filter(df, **kwargs)\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add prefix to is_decoy, add filter_df |
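filter_df brings the same filtering to pandas, one row per protein. A sketch under the defaults introduced here (requires pandas; 'rev_' below is an arbitrary, made-up decoy prefix):

    from pyteomics import protxml

    # 'probability' is the default key; names starting with 'DECOY_'
    # are treated as decoys unless told otherwise
    df = protxml.filter_df('test.prot.xml', fdr=0.05)

    # a custom decoy convention can be passed as a boolean mask
    full = protxml.DataFrame('test.prot.xml')
    filtered = protxml.filter_df(
        full, fdr=0.05, is_decoy=full['protein_name'].str.startswith('rev_'))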
377,522 | 21.05.2018 18:06:34 | -10,800 | 11702baa40b32a4eecb4ac3f116424c7b7a637dd | Use unittest assertions in new mzml test | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mzml.py",
"new_path": "tests/test_mzml.py",
"diff": "@@ -114,9 +114,9 @@ class MzmlTest(unittest.TestCase):\nderefed = list(reader.iterfind(\"instrumentConfiguration\", retrieve_refs=True))\nreader.reset()\nraw = list(reader.iterfind(\"instrumentConfiguration\", retrieve_refs=False))\n- assert raw[0].get(\"ref\") == 'CommonInstrumentParams'\n- assert \"ref\" not in derefed[0]\n- assert derefed[0].get('instrument serial number') == 'SN06061F'\n+ self.assertEqual(raw[0].get(\"ref\"), 'CommonInstrumentParams')\n+ self.assertNotIn(\"ref\", derefed[0])\n+ self.assertEqual(derefed[0].get('instrument serial number'), 'SN06061F')\nif __name__ == '__main__':\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Use unittest assertions in new mzml test |
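Swapping bare assert for the unittest methods changes failure reporting, not test logic. A minimal illustration of the trade-off (hypothetical values):

    import unittest

    class Demo(unittest.TestCase):
        def test_membership(self):
            derefed = {'instrument serial number': 'SN06061F'}
            # a bare "assert 'ref' not in derefed" raises a context-free
            # AssertionError and is stripped under python -O; the unittest
            # method prints the offending values in the failure message
            self.assertNotIn('ref', derefed)

    if __name__ == '__main__':
        unittest.main()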
377,522 | 21.05.2018 19:38:18 | -10,800 | bc4d819fed6c8ab0495aa7bbb4f2d2c88d19ab24 | Add XML._retrieve_refs_enabled | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/xml.py",
"new_path": "pyteomics/xml.py",
"diff": "@@ -123,6 +123,7 @@ class XML(FileReader):\n_default_id_attr = 'id'\n_huge_tree = False\n_skip_empty_cvparam_values = False\n+ _retrieve_refs_enabled = None # only some subclasses implement this\n# Configurable plugin logic\n_converters = XMLValueConverter.converters()\n@@ -188,6 +189,7 @@ class XML(FileReader):\nself._converters_items = self._converters.items()\nself._huge_tree = kwargs.get('huge_tree', self._huge_tree)\nself._skip_empty_cvparam_values = kwargs.get('skip_empty_cvparam_values', False)\n+ self._retrieve_refs_enabled = kwargs.get('retrieve_refs')\n@_keepstate\ndef _get_version_info(self):\n@@ -377,7 +379,7 @@ class XML(FileReader):\nraise PyteomicsError(message)\n# resolve refs\n- if kwargs.get('retrieve_refs'):\n+ if kwargs.get('retrieve_refs', self._retrieve_refs_enabled):\nself._retrieve_refs(info, **kwargs)\n# flatten the excessive nesting\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add XML._retrieve_refs_enabled |
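The stored flag makes the constructor's retrieve_refs act as the default for later calls. A sketch of the resulting precedence (any XML subclass implementing _retrieve_refs behaves alike; mzid shown):

    from pyteomics import mzid

    reader = mzid.MzIdentML('test.mzid', retrieve_refs=False)

    # no per-call keyword: the constructor setting (False) applies
    raw = next(reader.iterfind('SpectrumIdentificationResult'))

    reader.reset()
    # an explicit per-call keyword still overrides the stored default
    resolved = next(reader.iterfind('SpectrumIdentificationResult',
                                    retrieve_refs=True))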
377,522 | 21.05.2018 22:28:20 | -10,800 | 888eb20eb90999996980e9fdc070fb27889f042e | Add tests for protxml.filter_df and DataFrame | [
{
"change_type": "MODIFY",
"old_path": "tests/test_protxml.py",
"new_path": "tests/test_protxml.py",
"diff": "@@ -3,7 +3,7 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nfrom itertools import product\nimport unittest\n-from pyteomics.protxml import ProtXML, read, chain, filter, fdr, qvalues\n+from pyteomics import protxml\nfrom data import protxml_results\nimport operator as op\n@@ -18,28 +18,39 @@ class ProtXMLTest(unittest.TestCase):\ndef test_read(self):\nfor rs, it in product([True, False], repeat=2):\n- for func in [ProtXML, read, chain,\n- lambda x, **kw: chain.from_iterable([x], **kw),\n- lambda x, **kw: filter(x, **ProtXMLTest._kw),\n- lambda x, **kw: filter.chain(x, **ProtXMLTest._kw),\n- lambda x, **kw: filter.chain.from_iterable([x], **ProtXMLTest._kw)\n+ for func in [protxml.ProtXML, protxml.read, protxml.chain,\n+ lambda x, **kw: protxml.chain.from_iterable([x], **kw),\n+ lambda x, **kw: protxml.filter(x, **ProtXMLTest._kw),\n+ lambda x, **kw: protxml.filter.chain(x, **ProtXMLTest._kw),\n+ lambda x, **kw: protxml.filter.chain.from_iterable([x], **ProtXMLTest._kw)\n]:\nwith func(self.path, read_schema=rs, iterative=it) as r:\nself.assertEqual(list(r), protxml_results)\ndef test_fdr(self):\n- with ProtXML(self.path) as f:\n- self.assertEqual(fdr(f), 1.0)\n+ with protxml.ProtXML(self.path) as f:\n+ self.assertEqual(protxml.fdr(f), 1.0)\ndef test_filter(self):\nkw = self._kw.copy()\nkw['remove_decoy'] = True\n- x = filter(self.path, **kw)\n+ x = protxml.filter(self.path, **kw)\nself.assertEqual(list(x), [protxml_results[0]])\ndef test_qvalues(self):\n- q = qvalues(self.path, **self._kw)\n+ q = protxml.qvalues(self.path, **self._kw)\nself.assertEqual(list(q['q']), [0, 1])\n+ def test_df(self):\n+ df = protxml.DataFrame(self.path)\n+ self.assertEqual(df.shape, (2, 15))\n+\n+ def test_filter_df(self):\n+ kw = self._kw.copy()\n+ del kw['full_output']\n+ kw['key'] = 'probability'\n+ fdf = protxml.filter_df(self.path, **kw)\n+ self.assertEqual(fdf.shape, (2, 17))\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add tests for protxml.filter_df and DataFrame |
377,522 | 22.05.2018 17:37:48 | -10,800 | 364792a3105cf1e57cbca62190385b5f790d5e18 | Doc update on +1 | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/target_decoy.py",
"new_path": "pyteomics/auxiliary/target_decoy.py",
"diff": "@@ -261,9 +261,16 @@ def _make_qvalues(read, is_decoy, key):\ncorrection : int or float, keyword only, optional\nPossible values are 0, 1 and 2, or floating point numbers between 0 and 1.\n- Default is 0 (no correction); 1 accounts for the probability that a false\n- positive scores better than the first excluded decoy PSM; 2 also corrects\n- that probability for finite size of the sample. If a floating point number\n+\n+ 0 (default): no correction;\n+\n+ 1: enable \"+1\" correction. This accounts for the probability that a false\n+ positive scores better than the first excluded decoy PSM;\n+\n+ 2: this also corrects that probability for finite size of the sample,\n+ so the correction will be slightly less than \"+1\".\n+\n+ If a floating point number\nis given, then instead of the expectation value for the number of false PSMs,\nthe confidence value is used. The value of `correction` is then interpreted as\ndesired confidence level. E.g., if correction=0.95, then the calculated q-values\n@@ -577,9 +584,16 @@ def _make_filter(read, is_decoy, key, qvalues):\ncorrection : int or float, keyword only, optional\nPossible values are 0, 1 and 2, or floating point numbers between 0 and 1.\n- Default is 0 (no correction); 1 accounts for the probability that a false\n- positive scores better than the first excluded decoy PSM; 2 also corrects\n- that probability for finite size of the sample. If a floating point number\n+\n+ 0 (default): no correction;\n+\n+ 1: enable \"+1\" correction. This accounts for the probability that a false\n+ positive scores better than the first excluded decoy PSM;\n+\n+ 2: this also corrects that probability for finite size of the sample,\n+ so the correction will be slightly less than \"+1\".\n+\n+ If a floating point number\nis given, then instead of the expectation value for the number of false PSMs,\nthe confidence value is used. The value of `correction` is then interpreted as\ndesired confidence level. E.g., if correction=0.95, then the calculated q-values\n@@ -770,9 +784,16 @@ def _make_fdr(is_decoy):\ncorrection : int or float, optional\nPossible values are 0, 1 and 2, or floating point numbers between 0 and 1.\n- Default is 0 (no correction); 1 accounts for the probability that a false\n- positive scores better than the first excluded decoy PSM; 2 also corrects\n- that probability for finite size of the sample. If a floating point number\n+\n+ 0 (default): no correction;\n+\n+ 1: enable \"+1\" correction. This accounts for the probability that a false\n+ positive scores better than the first excluded decoy PSM;\n+\n+ 2: this also corrects that probability for finite size of the sample,\n+ so the correction will be slightly less than \"+1\".\n+\n+ If a floating point number\nis given, then instead of the expectation value for the number of false PSMs,\nthe confidence value is used. The value of `correction` is then interpreted as\ndesired confidence level. E.g., if correction=0.95, then the calculated q-values\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Doc update on +1 |
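The reworded docstring separates the three correction modes; a tiny numeric check makes them concrete (aux.fdr with an explicit is_decoy; values are illustrative):

    from pyteomics import auxiliary as aux

    psms = [(0.9, False), (0.8, False), (0.7, True), (0.5, False)]

    plain = aux.fdr(psms, is_decoy=lambda psm: psm[1], correction=0)
    plus1 = aux.fdr(psms, is_decoy=lambda psm: psm[1], correction=1)
    # one decoy among three targets: plain = 1/3, plus1 = (1 + 1)/3;
    # correction=2 shrinks the "+1" term to account for sample size
    print(plain, plus1)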
377,522 | 22.05.2018 18:05:48 | -10,800 | 30a0b48adf3a260b923277c7d81bd4e4b0877e18 | Add PeptideProphet support in pepxml.DataFrame | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- New module :py:mod:`pyteomics.protxml` for parsing of ProteinProphet output files.\n+ - Add PeptideProphet analysis information to the output of :py:func:`pyteomics.pepxml.DataFrame`.\n+\n- New parameter `huge_tree` in XML parser constructors and :py:func:`read` functions.\nIt is passed to the underlying :py:mod:`lxml` calls. Default value is `False`.\nSet to `True` to overcome errors such as: `XMLSyntaxError: xmlSAX2Characters: huge text node`.\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.5b4\n\\ No newline at end of file\n+3.5b5\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -378,10 +378,19 @@ def DataFrame(*args, **kwargs):\ninfo[k] = sep.join(str(val) if val is not None else '' for val in v)\ninfo.update(sh.pop('search_score'))\nmods = sh.pop('modifications', [])\n- info['modifications'] = ','.join('{0[mass]:.3f}@{0[position]}'.format(x) for x in mods)\n+ formatted_mods = ['{0[mass]:.3f}@{0[position]}'.format(x) for x in mods]\n+ if sep is not None:\n+ info['modifications'] = sep.join(formatted_mods)\n+ else:\n+ info['modifications'] = formatted_mods\nfor k, v in sh.items():\nif isinstance(v, (str, int, float)):\ninfo[k] = v\n+ if 'analysis_result' in sh:\n+ ar = sh['analysis_result'][0]\n+ if ar['analysis'] == 'peptideprophet':\n+ info.update(ar['peptideprophet_result']['parameter'])\n+ info['probability'] = ar['peptideprophet_result']['probability']\nyield info\nreturn pd.DataFrame(gen_items(), **pd_kwargs)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add PeptideProphet support in pepxml.DataFrame |
377,522 | 22.05.2018 18:25:50 | -10,800 | a70603ebb3a2a01662d7967023f2656536270cbd | Add iProphet | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- New module :py:mod:`pyteomics.protxml` for parsing of ProteinProphet output files.\n- - Add PeptideProphet analysis information to the output of :py:func:`pyteomics.pepxml.DataFrame`.\n+ - Add PeptideProphet and iProphet analysis information to the output of :py:func:`pyteomics.pepxml.DataFrame`.\n- New parameter `huge_tree` in XML parser constructors and :py:func:`read` functions.\nIt is passed to the underlying :py:mod:`lxml` calls. Default value is `False`.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -387,10 +387,13 @@ def DataFrame(*args, **kwargs):\nif isinstance(v, (str, int, float)):\ninfo[k] = v\nif 'analysis_result' in sh:\n- ar = sh['analysis_result'][0]\n+ for ar in sh['analysis_result']:\nif ar['analysis'] == 'peptideprophet':\ninfo.update(ar['peptideprophet_result']['parameter'])\n- info['probability'] = ar['peptideprophet_result']['probability']\n+ info['peptideprophet_probability'] = ar['peptideprophet_result']['probability']\n+ elif ar['analysis'] == 'interprophet':\n+ info.update(ar['interprophet_result']['parameter'])\n+ info['interprophet_probability'] = ar['interprophet_result']['probability']\nyield info\nreturn pd.DataFrame(gen_items(), **pd_kwargs)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add iProphet |
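Together, the last two records flatten PeptideProphet and iProphet results into DataFrame columns. A sketch (requires pandas; which columns appear depends on the analyses recorded in the file):

    from pyteomics import pepxml

    df = pepxml.DataFrame('test.pep.xml')

    # per-hit probabilities and analysis parameters become flat columns
    for col in ('peptideprophet_probability', 'interprophet_probability'):
        if col in df.columns:
            print(col, (df[col] > 0.95).sum())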
377,522 | 23.05.2018 19:08:11 | -10,800 | 643f683151411c0ed677eddf203cff6527377e45 | Start decoy_prefix and decoy_suffix, pepxml seems working | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- Preserve accession information on cvParam elements in mzML parser.\nDictionaries produced by the parser can now be queried by accession using\n:py:func:`pyteomics.auxiliary.cvquery`.\n- (Contributed by J. Klein)\n+ *(Contributed by J. Klein)*\n- Add optional `decode_binary` argument in\n:py:class:`pyteomics.mzml.MzML` and :py:class:`pyteomics.mzxml.MzXML`.\nWhen set to `False`, the parsers provide binary records suitable for decoding on demand.\n- (Contributed by J. Klein)\n+ *(Contributed by J. Klein)*\n- Add method :py:meth:`write_byte_offsets` in :py:class:`pyteomics.mzml.MzML`,\n:py:class:`pyteomics.mzxml.MzXML` and :py:class:`pyteomics.mzid.MzIdentML`.\nByte offsets can be loaded later to speed up random access.\n- (Contributed by J. Klein)\n+ *(Contributed by J. Klein)*\n- Random access to MGF spectrum entries.\n- Change the default value for `retrieve_refs` to :py:const:`True` in MzIdentML constructor.\n- - Implement `retrieve_refs` for :py:class:`pyteomics.mzml.MzML` (contributed by J. Klein).\n+ - Implement `retrieve_refs` for :py:class:`pyteomics.mzml.MzML`.\n+ *(Contributed by J. Klein)*\n- New parameter `keep_cterm` in decoy generation functions in :py:mod:`pyteomics.fasta`.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/target_decoy.py",
"new_path": "pyteomics/auxiliary/target_decoy.py",
"diff": "@@ -180,7 +180,7 @@ def _construct_dtype(*args, **kwargs):\nreturn dtype\n-def _make_qvalues(read, is_decoy, key):\n+def _make_qvalues(read, is_decoy_prefix, is_decoy_suffix, key):\n\"\"\"Create a function that reads PSMs from a file and calculates q-values\nfor each value of `key`.\"\"\"\n@@ -346,7 +346,15 @@ def _make_qvalues(read, is_decoy, key):\nkeyf = np.array(list(keyf))\nif peps is None:\n- isdecoy = kwargs.pop('is_decoy', is_decoy)\n+ if 'is_decoy' not in kwargs:\n+ if 'decoy_suffix' in kwargs:\n+ isdecoy = lambda x: is_decoy_suffix(x, kwargs['decoy_suffix'])\n+ elif 'decoy_prefix' in kwargs:\n+ isdecoy = lambda x: is_decoy_prefix(x, kwargs['decoy_prefix'])\n+ else:\n+ isdecoy = is_decoy_prefix\n+ else:\n+ isdecoy = kwargs['is_decoy']\nelse:\nisdecoy = peps\n@@ -449,7 +457,7 @@ def _make_qvalues(read, is_decoy, key):\nreturn psms\nreturn scores\n- _fix_docstring(qvalues, is_decoy=is_decoy, key=key)\n+ _fix_docstring(qvalues, is_decoy=is_decoy_prefix, key=key)\nif read is _iter:\nqvalues.__doc__ = qvalues.__doc__.replace(\"\"\"positional args : file or str\nFiles to read PSMs from. All positional arguments are treated as\n@@ -665,7 +673,7 @@ def _itercontext(x, **kw):\n_iter = _make_chain(_itercontext, 'iter')\n-qvalues = _make_qvalues(_iter, None, None)\n+qvalues = _make_qvalues(_iter, None, None, None)\nfilter = _make_filter(_iter, None, None, qvalues)\nfilter.chain = _make_chain(filter, 'filter', True)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -301,7 +301,7 @@ def roc_curve(source):\nchain = aux._make_chain(read, 'read')\n-def is_decoy(psm, prefix='DECOY_'):\n+def _is_decoy_prefix(psm, prefix='DECOY_'):\n\"\"\"Given a PSM dict, return :py:const:`True` if all protein names for\nthe PSM start with ``prefix``, and :py:const:`False` otherwise. This\nfunction might not work for some pepXML flavours. Use the source to get the\n@@ -321,10 +321,16 @@ def is_decoy(psm, prefix='DECOY_'):\nreturn all(protein['protein'].startswith(prefix)\nfor protein in psm['search_hit'][0]['proteins'])\n+def _is_decoy_suffix(psm, suffix='DECOY_'):\n+ return all(protein['protein'].endswith(suffix)\n+ for protein in psm['search_hit'][0]['proteins'])\n+\n+is_decoy = _is_decoy_prefix\n+\nfdr = aux._make_fdr(is_decoy)\n_key = lambda x: min(\nsh['search_score']['expect'] for sh in x['search_hit'])\n-qvalues = aux._make_qvalues(chain, is_decoy, _key)\n+qvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, _key)\nfilter = aux._make_filter(chain, is_decoy, _key, qvalues)\nfilter.chain = aux._make_chain(filter, 'filter', True)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Start decoy_prefix and decoy_suffix, pepxml seems working |
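This refactor lets qvalues pick the decoy test from keywords; the next record extends the same treatment to filter and fdr. A sketch ('_rev' is an arbitrary example suffix):

    from pyteomics import pepxml

    # default: a PSM is decoy when all its proteins start with 'DECOY_'
    q = pepxml.qvalues('test.pep.xml')

    # a name-suffix convention instead; decoy_prefix works the same way
    q_suffix = pepxml.qvalues('test.pep.xml', decoy_suffix='_rev')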
377,522 | 24.05.2018 17:55:38 | -10,800 | a3c88dc49a182507b027d73b251b88a167afe6b3 | Add decoy prefix and suffix everywhere | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.5b5\n\\ No newline at end of file\n+3.5b6\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/target_decoy.py",
"new_path": "pyteomics/auxiliary/target_decoy.py",
"diff": "@@ -470,7 +470,7 @@ def _make_qvalues(read, is_decoy_prefix, is_decoy_suffix, key):\nreturn qvalues\n-def _make_filter(read, is_decoy, key, qvalues):\n+def _make_filter(read, is_decoy_prefix, is_decoy_suffix, key, qvalues):\n\"\"\"Create a function that reads PSMs from a file and filters them to\nthe desired FDR level (estimated by TDA), returning the top PSMs\nsorted by `key`.\n@@ -494,7 +494,15 @@ def _make_filter(read, is_decoy, key, qvalues):\nkeyf = peps\nreverse = kwargs.pop('reverse', False)\nbetter = [op.lt, op.gt][bool(reverse)]\n- isdecoy = kwargs.pop('is_decoy', is_decoy)\n+ if 'is_decoy' not in kwargs:\n+ if 'decoy_suffix' in kwargs:\n+ isdecoy = lambda x: is_decoy_suffix(x, kwargs['decoy_suffix'])\n+ elif 'decoy_prefix' in kwargs:\n+ isdecoy = lambda x: is_decoy_prefix(x, kwargs['decoy_prefix'])\n+ else:\n+ isdecoy = is_decoy_prefix\n+ else:\n+ isdecoy = kwargs['is_decoy']\nkwargs.pop('formula', None)\ndecoy_or_pep_label = _decoy_or_pep_label(**kwargs)\nscore_label = kwargs.setdefault('score_label', 'score')\n@@ -652,7 +660,7 @@ def _make_filter(read, is_decoy, key, qvalues):\nreturn filter(*args, full_output=True, **kwargs)\nreturn IteratorContextManager(filter, *args, **kwargs)\n- _fix_docstring(_filter, is_decoy=is_decoy, key=key)\n+ _fix_docstring(_filter, is_decoy=is_decoy_prefix, key=key)\nif read is _iter:\n_filter.__doc__ = _filter.__doc__.replace(\"\"\"positional args : file or str\nFiles to read PSMs from. All positional arguments are treated as\n@@ -675,7 +683,7 @@ def _itercontext(x, **kw):\n_iter = _make_chain(_itercontext, 'iter')\nqvalues = _make_qvalues(_iter, None, None, None)\n-filter = _make_filter(_iter, None, None, qvalues)\n+filter = _make_filter(_iter, None, None, None, qvalues)\nfilter.chain = _make_chain(filter, 'filter', True)\ntry:\n@@ -718,10 +726,10 @@ except ImportError:\nreturn math.log(math.factorial(n))\ndef _expectation(*a, **k):\n- raise NotImplementedError()\n+ raise NotImplementedError('NumPy required')\ndef _confidence_value(*a, **k):\n- raise NotImplementedError()\n+ raise NotImplementedError('NumPy required')\ndef _log_pi_r(d, k, p=0.5):\n@@ -732,8 +740,8 @@ def _log_pi(d, k, p=0.5):\nreturn _log_pi_r(d, k, p) + (d + 1) * math.log(1 - p)\n-def _make_fdr(is_decoy):\n- def fdr(psms=None, formula=1, is_decoy=is_decoy, ratio=1, correction=0, pep=None):\n+def _make_fdr(is_decoy_prefix, is_decoy_suffix):\n+ def fdr(psms=None, formula=1, is_decoy=None, ratio=1, correction=0, pep=None, decoy_prefix='DECOY_', decoy_suffix=None):\n\"\"\"Estimate FDR of a data set using TDA or given PEP values.\nTwo formulas can be used. The first one (default) is:\n@@ -827,6 +835,11 @@ def _make_fdr(is_decoy):\ntotal, decoy = 0, 0\nif pep is not None:\nis_decoy = pep\n+ elif is_decoy is None:\n+ if decoy_suffix is not None:\n+ is_decoy = lambda x: is_decoy_suffix(x, decoy_suffix)\n+ else:\n+ is_decoy = lambda x: is_decoy_prefix(x, decoy_prefix)\nif isinstance(is_decoy, basestring):\ndecoy = psms[is_decoy].sum()\ntotal = psms.shape[0]\n@@ -858,12 +871,12 @@ def _make_fdr(is_decoy):\nreturn float(tfalse) / (total - decoy) / ratio\nreturn (decoy + tfalse / ratio) / total\n- _fix_docstring(fdr, is_decoy=is_decoy)\n- if is_decoy is None:\n+ _fix_docstring(fdr, is_decoy=is_decoy_prefix)\n+ if is_decoy_prefix is None:\nfdr.__doc__ = fdr.__doc__.replace(\"\"\".. warning::\nThe default function may not work\nwith your files, because format flavours are diverse.\\n\"\"\", \"\")\nreturn fdr\n-fdr = _make_fdr(None)\n+fdr = _make_fdr(None, None)\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mzid.py",
"new_path": "pyteomics/mzid.py",
"diff": "@@ -306,7 +306,6 @@ def is_decoy(psm):\nreturn all(pe['isDecoy'] for sii in psm['SpectrumIdentificationItem']\nfor pe in sii['PeptideEvidenceRef'])\n-\ndef DataFrame(*args, **kwargs):\n\"\"\"Read MzIdentML files into a :py:class:`pandas.DataFrame`.\n@@ -406,9 +405,9 @@ def filter_df(*args, **kwargs):\ndf = DataFrame(*args, **kwargs)\nreturn aux.filter(df, **kwargs)\n-fdr = aux._make_fdr(is_decoy)\n+fdr = aux._make_fdr(is_decoy, None)\n_key = lambda x: min(\nsii['mascot:expectation value'] for sii in x['SpectrumIdentificationItem'])\n-qvalues = aux._make_qvalues(chain, is_decoy, _key)\n-filter = aux._make_filter(chain, is_decoy, _key, qvalues)\n+qvalues = aux._make_qvalues(chain, is_decoy, None, _key)\n+filter = aux._make_filter(chain, is_decoy, None, _key, qvalues)\nfilter.chain = aux._make_chain(filter, 'filter', True)\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/pepxml.py",
"new_path": "pyteomics/pepxml.py",
"diff": "@@ -321,17 +321,17 @@ def _is_decoy_prefix(psm, prefix='DECOY_'):\nreturn all(protein['protein'].startswith(prefix)\nfor protein in psm['search_hit'][0]['proteins'])\n-def _is_decoy_suffix(psm, suffix='DECOY_'):\n+def _is_decoy_suffix(psm, suffix='_DECOY'):\nreturn all(protein['protein'].endswith(suffix)\nfor protein in psm['search_hit'][0]['proteins'])\nis_decoy = _is_decoy_prefix\n-fdr = aux._make_fdr(is_decoy)\n+fdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)\n_key = lambda x: min(\nsh['search_score']['expect'] for sh in x['search_hit'])\nqvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, _key)\n-filter = aux._make_filter(chain, is_decoy, _key, qvalues)\n+filter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, _key, qvalues)\nfilter.chain = aux._make_chain(filter, 'filter', True)\ndef DataFrame(*args, **kwargs):\n@@ -433,10 +433,19 @@ def filter_df(*args, **kwargs):\nelse:\nread_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep'] if k in kwargs}\ndf = DataFrame(*args, **read_kw)\n+ if 'is_decoy' not in kwargs:\nif sep is not None:\n- kwargs.setdefault('is_decoy',\n- df['protein'].str.split(';').apply(lambda s: all(x.startswith('DECOY') for x in s)))\n+ if 'decoy_suffix' in kwargs:\n+ kwargs['is_decoy'] = df['protein'].str.split(';').apply(\n+ lambda s: all(x.endswith(kwargs['decoy_suffix']) for x in s))\nelse:\n- kwargs.setdefault('is_decoy',\n- df['protein'].apply(lambda s: all(x.startswith('DECOY') for x in s)))\n+ kwargs['is_decoy'] = df['protein'].str.split(';').apply(\n+ lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))\n+ else:\n+ if 'decoy_suffix' in kwargs:\n+ kwargs['is_decoy'] = df['protein'].apply(\n+ lambda s: all(x.endswith(kwargs['decoy_suffix']) for x in s))\n+ else:\n+ kwargs['is_decoy'] = df['protein'].apply(\n+ lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))\nreturn aux.filter(df, **kwargs)\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/protxml.py",
"new_path": "pyteomics/protxml.py",
"diff": "@@ -153,7 +153,7 @@ def read(source, read_schema=False, iterative=True, **kwargs):\nchain = aux._make_chain(read, 'read')\n-def is_decoy(pg, prefix='DECOY_'):\n+def _is_decoy_prefix(pg, prefix='DECOY_'):\n\"\"\"Determine if a protein group should be considered decoy.\nThis function checks that all protein names in a group start with `prefix`.\n@@ -174,11 +174,33 @@ def is_decoy(pg, prefix='DECOY_'):\n\"\"\"\nreturn all(p['protein_name'].startswith(prefix) for p in pg['protein'])\n+def _is_decoy_suffix(pg, suffix='_DECOY'):\n+ \"\"\"Determine if a protein group should be considered decoy.\n+\n+ This function checks that all protein names in a group end with `suffix`.\n+ You may need to provide your own function for correct filtering and FDR estimation.\n+\n+ Parameters\n+ ----------\n+\n+ pg : dict\n+ A protein group dict produced by the :py:class:`ProtXML` parser.\n+ suffix : str, optional\n+ A suffix used to mark decoy proteins. Default is `'_DECOY'`.\n-fdr = aux._make_fdr(is_decoy)\n+ Returns\n+ -------\n+\n+ out : bool\n+ \"\"\"\n+ return all(p['protein_name'].endswith(suffix) for p in pg['protein'])\n+\n+is_decoy = _is_decoy_prefix\n+\n+fdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)\n_key = op.itemgetter('probability')\n-qvalues = aux._make_qvalues(chain, is_decoy, _key)\n-filter = aux._make_filter(chain, is_decoy, _key, qvalues)\n+qvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, _key)\n+filter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, _key, qvalues)\nfilter.chain = aux._make_chain(filter, 'filter', True)\ndef DataFrame(*args, **kwargs):\n@@ -222,7 +244,7 @@ def DataFrame(*args, **kwargs):\nout.update(prot)\nif 'unique_stripped_peptides' in out:\nif sep is not None:\n- out['unique_stripped_peptides'] = sep.join(out['unique_stripped_peptides'].split('+'))\n+ out['unique_stripped_peptides'] = sep.join(out['unique_stripped_peptides'])\nif 'indistinguishable_protein' in out:\nif sep is None:\nout['indistinguishable_protein'] = [p['protein_name'] for p in out['indistinguishable_protein']]\n@@ -266,5 +288,9 @@ def filter_df(*args, **kwargs):\nelse:\nread_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep'] if k in kwargs}\ndf = DataFrame(*args, **read_kw)\n- kwargs.setdefault('is_decoy', df['protein_name'].str.startswith('DECOY_'))\n+ if 'is_decoy' not in kwargs:\n+ if 'decoy_suffix' in kwargs:\n+ kwargs['is_decoy'] = df['protein_name'].str.endswith(kwargs['decoy_suffix'])\n+ else:\n+ kwargs['is_decoy'] = df['protein_name'].str.startswith(kwargs.get('decoy_prefix', 'DECOY_'))\nreturn aux.filter(df, **kwargs)\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/tandem.py",
"new_path": "pyteomics/tandem.py",
"diff": "@@ -218,9 +218,9 @@ def iterfind(source, path, **kwargs):\nchain = aux._make_chain(read, 'read')\n-def is_decoy(psm, prefix='DECOY_'):\n+def _is_decoy_prefix(psm, prefix='DECOY_'):\n\"\"\"Given a PSM dict, return :py:const:`True` if all protein names for\n- the PSM start with ``prefix``, and :py:const:`False` otherwise.\n+ the PSM start with `prefix`, and :py:const:`False` otherwise.\nParameters\n----------\n@@ -235,10 +235,27 @@ def is_decoy(psm, prefix='DECOY_'):\n\"\"\"\nreturn all(prot['label'].startswith(prefix) for prot in psm['protein'])\n-qvalues = aux._make_qvalues(chain, is_decoy, operator.itemgetter('expect'))\n-filter = aux._make_filter(chain, is_decoy, operator.itemgetter('expect'),\n- qvalues)\n-fdr = aux._make_fdr(is_decoy)\n+def _is_decoy_suffix(psm, suffix='_DECOY'):\n+ \"\"\"Given a PSM dict, return :py:const:`True` if all protein names for\n+ the PSM end with `suffix`, and :py:const:`False` otherwise.\n+\n+ Parameters\n+ ----------\n+ psm : dict\n+ A dict, as yielded by :py:func:`read`.\n+ suffix : str, optional\n+ A suffix used to mark decoy proteins. Default is `'_DECOY'`.\n+\n+ Returns\n+ -------\n+ out : bool\n+ \"\"\"\n+ return all(prot['label'].endswith(suffix) for prot in psm['protein'])\n+\n+is_decoy = _is_decoy_prefix\n+qvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, operator.itemgetter('expect'))\n+filter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, operator.itemgetter('expect'), qvalues)\n+fdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)\nfilter.chain = aux._make_chain(filter, 'filter', True)\ndef DataFrame(*args, **kwargs):\n@@ -324,10 +341,21 @@ def filter_df(*args, **kwargs):\nelse:\nread_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep'] if k in kwargs}\ndf = DataFrame(*args, **read_kw)\n+\n+ if 'is_decoy' not in kwargs:\nif sep is not None:\n- kwargs.setdefault('is_decoy',\n- df['protein_label'].str.split(sep).apply(lambda s: all(x.startswith('DECOY') for x in s)))\n+ if 'decoy_suffix' in kwargs:\n+ kwargs['is_decoy'] = df['protein_label'].str.split(sep).apply(\n+ lambda s: all(x.endtswith(kwargs['decoy_suffix']) for x in s))\n+ else:\n+ kwargs['is_decoy'] = df['protein_label'].str.split(sep).apply(\n+ lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))\nelse:\n- kwargs.setdefault('is_decoy',\n- df['protein_label'].apply(lambda s: all(x.startswith('DECOY') for x in s)))\n+ if 'decoy_suffix' in kwargs:\n+ kwargs['is_decoy'] = df['protein_label'].apply(\n+ lambda s: all(x.endswith(kwargs['decoy_suffix']) for x in s))\n+ else:\n+ kwargs['is_decoy'] = df['protein_label'].apply(\n+ lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))\n+\nreturn aux.filter(df, **kwargs)\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add decoy prefix and suffix everywhere |
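Usage sketch for the decoy handling introduced above (illustrative only; the input file name and the suffix value are assumptions, not part of the commit):

    from pyteomics import pepxml
    # decoys marked with the default prefix 'DECOY_':
    filtered = pepxml.filter_df('results.pep.xml', fdr=0.01)
    # or, for databases where decoy protein names carry a suffix instead:
    filtered = pepxml.filter_df('results.pep.xml', fdr=0.01, decoy_suffix='_DECOY')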
377,522 | 25.05.2018 17:38:47 | -10,800 | 0866b3f6f7dfb5b5569f00e276e6850df00c1420 | Add more ion_types | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "If the standard :py:func:`is_decoy` function works for your files, you can use these parameters to\nspecify either the prefix or the suffix appended to the protein names in decoy entries.\n+ - New ion types in :py:data:`pyteomics.mass.std_ion_comp`.\n+\n- Bugfixes.\n3.4.2\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mass/mass.py",
"new_path": "pyteomics/mass/mass.py",
"diff": "@@ -414,6 +414,8 @@ std_aa_comp.update({\nstd_ion_comp.update({\n'M': Composition(formula=''),\n+ 'M-H2O': Composition(formula='H-2O-1'),\n+ 'M-NH3': Composition(formula='N-1H-3'),\n'a': Composition(formula='H-2O-1' + 'C-1O-1'),\n'a-H2O': Composition(formula='H-2O-1' + 'C-1O-1' + 'H-2O-1'),\n'a-NH3': Composition(formula='H-2O-1' + 'C-1O-1' + 'N-1H-3'),\n@@ -421,6 +423,10 @@ std_ion_comp.update({\n'b-H2O': Composition(formula='H-2O-1' + 'H-2O-1'),\n'b-NH3': Composition(formula='H-2O-1' + 'N-1H-3'),\n'c': Composition(formula='H-2O-1' + 'NH3'),\n+ 'c-1': Composition(formula='H-2O-1' + 'NH3' + 'H-1'),\n+ 'c-dot': Composition(formula='H-2O-1' + 'NH3' + 'H1'),\n+ 'c+1': Composition(formula='H-2O-1' + 'NH3' + 'H1'),\n+ 'c+2': Composition(formula='H-2O-1' + 'NH3' + 'H2'),\n'c-H2O': Composition(formula='H-2O-1' + 'NH3' + 'H-2O-1'),\n'c-NH3': Composition(formula='H-2O-1'),\n'x': Composition(formula='H-2O-1' + 'CO2'),\n@@ -430,6 +436,10 @@ std_ion_comp.update({\n'y-H2O': Composition(formula='H-2O-1'),\n'y-NH3': Composition(formula='N-1H-3'),\n'z': Composition(formula='H-2O-1' + 'ON-1H-1'),\n+ 'z-dot': Composition(formula='H-2O-1' + 'ON-1'),\n+ 'z+1': Composition(formula='H-2O-1' + 'ON-1H1'),\n+ 'z+2': Composition(formula='H-2O-1' + 'ON-1H2'),\n+ 'z+3': Composition(formula='H-2O-1' + 'ON-1H3'),\n'z-H2O': Composition(formula='H-2O-1' + 'ON-1H-1' + 'H-2O-1'),\n'z-NH3': Composition(formula='H-2O-1' + 'ON-1H-1' + 'N-1H-3'),\n})\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add more ion_types |
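A short sketch of how the new compositions can be used via mass.calculate_mass, which resolves fragment ion types through std_ion_comp (the peptide sequence here is an arbitrary example):

    from pyteomics import mass
    # m/z of ETD-style fragments with the newly added ion types:
    mz_zdot = mass.calculate_mass(sequence='PEPTIDE', ion_type='z-dot', charge=1)
    mz_c1 = mass.calculate_mass(sequence='PEPTIDE', ion_type='c+1', charge=1)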
377,522 | 25.05.2018 18:51:50 | -10,800 | 31dd8ddadfbbea8336c648c3a4566c7159e3cf26 | Rename FDR to TDA in setup.py extras, add zip_safe=False | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -33,7 +33,7 @@ setup(\n},\nnamespace_packages = ['pyteomics'],\nextras_require = {'XML': ['lxml', 'numpy'],\n- 'FDR': ['numpy'],\n+ 'TDA': ['numpy'],\n'graphics': ['matplotlib'],\n'DF': ['pandas'],\n'Unimod': ['lxml', 'sqlalchemy']},\n@@ -46,4 +46,5 @@ setup(\n'Topic :: Scientific/Engineering :: Physics',\n'Topic :: Software Development :: Libraries'],\nlicense = 'License :: OSI Approved :: Apache Software License',\n+ zip_safe = False,\n)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Rename FDR to TDA in setup.py extras, add zip_safe=False |
377,522 | 28.05.2018 14:20:26 | -10,800 | 2747b67ad84427e73dfcc3187d06fb6ee49a88ce | Add decoy_prefix and decoy_suffix to protxml tests. Version 3.5 | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.5b6\n\\ No newline at end of file\n+3.5\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_protxml.py",
"new_path": "tests/test_protxml.py",
"diff": "@@ -41,6 +41,10 @@ class ProtXMLTest(unittest.TestCase):\nq = protxml.qvalues(self.path, **self._kw)\nself.assertEqual(list(q['q']), [0, 1])\n+ def test_qvalues_prefix(self):\n+ q = protxml.qvalues(self.path, decoy_prefix='DECO', **self._kw)\n+ self.assertEqual(list(q['q']), [0, 1])\n+\ndef test_df(self):\ndf = protxml.DataFrame(self.path)\nself.assertEqual(df.shape, (2, 15))\n@@ -48,9 +52,19 @@ class ProtXMLTest(unittest.TestCase):\ndef test_filter_df(self):\nkw = self._kw.copy()\ndel kw['full_output']\n- kw['key'] = 'probability'\n+ del kw['key']\nfdf = protxml.filter_df(self.path, **kw)\nself.assertEqual(fdf.shape, (2, 17))\n+ def test_filter_df_suffix(self):\n+ kw = self._kw.copy()\n+ del kw['full_output']\n+ del kw['key']\n+ kw['remove_decoy'] = True\n+ df = protxml.DataFrame(self.path)\n+ df['protein_name'] = df.protein_name.str.replace(r'DECOY_(.*)', r'\\1_SUF')\n+ fdf = protxml.filter_df(df, decoy_suffix='_SUF', **kw)\n+ self.assertEqual(fdf.shape, (1, 17))\n+\nif __name__ == '__main__':\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add decoy_prefix and decoy_suffix to protxml tests. Version 3.5 |
377,522 | 28.05.2018 14:38:17 | -10,800 | 5ba71c6bfd540d6377b360284d22c1dd6851c15c | Set homepage to documentation | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -23,10 +23,10 @@ setup(\nlong_description = long_description,\nauthor = 'Anton Goloborodko & Lev Levitsky',\nauthor_email = 'pyteomics@googlegroups.com',\n- url = 'http://hg.theorchromo.ru/pyteomics',\n+ url = 'http://pythonhosted.org/pyteomics',\npackages = ['pyteomics', 'pyteomics.mass', 'pyteomics.openms', 'pyteomics.auxiliary'],\nproject_urls = {\n- 'Documentation': 'http://pythonhosted.org/pyteomics/',\n+ 'Documentation': 'http://pythonhosted.org/pyteomics',\n'Source Code' : 'https://bitbucket.org/levitsky/pyteomics',\n'Issue Tracker': 'http://bitbucket.org/levitsky/pyteomics/issues',\n'Mailing List' : 'https://groups.google.com/group/pyteomics',\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Set homepage to documentation |
377,522 | 28.05.2018 19:07:10 | -10,800 | 4c3af7f730cdea1ee8908adb52373d2d8a67f4f0 | Remove links to pythonhosted | [
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.5\n\\ No newline at end of file\n+3.5.0\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -19,14 +19,14 @@ with open('README') as r, open('INSTALL') as i:\nsetup(\nname = 'pyteomics',\nversion = version,\n- description = '''A framework for proteomics data analysis.''',\n+ description = 'A framework for proteomics data analysis.',\nlong_description = long_description,\nauthor = 'Anton Goloborodko & Lev Levitsky',\nauthor_email = 'pyteomics@googlegroups.com',\n- url = 'http://pythonhosted.org/pyteomics',\n+ url = 'http://pyteomics.readthedocs.io',\npackages = ['pyteomics', 'pyteomics.mass', 'pyteomics.openms', 'pyteomics.auxiliary'],\nproject_urls = {\n- 'Documentation': 'http://pythonhosted.org/pyteomics',\n+ 'Documentation': 'http://pyteomics.readthedocs.io',\n'Source Code' : 'https://bitbucket.org/levitsky/pyteomics',\n'Issue Tracker': 'http://bitbucket.org/levitsky/pyteomics/issues',\n'Mailing List' : 'https://groups.google.com/group/pyteomics',\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove links to pythonhosted |
377,522 | 28.05.2018 19:17:08 | -10,800 | e9320992106eaa2eaa950bbdeca50d33f1b9a942 | Bump version to update doc link on PyPI | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "+3.5.1\n+-----\n+\n+Technical release to update the package metadata on PyPI.\n+Project documentation on pythonhosted.org has been deleted.\n+Latest documentation is available at: https://pyteomics.readthedocs.io/.\n+\n3.5\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "VERSION",
"new_path": "VERSION",
"diff": "-3.5.0\n\\ No newline at end of file\n+3.5.1\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Bump version to update doc link on PyPI |
377,522 | 30.05.2018 19:31:36 | -10,800 | 81c152e8fa567380819cba9ba5646dc05bee726d | Add parameter semi in parser.cleave | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "+3.5.2\n+-----\n+\n+ - Add parameter `semi` in :py:func:`pyteomics.parser.cleave`.\n+\n3.5.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/parser.py",
"new_path": "pyteomics/parser.py",
"diff": "@@ -494,7 +494,7 @@ def amino_acid_composition(sequence,\nreturn aa_dict\n@memoize()\n-def cleave(sequence, rule, missed_cleavages=0, min_length=None):\n+def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False):\n\"\"\"Cleaves a polypeptide sequence using a given rule.\nParameters\n@@ -526,6 +526,10 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None):\nyou know what you are doing and apply :py:func:`cleave` to *modX*\nsequences.\n+ semi : bool, optional\n+ Include products of semi-specific cleavage. Default is :py:const:`False`.\n+ This effectively cuts every peptide at every position and adds results to the output.\n+\nReturns\n-------\nout : set\n@@ -540,9 +544,9 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None):\nTrue\n\"\"\"\n- return set(_cleave(sequence, rule, missed_cleavages, min_length))\n+ return set(_cleave(sequence, rule, missed_cleavages, min_length, semi))\n-def _cleave(sequence, rule, missed_cleavages=0, min_length=None):\n+def _cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False):\n\"\"\"Like :py:func:`cleave`, but the result is a list. Refer to\n:py:func:`cleave` for explanation of parameters.\n\"\"\"\n@@ -550,6 +554,8 @@ def _cleave(sequence, rule, missed_cleavages=0, min_length=None):\nml = missed_cleavages+2\ntrange = range(ml)\ncleavage_sites = deque([0], maxlen=ml)\n+ if min_length is None:\n+ min_length = 1\ncl = 1\nfor i in it.chain([x.end() for x in re.finditer(rule, sequence)],\n[None]):\n@@ -558,9 +564,13 @@ def _cleave(sequence, rule, missed_cleavages=0, min_length=None):\ncl += 1\nfor j in trange[:cl-1]:\nseq = sequence[cleavage_sites[j]:cleavage_sites[-1]]\n- if seq:\n- if min_length is None or len(seq) >= min_length:\n+ if seq and len(seq) >= min_length:\npeptides.append(seq)\n+ if semi:\n+ for k in range(min_length, len(seq)-1):\n+ peptides.append(seq[:k])\n+ for k in range(1, len(seq)-min_length+1):\n+ peptides.append(seq[k:])\nreturn peptides\ndef num_sites(sequence, rule, **kwargs):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_parser.py",
"new_path": "tests/test_parser.py",
"diff": "@@ -5,6 +5,7 @@ import unittest\nfrom pyteomics import parser\nfrom string import ascii_uppercase as uppercase\nimport random\n+\nclass ParserTest(unittest.TestCase):\ndef setUp(self):\nself.simple_sequences = [''.join(random.choice(uppercase) for i in range(\n@@ -60,6 +61,7 @@ class ParserTest(unittest.TestCase):\nsum(comp.values()))\ndef test_cleave(self):\n+ self.assertEqual(parser._cleave('PEPTIDEKS', parser.expasy_rules['trypsin']), ['PEPTIDEK', 'S'])\nfor seq in self.simple_sequences:\nfor elem in parser.cleave(\nseq, parser.expasy_rules['trypsin'], int(random.uniform(1, 10))):\n@@ -67,6 +69,12 @@ class ParserTest(unittest.TestCase):\nself.assertTrue(any(elem == seq\nfor elem in parser.cleave(seq, parser.expasy_rules['trypsin'], len(seq))))\n+ def test_cleave_semi(self):\n+ self.assertEqual(parser._cleave('PEPTIDEKS', parser.expasy_rules['trypsin'], semi=True),\n+ ['PEPTIDEK', 'P', 'PE', 'PEP', 'PEPT', 'PEPTI', 'PEPTID', 'EPTIDEK', 'PTIDEK', 'TIDEK', 'IDEK', 'DEK', 'EK', 'K', 'S'])\n+ self.assertEqual(parser.cleave('PEPTIDEKS', parser.expasy_rules['trypsin'], semi=True),\n+ {'PEPTIDEK', 'P', 'PE', 'PEP', 'PEPT', 'PEPTI', 'PEPTID', 'EPTIDEK', 'PTIDEK', 'TIDEK', 'IDEK', 'DEK', 'EK', 'K', 'S'})\n+\ndef test_cleave_min_length(self):\nfor seq in self.simple_sequences:\nml = random.uniform(1, 5)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add parameter semi in parser.cleave |
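A minimal usage sketch of the new parameter, based directly on the test added in this commit:

    from pyteomics import parser
    # fully tryptic products plus all semi-specific sub-peptides:
    peptides = parser.cleave('PEPTIDEKS', parser.expasy_rules['trypsin'], semi=True)
    # e.g. 'PEPTIDEK' (fully specific) as well as 'EPTIDEK' and 'PEPTID'
    # (semi-specific) are all included in the returned set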
377,522 | 31.05.2018 17:46:36 | -10,800 | b50160b158286f357e11c03f862103321c64089d | Add shields.io badges to readme | [
{
"change_type": "MODIFY",
"old_path": "README",
"new_path": "README",
"diff": ":target: http://pyteomics.readthedocs.io/en/latest/?badge=latest\n:alt: Documentation Status\n+.. image:: https://img.shields.io/pypi/v/pyteomics.svg\n+ :target: https://pypi.org/project/pyteomics/\n+ :alt: PyPI\n+\n+.. image:: https://img.shields.io/readthedocs/pyteomics.svg\n+ :target: https://pyteomics.readthedocs.io/\n+ :alt: Read the Docs\n+\n+.. image:: https://img.shields.io/pypi/l/pyteomics.svg\n+ :target: https://www.apache.org/licenses/LICENSE-2.0\n+ :alt: Apache License\n+\n+.. image:: https://img.shields.io/badge/pyteomics-awesome-orange.svg\n+ :alt: Pyteomics is awesome\n+\nWhat is Pyteomics?\n------------------\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add shields.io badges to readme |
377,522 | 01.06.2018 00:38:53 | -10,800 | 6ea9288395b5af7420bfc57791b9e0243b9a8214 | Change license badge | [
{
"change_type": "MODIFY",
"old_path": "README",
"new_path": "README",
"diff": ":target: https://pyteomics.readthedocs.io/\n:alt: Read the Docs (latest)\n-.. image:: https://img.shields.io/pypi/l/pyteomics.svg\n+.. image:: https://img.shields.io/aur/license/python-pyteomics.svg\n:target: https://www.apache.org/licenses/LICENSE-2.0\n:alt: Apache License\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Change license badge |
377,522 | 01.06.2018 00:45:37 | -10,800 | 84839d4137a7911fcd206016e0e349f2a5263e2f | Remove rotten link in INFO | [
{
"change_type": "MODIFY",
"old_path": "INFO",
"new_path": "INFO",
"diff": "@@ -3,10 +3,10 @@ Useful Links\nPyteomics is hosted at the following sites:\n- - Python package @ Python Package Index: https://pypi.org/project/pyteomics\n- - project documentation @ Python.org: http://pythonhosted.org/pyteomics\n- - source code @ Bitbucket: http://hg.theorchromo.ru/pyteomics\n- - mailing list @ Google: https://groups.google.com/group/pyteomics\n+ - Python package @ Python Package Index: https://pypi.org/project/pyteomics/\n+ - project documentation @ Read the Docs: https://pyteomics.readthedocs.io/\n+ - source code @ Bitbucket: http://hg.theorchromo.ru/pyteomics/\n+ - mailing list @ Google: https://groups.google.com/group/pyteomics/\nPyteomics Extensions\n--------------------\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove rotten link in INFO |
377,522 | 07.06.2018 19:17:11 | -10,800 | 700f71fb069e537467ca910d9c64b2570ef026eb | Add encoding parameter in file_writer | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -4,7 +4,6 @@ import codecs\nfrom functools import wraps\nfrom contextlib import contextmanager\n-\ntry:\nbasestring\nexcept NameError:\n@@ -193,11 +192,12 @@ def _file_writer(_mode='a'):\n@wraps(_func)\ndef helper(*args, **kwargs):\nm = kwargs.pop('file_mode', _mode)\n+ enc = kwargs.pop('encoding', None)\nif len(args) > 1:\n- with _file_obj(args[1], m) as out:\n+ with _file_obj(args[1], m, encoding=enc) as out:\nreturn _func(args[0], out, *args[2:], **kwargs)\nelse:\n- with _file_obj(kwargs.pop('output', None), m) as out:\n+ with _file_obj(kwargs.pop('output', None), m, encoding=enc) as out:\nreturn _func(*args, output=out, **kwargs)\nreturn helper\nreturn decorator\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add encoding parameter in file_writer |
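Since writers such as mgf.write are decorated with aux._file_writer, they now accept the new keyword when the output is given by name (a sketch; 'spectra' and the output path are hypothetical):

    from pyteomics import mgf
    mgf.write(spectra, 'out.mgf', encoding='utf-8')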
377,522 | 08.06.2018 01:13:59 | -10,800 | 5b769efd23bb018d5561fc8c1ff08948c8f43d05 | Speed up mgf.write by using np.savetxt | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "- Add parameter `semi` in :py:func:`pyteomics.parser.cleave`.\n+ - Add new parameter `encoding` in file writers.\n+\n+ - Add new parameters `write_charges` and `use_numpy` in :py:func:`pyteomics.mgf.write`.\n+ Speed up the writing when :py:mod:`numpy` is available.\n+\n3.5.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -330,7 +330,8 @@ _default_value_formatters = {'pepmass': _pepmass_repr, 'charge': _charge_repr}\n@aux._file_writer()\ndef write(spectra, output=None, header='', key_order=_default_key_order,\n- fragment_format='{} {} {}', param_formatters=_default_value_formatters):\n+ fragment_format=None, write_charges=True, use_numpy=None,\n+ param_formatters=_default_value_formatters):\n\"\"\"\nCreate a file in MGF format.\n@@ -362,12 +363,22 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nwritten 'as is'. In case of dict, the keys (must be strings) will be\nuppercased.\n+ write_charges : bool, optional\n+ If :py:const:`False`, fragment charges from 'charge array' will not be written.\n+ Default is :py:const:`True`.\n+\nfragment_format : str, optional\nFormat string for m/z, intensity and charge of a fragment. Useful to set\n- the number of decimal places and/or suppress writing charges, e.g.:\n- ``fragment_format='{:.4f} {:.0f}'``. Default is ``'{} {} {}'``.\n+ the number of decimal places, e.g.:\n+ ``fragment_format='%.4f %.0f'``. Default is ``'{} {} {}'``.\n.. note::\n+ The supported format syntax differs depending on other parameters.\n+ If `use_numpy` is :py:const:`True` and :py:mod:`numpy` is available,\n+ fragment peaks will be written using :py:func:`numpy.savetxt`. Then,\n+ `fragment_format` must be recognized by that function.\n+\n+ Otherwise, plain Python string formatting is done.\nSee `the docs\n<https://docs.python.org/library/string.html#format-specification-mini-language>`_\nfor details on writing the format string.\n@@ -387,10 +398,21 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\ntwo arguments (key and value) and return a string.\nDefault is :py:data:`_default_value_formatters`.\n+ use_numpy : bool, optional\n+ Controls whether fragment peak arrays are written using :py:func:`numpy.savetxt`.\n+ Using :py:func:`numpy.savetxt` is faster, but cannot handle sparse arrays of fragment charges.\n+ You may want to disable this if you need to save spectra with 'charge arrays' with missing values.\n+\n+ If not specified, will be set to the opposite of `write_chrages`.\n+ If :py:mod:`numpy` is not available, this parameter has no effect.\n+\nfile_mode : str, keyword only, optional\nIf `output` is a file name, defines the mode the file will be opened in.\nOtherwise will be ignored. 
Default is 'a'.\n+ encoding : str, keyword only, optional\n+ Output file encoding (if `output` is specified by name).\n+\nReturns\n-------\n@@ -401,7 +423,17 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\nnones = (None, np.nan, np.ma.masked) if np is not None else (None,)\n+ if fragment_format is None:\n+ fragment_format = '{} {} {}'\n+ np_format_2 = '%.5f %.1f'\n+ np_format_3 = '%.5f %.1f %d'\n+ else:\n+ np_format_2 = np_format_3 = fragment_format\nformat_str = fragment_format + '\\n'\n+\n+ if use_numpy is None:\n+ use_numpy = not write_charges\n+\nif isinstance(header, dict):\nhead_dict = header.copy()\nhead_lines = [key_value_line(k, v) for k, v in header.items()]\n@@ -435,6 +467,25 @@ def write(spectra, output=None, header='', key_order=_default_key_order,\noutput.write(key_value_line(key, val))\ntry:\n+ success = True\n+ if np is not None and use_numpy:\n+ if not write_charges or 'charge array' not in spectrum:\n+ X = np.empty((len(spectrum['m/z array']), 2))\n+ X[:, 0] = spectrum['m/z array']\n+ X[:, 1] = spectrum['intensity array']\n+ np.savetxt(output, X, fmt=np_format_2)\n+ elif isinstance(spectrum.get('charge array'), np.ndarray):\n+ X = np.empty((len(spectrum['m/z array']), 3))\n+ X[:, 0] = spectrum['m/z array']\n+ X[:, 1] = spectrum['intensity array']\n+ X[:, 2] = spectrum['charge array']\n+ np.savetxt(output, X, fmt=np_format_3)\n+ else:\n+ success = False\n+ else:\n+ success = False\n+\n+ if not success:\nfor m, i, c in zip(spectrum['m/z array'],\nspectrum['intensity array'],\nspectrum.get('charge array', it.cycle((None,)))):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Speed up mgf.write by using np.savetxt |
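Usage sketch of the fast path added here ('spectra' and the file name are hypothetical):

    from pyteomics import mgf
    # let numpy.savetxt format the peak arrays; a %-style format string
    # is expected on this path, per the updated docstring above
    mgf.write(spectra, 'out.mgf', write_charges=False, use_numpy=True,
              fragment_format='%.4f %.0f')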
377,522 | 08.06.2018 15:48:50 | -10,800 | 72f863b429f681d49a6c6473bf6dde5c5cbee5a5 | Remove pkgbuild (it is tracked in the AUR repo) | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "-3.5.2\n------\n+dev\n+---\n- Add parameter `semi` in :py:func:`pyteomics.parser.cleave`.\n"
},
{
"change_type": "DELETE",
"old_path": "PKGBUILD",
"new_path": null,
"diff": "-# Maintainer: Lev Levitsky <levlev at mail dot ru>\n-pkgname=python-pyteomics\n-pkgver=3.4.2\n-pkgrel=1\n-pkgdesc=\"A framework for proteomics data analysis.\"\n-arch=('any')\n-url=\"http://pythonhosted.org/pyteomics/\"\n-license=('Apache')\n-depends=('python' 'python-setuptools')\n-optdepends=('python-matplotlib: for pylab_aux module'\n- 'python-sqlalchemy: for mass.unimod module'\n- 'python-pandas: for convenient filtering of CSV tables from search engines'\n- 'python-lxml: for XML parsing modules'\n- 'python-numpy: for most of features, highly recommended')\n-options=(!emptydirs)\n-source=(\"https://pypi.io/packages/source/p/pyteomics/pyteomics-${pkgver}.tar.gz\")\n-md5sums=('36cc4c3bab653fdbe22aec71858a461f')\n-changelog=\"CHANGELOG\"\n-package() {\n- cd \"${srcdir}/pyteomics-${pkgver}\"\n- python setup.py install --root=\"$pkgdir/\" --optimize=1\n-}\n-\n-# vim:set ts=2 sw=2 et:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove pkgbuild (it is tracked in the AUR repo) |
377,522 | 13.06.2018 01:36:44 | -10,800 | 86f0659f4eb6a70d5264ebde0db67344afc392d4 | Start with the indexing version of mgf parser | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/__init__.py",
"new_path": "pyteomics/auxiliary/__init__.py",
"diff": "@@ -15,7 +15,7 @@ from .constants import _nist_mass\nfrom .file_helpers import (\n_file_obj, _keepstate, _keepstate_method, IteratorContextManager,\n- FileReader, _file_reader, _file_writer, _make_chain)\n+ FileReader, IndexedTextReader, _file_reader, _file_writer, _make_chain)\nfrom .math import (\nlinear_regression, linear_regression_perpendicular,\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "import sys\nimport codecs\n-\n+import re\nfrom functools import wraps\nfrom contextlib import contextmanager\n+from collections import OrderedDict\ntry:\nbasestring\n@@ -165,6 +166,92 @@ class FileReader(IteratorContextManager):\nraise AttributeError\nreturn getattr(self._source, attr)\n+def remove_bom(bstr):\n+ return bstr.replace(codecs.BOM_LE, b'').lstrip(b\"\\x00\")\n+\n+class IndexedTextReader(FileReader):\n+ \"\"\"Abstract class for text file readers that keep an index of records for random access.\n+ This requires reading the file in binary mode.\"\"\"\n+\n+ delimiter = None\n+ label = None\n+ block_size = 1000000\n+\n+ def __init__(self, source, func, pass_file, args, kwargs, encoding='utf-8', block_size=None, use_index=True, delimiter=None, label=None):\n+ # the underlying _file_obj gets None as encoding to avoid transparent decoding of StreamReader on read() calls\n+ super(IndexedTextReader, self).__init__(source, 'rb', func, pass_file, args, kwargs, encoding=None)\n+ self.encoding = encoding\n+ if delimiter is not None:\n+ self.delimiter = delimiter\n+ if label is not None:\n+ self.record_label = label\n+ if block_size is not None:\n+ self.block_size = block_size\n+ if use_index:\n+ self._offset_index = self.build_byte_index()\n+ else:\n+ self._offset_index = None\n+\n+ def _chunk_iterator(self):\n+ fh = self._source.file\n+ delim = remove_bom(self.delimiter.encode(self.encoding))\n+ pattern = re.compile(delim)\n+ buff = fh.read(self.block_size)\n+ parts = pattern.split(buff)\n+ started_with_delim = buff.startswith(delim)\n+ tail = parts[-1]\n+ front = parts[:-1]\n+ i = 0\n+ for part in front:\n+ i += 1\n+ if part == b\"\":\n+ continue\n+ if i == 1:\n+ if started_with_delim:\n+ yield delim + part\n+ else:\n+ yield part\n+ else:\n+ yield delim + part\n+ running = True\n+ while running:\n+ buff = fh.read(self.block_size)\n+ if len(buff) == 0:\n+ running = False\n+ buff = tail\n+ else:\n+ buff = tail + buff\n+ parts = pattern.split(buff)\n+ tail = parts[-1]\n+ front = parts[:-1]\n+ for part in front:\n+ yield delim + part\n+ yield delim + tail\n+\n+ def _generate_offsets(self):\n+ i = 0\n+ pattern = re.compile(remove_bom(self.label.encode(self.encoding)))\n+ for chunk in self._chunk_iterator():\n+ match = pattern.search(chunk)\n+ if match:\n+ label = match.group(1)\n+ yield i, label.decode(self.encoding), match\n+ i += len(chunk)\n+ yield i, None, None\n+\n+ def build_byte_index(self):\n+ index = OrderedDict()\n+ g = self._generate_offsets()\n+ last_offset = 0\n+ last_label = None\n+ for offset, label, keyline in g:\n+ if last_label is not None:\n+ index[last_label] = (last_offset, offset)\n+ last_label = label\n+ last_offset = offset\n+ assert last_label is None\n+ return index\n+\ndef _file_reader(_mode='r'):\n# a lot of the code below is borrowed from\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/structures.py",
"new_path": "pyteomics/auxiliary/structures.py",
"diff": "@@ -32,11 +32,10 @@ class Charge(int):\ntry:\nreturn super(Charge, cls).__new__(cls, *args)\nexcept ValueError as e:\n- if isinstance(args[0], str):\n+ if isinstance(args[0], basestring):\ntry:\nnum, sign = re.match(r'^(\\d+)(\\+|-)$', args[0]).groups()\n- return super(Charge, cls).__new__(cls,\n- sign + num, *args[1:], **kwargs)\n+ return super(Charge, cls).__new__(cls, sign + num, *args[1:], **kwargs)\nexcept Exception:\npass\nraise PyteomicsError(*e.args)\n@@ -52,9 +51,9 @@ class ChargeList(list):\n\"\"\"\ndef __init__(self, *args, **kwargs):\n- if args and isinstance(args[0], str):\n- self.extend(map(Charge,\n- re.split(r'(?:,\\s*)|(?:\\s*and\\s*)', args[0])))\n+ if args and isinstance(args[0], basestring):\n+ delim = r'(?:,\\s*)|(?:\\s*and\\s*)'\n+ self.extend(map(Charge, re.split(delim, args[0])))\nelse:\ntry:\nsuper(ChargeList, self).__init__(\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -65,26 +65,8 @@ except ImportError:\nnp = None\nimport itertools as it\n-class MGF(aux.FileReader):\n- \"\"\"\n- A class representing an MGF file. Supports the `with` syntax and direct iteration for sequential\n- parsing. Specific spectra can be accessed by title using the indexing syntax.\n-\n- :py:class:`MGF` object behaves as an iterator, **yielding** spectra one by one.\n- Each 'spectrum' is a :py:class:`dict` with four keys: 'm/z array',\n- 'intensity array', 'charge array' and 'params'. 'm/z array' and\n- 'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n- 'charge array' is a masked array (:py:class:`numpy.ma.MaskedArray`) of ints,\n- and 'params' stores a :py:class:`dict` of parameters (keys and values are\n- :py:class:`str`, keys corresponding to MGF, lowercased).\n-\n- Attributes\n- ----------\n-\n- header : dict\n- The file header.\n-\n- \"\"\"\n+class MGFBase():\n+ \"\"\"Abstract class representing an MGF file. Subclasses implement different approaches to parsing.\"\"\"\n_comments = set('#;!/')\n_array = (lambda x, dtype: np.array(x, dtype=dtype)) if np is not None else None\n_ma = (lambda x, dtype: np.ma.masked_equal(np.array(x, dtype=dtype), 0)) if np is not None else None\n@@ -96,7 +78,7 @@ class MGF(aux.FileReader):\n}\n_array_keys = ['m/z array', 'intensity array', 'charge array']\n- def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding=None):\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8'):\n\"\"\"Create an MGF file object.\nParameters\n@@ -127,7 +109,7 @@ class MGF(aux.FileReader):\nencoding : str, optional\nEncoding to read the files in. Default is UTF-8.\n\"\"\"\n- super(MGF, self).__init__(source, 'r', self._read, False, (), {}, encoding)\n+\nself._use_header = use_header\nself._convert_arrays = convert_arrays\nif self._convert_arrays and np is None:\n@@ -146,9 +128,9 @@ class MGF(aux.FileReader):\nreturn self._header\n@aux._keepstate_method\n- def _read_header(self):\n+ def _read_header_lines(self, header_lines):\nheader = {}\n- for line in self._source:\n+ for line in header_lines:\nif line.strip() == 'BEGIN IONS':\nbreak\nl = line.split('=')\n@@ -160,29 +142,27 @@ class MGF(aux.FileReader):\nheader['charge'] = aux._parse_charge(header['charge'], True)\nself._header = header\n- def _read(self, **kwargs):\n- for line in self._source:\n- sline = line.strip()\n- if sline == 'BEGIN IONS':\n- spectrum = self._read_spectrum()\n- yield spectrum\n- # otherwise we are not interested; do nothing, just move along\n-\n- def _read_spectrum(self):\n+ def _read_spectrum_lines(self, lines):\n\"\"\"Read a single spectrum from ``self._source``.\nReturns\n-------\nout : dict\n\"\"\"\n+\nmasses = []\nintensities = []\ncharges = []\nparams = self.header.copy() if self._use_header else {}\n- for line in self._source:\n+ for i, line in enumerate(lines):\nsline = line.strip()\n+ if sline == 'BEGIN IONS':\n+ if i == 0:\n+ continue\n+ else:\n+ raise aux.PyteomicsError('Error when parsing MGF: unexpected start of spectrum.')\nif not sline or sline[0] in self._comments:\npass\nelif sline == 'END IONS':\n@@ -221,9 +201,93 @@ class MGF(aux.FileReader):\nexcept IndexError:\npass\n+ def get_spectrum(self, title):\n+ raise NotImplementedError\n+\n+ def __getitem__(self, key):\n+ return self.get_spectrum(key)\n+\n+class IndexedMGF(aux.IndexedTextReader, MGFBase):\n+ \"\"\"\n+ A class representing an MGF file. 
Supports the `with` syntax and direct iteration for sequential\n+ parsing. Specific spectra can be accessed by title using the indexing syntax.\n+\n+ :py:class:`MGF` object behaves as an iterator, **yielding** spectra one by one.\n+ Each 'spectrum' is a :py:class:`dict` with four keys: 'm/z array',\n+ 'intensity array', 'charge array' and 'params'. 'm/z array' and\n+ 'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n+ 'charge array' is a masked array (:py:class:`numpy.ma.MaskedArray`) of ints,\n+ and 'params' stores a :py:class:`dict` of parameters (keys and values are\n+ :py:class:`str`, keys corresponding to MGF, lowercased).\n+\n+ Attributes\n+ ----------\n+\n+ header : dict\n+ The file header.\n+\n+ \"\"\"\n+\n+ delimiter = 'BEGIN IONS'\n+ label = u'TITLE=([^\\n]+)\\n'\n+\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8',\n+ block_size=1000000):\n+ aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding)\n+ MGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype, encoding)\n+ if block_size is not None:\n+ self.block_size = block_size\n+\n+\n+ def _read_header(self):\n+ first = next(v for v in self._offset_index.values())[0]\n+ header_lines = self.read(first).decode(self.encoding).split('\\n')\n+ return self._read_header_lines(header_lines)\n+\n+\n+ def _read(self, **kwargs):\n+ for spec, offsets in self._offset_index.items():\n+ spectrum = self._read_spectrum(*offsets)\n+ yield spectrum\n+\n+ def _read_spectrum(self, start, end):\n+ \"\"\"Read a single spectrum from ``self._source``.\n+\n+ Returns\n+ -------\n+ out : dict\n+ \"\"\"\n+\n+ self._source.seek(start)\n+ lines = self._source.read(end-start).decode(self.encoding).split('\\n')\n+ return self._read_spectrum_lines(lines)\n+\n+ @aux._keepstate_method\n+ def get_spectrum(self, title):\n+ if self._offset_index is not None and title in self._offset_index:\n+ start, end = self._offset_index[title]\n+ return self._read_spectrum(start, end)\n+\n+\n+class MGF(aux.FileReader, MGFBase):\n+\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8'):\n+ aux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n+ MGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype, encoding)\n+\n+ def _read_header(self):\n+ return self._read_header_lines(self._source)\n+\n+ def _read_spectrum(self):\n+ return self._read_spectrum_lines(self._source)\n+\n+ def _read(self):\n+ for line in self._source:\n+ if line.strip() == 'BEGIN IONS':\n+ yield self._read_spectrum()\n+\n@aux._keepstate_method\ndef get_spectrum(self, title):\n- self.reset()\nfor line in self._source:\nsline = line.strip()\nif sline[:5] == 'TITLE' and sline.split('=', 1)[1].strip() == title:\n@@ -231,9 +295,6 @@ class MGF(aux.FileReader):\nspectrum['params']['title'] = title\nreturn spectrum\n- __getitem__ = get_spectrum\n-\n-\ndef read(*args, **kwargs):\n\"\"\"Read an MGF file and return entries iteratively.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Start with the indexing version of mgf parser |
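A sketch of the random access enabled by the new class (the file name and spectrum title are hypothetical):

    from pyteomics import mgf
    with mgf.IndexedMGF('spectra.mgf') as reader:
        # the constructor builds a byte-offset index keyed by TITLE,
        # so lookup does not rescan the whole file
        spectrum = reader['Some Spectrum Title']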
377,522 | 20.06.2018 17:34:38 | -10,800 | 38ac856b0b41ba60e89653546e6201a934f92146 | Set default encoding to None for mgf.MGF for better performance | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -78,7 +78,7 @@ class MGFBase():\n}\n_array_keys = ['m/z array', 'intensity array', 'charge array']\n- def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8'):\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None):\n\"\"\"Create an MGF file object.\nParameters\n@@ -105,9 +105,6 @@ class MGFBase():\ndtype : type or str or dict, optional\ndtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\nKeys should be 'm/z array', 'intensity array' and/or 'charge array'.\n-\n- encoding : str, optional\n- Encoding to read the files in. Default is UTF-8.\n\"\"\"\nself._use_header = use_header\n@@ -234,7 +231,7 @@ class IndexedMGF(aux.IndexedTextReader, MGFBase):\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8',\nblock_size=1000000):\naux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding)\n- MGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype, encoding)\n+ MGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\nif block_size is not None:\nself.block_size = block_size\n@@ -271,9 +268,9 @@ class IndexedMGF(aux.IndexedTextReader, MGFBase):\nclass MGF(aux.FileReader, MGFBase):\n- def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8'):\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding=None):\naux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n- MGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype, encoding)\n+ MGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\ndef _read_header(self):\nreturn self._read_header_lines(self._source)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Set default encoding to None for mgf.MGF for better performance |
377,522 | 20.06.2018 18:49:35 | -10,800 | dc8e78013bc4250711e8894d2c3b8e12505f3172 | Add classmethod scan and fixes to IndexedTextReader | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -184,7 +184,7 @@ class IndexedTextReader(FileReader):\nif delimiter is not None:\nself.delimiter = delimiter\nif label is not None:\n- self.record_label = label\n+ self.label = label\nif block_size is not None:\nself.block_size = block_size\nif use_index:\n@@ -252,6 +252,11 @@ class IndexedTextReader(FileReader):\nassert last_label is None\nreturn index\n+ @classmethod\n+ def scan(cls, source, delimiter_text, label_text, encoding='utf-8'):\n+ inst = cls(source, lambda: None, None, (), {}, encoding, delimiter=delimiter_text, label=label_text, use_index=False)\n+ return inst.build_byte_index()\n+\ndef _file_reader(_mode='r'):\n# a lot of the code below is borrowed from\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -230,18 +230,14 @@ class IndexedMGF(aux.IndexedTextReader, MGFBase):\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8',\nblock_size=1000000):\n- aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding)\n+ aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding, block_size)\nMGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\n- if block_size is not None:\n- self.block_size = block_size\n-\ndef _read_header(self):\nfirst = next(v for v in self._offset_index.values())[0]\nheader_lines = self.read(first).decode(self.encoding).split('\\n')\nreturn self._read_header_lines(header_lines)\n-\ndef _read(self, **kwargs):\nfor spec, offsets in self._offset_index.items():\nspectrum = self._read_spectrum(*offsets)\n@@ -304,7 +300,7 @@ def read(*args, **kwargs):\n\"\"\"\nreturn MGF(*args, **kwargs)\n-def get_spectrum(source, title, use_header=True, convert_arrays=2, read_charges=True, dtype=None):\n+def get_spectrum(source, title, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8'):\n\"\"\"Read one spectrum (with given `title`) from `source`.\nSee :py:func:`read` for explanation of parameters affecting the output.\n@@ -327,8 +323,8 @@ def get_spectrum(source, title, use_header=True, convert_arrays=2, read_charges=\nA dict with the spectrum, if it is found, and None otherwise.\n\"\"\"\n- with MGF(source, use_header=use_header, convert_arrays=convert_arrays,\n- read_charges=read_charges, dtype=dtype) as f:\n+ with IndexedMGF(source, use_header=use_header, convert_arrays=convert_arrays,\n+ read_charges=read_charges, dtype=dtype, encoding=encoding) as f:\nreturn f[title]\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add classmethod scan and fixes to IndexedTextReader |
377,522 | 21.06.2018 18:31:59 | -10,800 | 7baa0828faada104b1c98579a7b53d2042b59f05 | Fix encoding-related annoyances | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -64,6 +64,7 @@ try:\nexcept ImportError:\nnp = None\nimport itertools as it\n+import sys\nclass MGFBase():\n\"\"\"Abstract class representing an MGF file. Subclasses implement different approaches to parsing.\"\"\"\n@@ -77,6 +78,8 @@ class MGFBase():\n'charge array': [_identity, _array, _ma]\n}\n_array_keys = ['m/z array', 'intensity array', 'charge array']\n+ _array_keys_unicode = [u'm/z array', u'intensity array', u'charge array']\n+ encoding = None\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None):\n\"\"\"Create an MGF file object.\n@@ -124,7 +127,6 @@ class MGFBase():\nself._read_header()\nreturn self._header\n- @aux._keepstate_method\ndef _read_header_lines(self, header_lines):\nheader = {}\nfor line in header_lines:\n@@ -171,7 +173,7 @@ class MGFBase():\n'PEPMASS = {}'.format(params['pepmass']))\nelse:\nparams['pepmass'] = pepmass + (None,) * (2-len(pepmass))\n- if isinstance(params.get('charge'), str):\n+ if isinstance(params.get('charge'), aux.basestring):\nparams['charge'] = aux._parse_charge(params['charge'], True)\nout = {'params': params}\ndata = {'m/z array': masses, 'intensity array': intensities}\n@@ -179,6 +181,10 @@ class MGFBase():\ndata['charge array'] = charges\nfor key, values in data.items():\nout[key] = self._array_converters[key][self._convert_arrays](values, dtype=self._dtype_dict.get(key))\n+ if self.encoding and sys.version_info.major == 2:\n+ for key, ukey in zip(self._array_keys + ['params'], self._array_keys_unicode + [u'params']):\n+ if key in out:\n+ out[ukey] = out.pop(key)\nreturn out\nelse:\n@@ -233,6 +239,7 @@ class IndexedMGF(aux.IndexedTextReader, MGFBase):\naux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding, block_size)\nMGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\n+ @aux._keepstate_method\ndef _read_header(self):\nfirst = next(v for v in self._offset_index.values())[0]\nheader_lines = self.read(first).decode(self.encoding).split('\\n')\n@@ -267,7 +274,9 @@ class MGF(aux.FileReader, MGFBase):\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding=None):\naux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\nMGFBase.__init__(self, source, use_header, convert_arrays, read_charges, dtype)\n+ self.encoding = encoding\n+ @aux._keepstate_method\ndef _read_header(self):\nreturn self._read_header_lines(self._source)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/data.py",
"new_path": "tests/data.py",
"diff": "@@ -4,6 +4,8 @@ Bulky data structures for assertion in pyteomics test suites.\nimport numpy as np\nfrom copy import deepcopy\n+import sys\n+from pyteomics.auxiliary import basestring\n# http://stackoverflow.com/q/14246983/1258041\nclass ComparableArray(np.ndarray):\n@@ -1378,6 +1380,32 @@ for s in mgf_spectra_lists:\nfor key in ['m/z array', 'intensity array', 'charge array']:\ns[key] = list(s[key])\n+\n+def decode_dict(d, encoding='utf-8'):\n+ \"\"\"Recursively decode all strings in a dict\"\"\"\n+ out = {}\n+ if isinstance(d, basestring):\n+ return d.decode(encoding)\n+ if not isinstance(d, dict):\n+ return d\n+ for k, v in d.items():\n+ newk = k.decode(encoding)\n+ if isinstance(v, basestring):\n+ out[newk] = v.decode(encoding)\n+ elif isinstance(v, dict):\n+ out[newk] = decode_dict(v, encoding)\n+ elif isinstance(v, list):\n+ out[newk] = [decode_dict(i) for i in v]\n+ else:\n+ out[newk] = v\n+ return out\n+\n+mgf_spectra_long_decoded = [decode_dict(s) for s in mgf_spectra_long\n+ ] if sys.version_info.major == 2 else mgf_spectra_long\n+\n+mgf_spectra_short_decoded = [decode_dict(s) for s in mgf_spectra_short\n+ ] if sys.version_info.major == 2 else mgf_spectra_short\n+\ntandem_spectra = [{'act': '0',\n'expect': 1.5e-07,\n'fI': 48232.2,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -4,11 +4,12 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport tempfile\nimport unittest\n-from pyteomics.mgf import read, write, read_header, MGF\n+from pyteomics.mgf import read, write, read_header, MGF, IndexedMGF\nimport data\nclass MGFTest(unittest.TestCase):\nmaxDiff = None\n+ _encoding = 'utf-8'\ndef setUp(self):\nself.path = 'test.mgf'\nself.header = read_header(self.path)\n@@ -33,6 +34,16 @@ class MGFTest(unittest.TestCase):\nwith func(self.path, False) as reader:\nself.assertEqual(data.mgf_spectra_short, list(reader))\n+ def test_read_decoding(self):\n+ for func in [read, MGF, IndexedMGF]:\n+ self.assertEqual(data.mgf_spectra_long_decoded, list(func(self.path, encoding=self._encoding)))\n+ self.assertEqual(data.mgf_spectra_short_decoded, list(func(self.path, False, encoding=self._encoding)))\n+ with func(self.path, encoding=self._encoding) as reader:\n+ self.assertEqual(data.mgf_spectra_long_decoded, list(reader))\n+ with func(self.path, False, encoding=self._encoding) as reader:\n+ self.assertEqual(data.mgf_spectra_short_decoded, list(reader))\n+ self.assertEqual(data.mgf_spectra_long_decoded, list(func(self.path)))\n+\ndef test_read_no_charges(self):\nwith read(self.path, read_charges=False) as reader:\nself.assertEqual(data.mgf_spectra_long_no_charges, list(reader))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fix encoding-related annoyances |
377,522 | 21.06.2018 19:13:48 | -10,800 | 79bb488d45681422134a1784e41d2c08a31b6705 | Add a primitive get_spectrum test for mgf | [
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -4,7 +4,7 @@ import pyteomics\npyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport tempfile\nimport unittest\n-from pyteomics.mgf import read, write, read_header, MGF, IndexedMGF\n+from pyteomics import mgf\nimport data\nclass MGFTest(unittest.TestCase):\n@@ -12,20 +12,20 @@ class MGFTest(unittest.TestCase):\n_encoding = 'utf-8'\ndef setUp(self):\nself.path = 'test.mgf'\n- self.header = read_header(self.path)\n- self.spectra = list(read(self.path))\n+ self.header = mgf.read_header(self.path)\n+ self.spectra = list(mgf.read(self.path))\nself.tmpfile = tempfile.TemporaryFile(mode='r+')\n- write(header=self.header, spectra=self.spectra, output=self.tmpfile)\n+ mgf.write(header=self.header, spectra=self.spectra, output=self.tmpfile)\nself.tmpfile.seek(0)\n- self.header2 = read_header(self.tmpfile)\n+ self.header2 = mgf.read_header(self.tmpfile)\nself.tmpfile.seek(0)\n- tmpreader = read(self.tmpfile)\n+ tmpreader = mgf.read(self.tmpfile)\nself.spectra2 = list(tmpreader)\nself.ns = len(self.spectra)\nself.tmpfile.close()\ndef test_read(self):\n- for func in [read, MGF]:\n+ for func in [mgf.read, mgf.MGF]:\n# http://stackoverflow.com/q/14246983/1258041\nself.assertEqual(data.mgf_spectra_long, list(func(self.path)))\nself.assertEqual(data.mgf_spectra_short, list(func(self.path, False)))\n@@ -35,7 +35,7 @@ class MGFTest(unittest.TestCase):\nself.assertEqual(data.mgf_spectra_short, list(reader))\ndef test_read_decoding(self):\n- for func in [read, MGF, IndexedMGF]:\n+ for func in [mgf.read, mgf.MGF, mgf.IndexedMGF]:\nself.assertEqual(data.mgf_spectra_long_decoded, list(func(self.path, encoding=self._encoding)))\nself.assertEqual(data.mgf_spectra_short_decoded, list(func(self.path, False, encoding=self._encoding)))\nwith func(self.path, encoding=self._encoding) as reader:\n@@ -45,19 +45,19 @@ class MGFTest(unittest.TestCase):\nself.assertEqual(data.mgf_spectra_long_decoded, list(func(self.path)))\ndef test_read_no_charges(self):\n- with read(self.path, read_charges=False) as reader:\n+ with mgf.read(self.path, read_charges=False) as reader:\nself.assertEqual(data.mgf_spectra_long_no_charges, list(reader))\n- with read(self.path, False, read_charges=False) as reader:\n+ with mgf.read(self.path, False, read_charges=False) as reader:\nself.assertEqual(data.mgf_spectra_short_no_charges, list(reader))\ndef test_read_array_conversion(self):\n- with read(self.path, convert_arrays=0) as reader:\n+ with mgf.read(self.path, convert_arrays=0) as reader:\nself.assertEqual(data.mgf_spectra_lists, list(reader))\n- with read(self.path, convert_arrays=2) as reader:\n+ with mgf.read(self.path, convert_arrays=2) as reader:\ns = next(reader)\nself.assertTrue(isinstance(s['charge array'], np.ma.core.MaskedArray))\nself.assertTrue(isinstance(s['m/z array'], np.ndarray))\n- with read(self.path, convert_arrays=1) as reader:\n+ with mgf.read(self.path, convert_arrays=1) as reader:\ns = next(reader)\nself.assertTrue(isinstance(s['charge array'], np.ndarray))\nself.assertTrue(isinstance(s['m/z array'], np.ndarray))\n@@ -101,10 +101,17 @@ class MGFTest(unittest.TestCase):\ndef test_read_dtype(self):\ndtypes = {'m/z array': np.float32, 'intensity array': np.int32}\n- with read(self.path, dtype=dtypes) as f:\n+ with mgf.read(self.path, dtype=dtypes) as f:\nfor spec in f:\nfor k, v in dtypes.items():\nself.assertEqual(spec[k].dtype, v)\n+ def test_get_spectrum(self):\n+ key = 'Spectrum 2'\n+ f = mgf.IndexedMGF(self.path)\n+ self.assertEqual(f[key], 
data.mgf_spectra_long[1])\n+ self.assertEqual(f.get_spectrum(key), data.mgf_spectra_long[1])\n+ self.assertEqual(mgf.get_spectrum(self.path, key), data.mgf_spectra_long[1])\n+\nif __name__ == \"__main__\":\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add a primitive get_spectrum test for mgf |
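The same lookup through the module-level helper, using the file and title from the test above:

    from pyteomics import mgf
    spectrum = mgf.get_spectrum('test.mgf', 'Spectrum 2')  # delegates to IndexedMGF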
377,522 | 02.07.2018 19:38:23 | -10,800 | 3dd0b30078ebb60eb0d8e6c8a7e0f8832fc4a9d7 | Remove IndexedTextReader.scan and use_index argument | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -177,7 +177,7 @@ class IndexedTextReader(FileReader):\nlabel = None\nblock_size = 1000000\n- def __init__(self, source, func, pass_file, args, kwargs, encoding='utf-8', block_size=None, use_index=True, delimiter=None, label=None):\n+ def __init__(self, source, func, pass_file, args, kwargs, encoding='utf-8', block_size=None, delimiter=None, label=None):\n# the underlying _file_obj gets None as encoding to avoid transparent decoding of StreamReader on read() calls\nsuper(IndexedTextReader, self).__init__(source, 'rb', func, pass_file, args, kwargs, encoding=None)\nself.encoding = encoding\n@@ -187,10 +187,7 @@ class IndexedTextReader(FileReader):\nself.label = label\nif block_size is not None:\nself.block_size = block_size\n- if use_index:\nself._offset_index = self.build_byte_index()\n- else:\n- self._offset_index = None\ndef _chunk_iterator(self):\nfh = self._source.file\n@@ -252,11 +249,6 @@ class IndexedTextReader(FileReader):\nassert last_label is None\nreturn index\n- @classmethod\n- def scan(cls, source, delimiter_text, label_text, encoding='utf-8'):\n- inst = cls(source, lambda: None, None, (), {}, encoding, delimiter=delimiter_text, label=label_text, use_index=False)\n- return inst.build_byte_index()\n-\ndef _file_reader(_mode='r'):\n# a lot of the code below is borrowed from\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove IndexedTextReader.scan and use_index argument |
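With scan() and the use_index flag removed, the byte offset index is always built at construction time; a small sketch of the resulting behavior for a subclass such as IndexedMGF (the file name is an assumption, and the inspected attribute is internal, shown for illustration only):

    from pyteomics import mgf

    # IndexedMGF inherits from IndexedTextReader; the offset index is now
    # built unconditionally in __init__, with no use_index switch to disable it
    reader = mgf.IndexedMGF('test.mgf')
    print('Spectrum 2' in reader._offset_index)  # _offset_index is a private attribute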
377,522 | 03.07.2018 17:04:08 | -10,800 | de0436e6f515765a89e5d2602e29f7b8172eca38 | Add dispatching based on use_index to mgf.read, update docs | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -8,6 +8,12 @@ dev\n- Add new parameters `write_charges` and `use_numpy` in :py:func:`pyteomics.mgf.write`.\nSpeed up the writing when :py:mod:`numpy` is available.\n+ - New class :py:class:`pyteomics.mgf.IndexedMGF` is now the recommended way to parse MGF files.\n+ It supports fast access by spectrum titles by using an index of byte offsets.\n+ The old, sequential parser is preserved under its name, :py:class:`pyteomics.mgf.MGF`.\n+ The function :py:func:`pyteomics.mgf.read` now returns an instance of one of the two classes,\n+ based on the `use_index` argument and the type of `source`.\n+\n3.5.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -20,8 +20,12 @@ files.\nClasses\n-------\n- :py:class:`MGF` - a class representing an MGF file. Use it to read spectra\n- from a file consecutively or by title.\n+ :py:class:`MGF` - a text-mode MGF parser. Suitable to read spectra from a file consecutively.\n+ Needs a file opened in text mode (or will open it if given a file name).\n+\n+ :py:class:`IndexedMGF` - a binary-mode MGF parser. When created, builds a byte offset index\n+ for fast random access by spectrum titles. Sequential iteration is also supported.\n+ Needs a seekable file opened in binary mode (or will open it if given a file name).\nFunctions\n---------\n@@ -65,6 +69,8 @@ except ImportError:\nnp = None\nimport itertools as it\nimport sys\n+import warnings\n+warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\\n'\nclass MGFBase():\n\"\"\"Abstract class representing an MGF file. Subclasses implement different approaches to parsing.\"\"\"\n@@ -81,8 +87,9 @@ class MGFBase():\n_array_keys_unicode = [u'm/z array', u'intensity array', u'charge array']\nencoding = None\n- def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None):\n- \"\"\"Create an MGF file object.\n+\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding=None):\n+ \"\"\"Create an MGF file object, set MGF-specific parameters.\nParameters\n----------\n@@ -108,6 +115,9 @@ class MGFBase():\ndtype : type or str or dict, optional\ndtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\nKeys should be 'm/z array', 'intensity array' and/or 'charge array'.\n+\n+ encoding : str, optional\n+ File encoding.\n\"\"\"\nself._use_header = use_header\n@@ -210,12 +220,14 @@ class MGFBase():\ndef __getitem__(self, key):\nreturn self.get_spectrum(key)\n+\nclass IndexedMGF(aux.IndexedTextReader, MGFBase):\n\"\"\"\nA class representing an MGF file. Supports the `with` syntax and direct iteration for sequential\n- parsing. Specific spectra can be accessed by title using the indexing syntax.\n+ parsing. Specific spectra can be accessed by title using the indexing syntax in constant time.\n+ If created using a file object, it needs to be opened in binary mode.\n- :py:class:`MGF` object behaves as an iterator, **yielding** spectra one by one.\n+ When iterated, :py:class:`IndexedMGF` object yields spectra one by one.\nEach 'spectrum' is a :py:class:`dict` with four keys: 'm/z array',\n'intensity array', 'charge array' and 'params'. 'm/z array' and\n'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n@@ -270,6 +282,27 @@ class IndexedMGF(aux.IndexedTextReader, MGFBase):\nclass MGF(aux.FileReader, MGFBase):\n+ \"\"\"\n+ A class representing an MGF file. Supports the `with` syntax and direct iteration for sequential\n+ parsing. Specific spectra can be accessed by title using the indexing syntax (if the file is seekable),\n+ but it takes linear time to search through the file. Consider using :py:class:`IndexedMGF` for\n+ constant-time access to spectra.\n+\n+ :py:class:`MGF` object behaves as an iterator, **yielding** spectra one by one.\n+ Each 'spectrum' is a :py:class:`dict` with four keys: 'm/z array',\n+ 'intensity array', 'charge array' and 'params'. 
'm/z array' and\n+ 'intensity array' store :py:class:`numpy.ndarray`'s of floats,\n+ 'charge array' is a masked array (:py:class:`numpy.ma.MaskedArray`) of ints,\n+ and 'params' stores a :py:class:`dict` of parameters (keys and values are\n+ :py:class:`str`, keys corresponding to MGF, lowercased).\n+\n+ Attributes\n+ ----------\n+\n+ header : dict\n+ The file header.\n+\n+ \"\"\"\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding=None):\naux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n@@ -297,19 +330,75 @@ class MGF(aux.FileReader, MGFBase):\nspectrum['params']['title'] = title\nreturn spectrum\n+\ndef read(*args, **kwargs):\n- \"\"\"Read an MGF file and return entries iteratively.\n+ \"\"\"Returns a reader for a given MGF file. Most of the parameters repeat the\n+ instantiation signature of :py:class:`MGF` and :py:class:`IndexedMGF`.\n+ Additional parameter `use_index` helps decide which class to instantiate\n+ for given `source`.\n+\n+ Parameters\n+ ----------\n- .. note:: This is an alias to :py:class:`MGF`.\n+ source : str or file or None, optional\n+ A file object (or file name) with data in MGF format. Default is\n+ :py:const:`None`, which means read standard input.\n+\n+ use_header : bool, optional\n+ Add the info from file header to each dict. Spectrum-specific parameters\n+ override those from the header in case of conflict.\n+ Default is :py:const:`True`.\n+\n+ convert_arrays : one of {0, 1, 2}, optional\n+ If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.\n+ If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.\n+ If `2`, charges will be reported as a masked array (default).\n+ The default option is the slowest. `1` and `2` require :py:mod:`numpy`.\n+\n+ read_charges : bool, optional\n+ If `True` (default), fragment charges are reported. Disabling it improves performance.\n+\n+ dtype : type or str or dict, optional\n+ dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.\n+ Keys should be 'm/z array', 'intensity array' and/or 'charge array'.\n+\n+ encoding : str, optional\n+ File encoding.\n+\n+ use_index : bool, optional\n+ Determines which parsing method to use. If :py:const:`True` (default), an instance of\n+ :py:class:`IndexedMGF` is created. This facilitates random access by spectrum titles.\n+ If an open file is passed as `source`, it needs to be open in binary mode.\n+\n+ If :py:const:`False`, an instance of :py:class:`SequentialMGF` is created. It reads\n+ `source` in text mode and is suitable for iterative parsing. Access by spectrum title\n+ requires linear search and thus takes linear time.\n+\n+ block_size : int, optinal\n+ Size of the chunk (in bytes) used to parse the file when creating the byte offset index.\n+ (Accepted only for :py:class:`IndexedMGF`.)\nReturns\n-------\n- out : MGF\n+ out : MGFBase\n+ Instance of :py:class:`MGF` or :py:class:`IndexedMGF`.\n\"\"\"\n- return MGF(*args, **kwargs)\n+ if args:\n+ source = args[0]\n+ else:\n+ source = kwargs.get('source')\n+ use_index = kwargs.pop('use_index', True)\n+ if 'b' in getattr(source, 'mode', 'b') and use_index:\n+ tp = IndexedMGF\n+ else:\n+ if use_index:\n+ warnings.warn('use_index is True, but the file mode is not binary. 
Setting use_index to False')\n+ tp = MGF\n+ return tp(*args, **kwargs)\n+\n-def get_spectrum(source, title, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8'):\n+def get_spectrum(source, title, *args, **kwargs):\n\"\"\"Read one spectrum (with given `title`) from `source`.\nSee :py:func:`read` for explanation of parameters affecting the output.\n@@ -332,8 +421,7 @@ def get_spectrum(source, title, use_header=True, convert_arrays=2, read_charges=\nA dict with the spectrum, if it is found, and None otherwise.\n\"\"\"\n- with IndexedMGF(source, use_header=use_header, convert_arrays=convert_arrays,\n- read_charges=read_charges, dtype=dtype, encoding=encoding) as f:\n+ with read(source, *args, **kwargs) as f:\nreturn f[title]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -108,10 +108,11 @@ class MGFTest(unittest.TestCase):\ndef test_get_spectrum(self):\nkey = 'Spectrum 2'\n- f = mgf.IndexedMGF(self.path)\n- self.assertEqual(f[key], data.mgf_spectra_long[1])\n- self.assertEqual(f.get_spectrum(key), data.mgf_spectra_long[1])\n- self.assertEqual(mgf.get_spectrum(self.path, key), data.mgf_spectra_long[1])\n+ for klass in [mgf.MGF, mgf.IndexedMGF]:\n+ f = klass(self.path)\n+ self.assertEqual(data.mgf_spectra_long[1], f[key])\n+ self.assertEqual(data.mgf_spectra_long[1], f.get_spectrum(key))\n+ self.assertEqual(data.mgf_spectra_long[1], mgf.get_spectrum(self.path, key))\nif __name__ == \"__main__\":\nunittest.main()\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add dispatching based on use_index to mgf.read, update docs |
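A sketch of the dispatching behavior introduced above; the file name is an assumption:

    from pyteomics import mgf

    reader = mgf.read('test.mgf')                   # use_index defaults to True: IndexedMGF
    assert isinstance(reader, mgf.IndexedMGF)

    with open('test.mgf') as text_file:             # a text-mode handle cannot be indexed:
        reader = mgf.read(text_file)                # a warning is issued and MGF is returned
        assert isinstance(reader, mgf.MGF)

    reader = mgf.read('test.mgf', use_index=False)  # explicitly sequential parsing
    assert isinstance(reader, mgf.MGF)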
377,522 | 09.07.2018 22:31:18 | -10,800 | b1bc2d19fdfd6f940ec9bc19ce79a4dba1c0c742 | Add exception logic to parser.cleave to address issue | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "dev\n---\n- - Add parameter `semi` in :py:func:`pyteomics.parser.cleave`.\n+ - Add parameters `semi` and `exception` in :py:func:`pyteomics.parser.cleave`.\n- Add new parameter `encoding` in file writers.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/parser.py",
"new_path": "pyteomics/parser.py",
"diff": "@@ -494,7 +494,8 @@ def amino_acid_composition(sequence,\nreturn aa_dict\n@memoize()\n-def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False):\n+def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False,\n+ exception=None):\n\"\"\"Cleaves a polypeptide sequence using a given rule.\nParameters\n@@ -530,6 +531,11 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False):\nInclude products of semi-specific cleavage. Default is :py:const:`False`.\nThis effectively cuts every peptide at every position and adds results to the output.\n+ exception : str, optional\n+ Exceptions to the cleavage rule. If specified, should be a regular expression.\n+ Cleavage sites matching `rule` will be checked against `exception` and omitted\n+ if they match.\n+\nReturns\n-------\nout : set\n@@ -544,9 +550,9 @@ def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False):\nTrue\n\"\"\"\n- return set(_cleave(sequence, rule, missed_cleavages, min_length, semi))\n+ return set(_cleave(sequence, rule, missed_cleavages, min_length, semi, exception))\n-def _cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False):\n+def _cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exception=None):\n\"\"\"Like :py:func:`cleave`, but the result is a list. Refer to\n:py:func:`cleave` for explanation of parameters.\n\"\"\"\n@@ -557,8 +563,12 @@ def _cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False):\nif min_length is None:\nmin_length = 1\ncl = 1\n+ if exception is not None:\n+ exceptions = {x.end() for x in re.finditer(exception, sequence)}\nfor i in it.chain([x.end() for x in re.finditer(rule, sequence)],\n[None]):\n+ if exception is not None and i in exceptions:\n+ continue\ncleavage_sites.append(i)\nif cl < ml:\ncl += 1\n@@ -635,7 +645,8 @@ expasy_rules = {\n'thermolysin': r'[^DE](?=[AFILMV])',\n'thrombin': r'((?<=G)R(?=G))|'\nr'((?<=[AFGILTVM][AFGILTVWA]P)R(?=[^DE][^DE]))',\n- 'trypsin': r'([KR](?=[^P]))|((?<=W)K(?=P))|((?<=M)R(?=P))'\n+ 'trypsin': r'([KR](?=[^P]))|((?<=W)K(?=P))|((?<=M)R(?=P))',\n+ 'trypsin_exception': r'((?<=[CD])K(?=D))|((?<=C)K(?=[HY]))|((?<=C)R(?=K))|((?<=R)R(?=[HR]))',\n}\n\"\"\"\nThis dict contains regular expressions for cleavage rules of the most\n@@ -643,6 +654,16 @@ popular proteolytic enzymes. The rules were taken from the\n`PeptideCutter tool\n<http://ca.expasy.org/tools/peptidecutter/peptidecutter_enzymes.html>`_\nat Expasy.\n+\n+.. note::\n+ 'trypsin_exception' can be used as `exception` argument when calling\n+ :py:func:`cleave` with 'trypsin' `rule`::\n+\n+ >>> parser.cleave('PEPTIDKDE', parser.expasy_rules['trypsin'])\n+ {'DE', 'PEPTIDK'}\n+ >>> parser.cleave('PEPTIDKDE', parser.expasy_rules['trypsin'], \\\n+exception=parser.expasy_rules['trypsin_exception'])\n+ {'PEPTIDKDE'}\n\"\"\"\ndef isoforms(sequence, **kwargs):\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add exception logic to parser.cleave to address issue #29 |
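The doctest added in the diff above, restated as a runnable snippet:

    from pyteomics import parser

    # without the exception, K followed by D is treated as a cleavage site
    print(parser.cleave('PEPTIDKDE', parser.expasy_rules['trypsin']))
    # -> {'DE', 'PEPTIDK'}

    # 'trypsin_exception' suppresses such sites, keeping the peptide whole
    print(parser.cleave('PEPTIDKDE', parser.expasy_rules['trypsin'],
                        exception=parser.expasy_rules['trypsin_exception']))
    # -> {'PEPTIDKDE'}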
377,522 | 10.07.2018 23:24:59 | -10,800 | b6ecf6a9ab92ec93bc8874d1c216d98b361ef0af | First draft of IndexedFASTA, update fasta tests | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -249,6 +249,10 @@ class IndexedTextReader(FileReader):\nassert last_label is None\nreturn index\n+ def _read_lines_from_offsets(self, start, end):\n+ self._source.seek(start)\n+ lines = self._source.read(end-start).decode(self.encoding).split('\\n')\n+ return lines\ndef _file_reader(_mode='r'):\n# a lot of the code below is borrowed from\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -77,6 +77,118 @@ from . import auxiliary as aux\nProtein = namedtuple('Protein', ('description', 'sequence'))\n+class FASTABase():\n+ parser = staticmethod(lambda x: x)\n+ _ignore_comments = False\n+ _comments = set('>;')\n+\n+ def __init__(self, source, ignore_comments=False, parser=None):\n+ self._ignore_comments = ignore_comments\n+ if parser is not None:\n+ self.parser = parser\n+\n+ def _is_comment(self, line):\n+ return line[0] in self._comments\n+\n+ def _read_protein_lines(self, lines):\n+ description = []\n+ sequence = []\n+\n+ for string in lines:\n+ stripped_string = string.strip()\n+ if not stripped_string:\n+ continue\n+\n+ is_comment = self._is_comment(stripped_string)\n+ if is_comment:\n+ if (not description) or (not self._ignore_comments):\n+ description.append(stripped_string[1:])\n+ elif not description:\n+ # we are not reading an entry from the beginning\n+ continue\n+ else:\n+ sequence.append(stripped_string)\n+\n+ description = ' '.join(description)\n+ sequence = ''.join(sequence)\n+ # Drop the translation stop sign.\n+ if sequence and sequence[-1] == '*':\n+ sequence = sequence[:-1]\n+ return Protein(self.parser(description), sequence)\n+\n+ def get_entry(self, key):\n+ raise NotImplementedError\n+\n+ def __getitem__(self, key):\n+ return self.get_entry(key)\n+\n+\n+class FASTA(aux.FileReader, FASTABase):\n+ def __init__(self, source, ignore_comments=False, parser=None, encoding=None):\n+ aux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n+ FASTABase.__init__(self, source, ignore_comments, parser)\n+ self.encoding = encoding\n+\n+ def _read(self):\n+ accumulated_strings = []\n+\n+ # Iterate through '>' after the file is over to retrieve the last entry.\n+ for string in itertools.chain(self._source, '>'):\n+ stripped_string = string.strip()\n+\n+ # Skip empty lines.\n+ if not stripped_string:\n+ continue\n+\n+ is_comment = self._is_comment(stripped_string)\n+ if is_comment:\n+ # If it is a continuing comment\n+ if len(accumulated_strings) == 1:\n+ if not self._ignore_comments:\n+ accumulated_strings[0] += (' ' + stripped_string[1:])\n+ else:\n+ continue\n+\n+ elif accumulated_strings:\n+ description = accumulated_strings[0]\n+ sequence = ''.join(accumulated_strings[1:])\n+\n+ # Drop the translation stop sign.\n+ if sequence.endswith('*'):\n+ sequence = sequence[:-1]\n+ yield Protein(self.parser(description), sequence)\n+ accumulated_strings = [stripped_string[1:]]\n+ else:\n+ # accumulated_strings is empty; we're probably reading\n+ # the very first line of the file\n+ accumulated_strings.append(stripped_string[1:])\n+ else:\n+ accumulated_strings.append(stripped_string)\n+\n+\n+class IndexedFASTA(aux.IndexedTextReader, FASTABase):\n+ delimiter = '>'\n+ label = r'^>(.*)'\n+ _comments = {'>'}\n+\n+ def __init__(self, source, ignore_comments=False, parser=None, encoding='utf-8', block_size=None):\n+ aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding, block_size)\n+ FASTABase.__init__(self, source, ignore_comments, parser)\n+\n+ def _entry_from_offsets(self, start, end):\n+ lines = self._read_lines_from_offsets(start, end)\n+ return self._read_protein_lines(lines)\n+\n+ def _read(self, **kwargs):\n+ for key, offsets in self._offset_index.items():\n+ yield self._entry_from_offsets(*offsets)\n+\n+ def get_entry(self, key):\n+ offsets = self._offset_index.get(key)\n+ if offsets is not None:\n+ return self._entry_from_offsets(*offsets)\n+\n+\n@aux._file_reader()\ndef read(source=None, 
ignore_comments=False, parser=None):\n\"\"\"Read a FASTA file and return entries iteratively.\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/mgf.py",
"new_path": "pyteomics/mgf.py",
"diff": "@@ -91,7 +91,7 @@ class MGFBase():\nencoding = None\n- def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding=None):\n+ def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None):\n\"\"\"Create an MGF file object, set MGF-specific parameters.\nParameters\n@@ -247,7 +247,7 @@ class IndexedMGF(aux.IndexedTextReader, MGFBase):\n\"\"\"\ndelimiter = 'BEGIN IONS'\n- label = u'TITLE=([^\\n]+)\\n'\n+ label = 'TITLE=([^\\n]+)\\n'\ndef __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True, dtype=None, encoding='utf-8',\nblock_size=1000000):\n@@ -272,14 +272,12 @@ class IndexedMGF(aux.IndexedTextReader, MGFBase):\n-------\nout : dict\n\"\"\"\n-\n- self._source.seek(start)\n- lines = self._source.read(end-start).decode(self.encoding).split('\\n')\n+ lines = self._read_lines_from_offsets(start, end)\nreturn self._read_spectrum_lines(lines)\n- @aux._keepstate_method\n+ # @aux._keepstate_method\ndef get_spectrum(self, title):\n- if self._offset_index is not None and title in self._offset_index:\n+ if title in self._offset_index:\nstart, end = self._offset_index[title]\nreturn self._read_spectrum(start, end)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_fasta.py",
"new_path": "tests/test_fasta.py",
"diff": "@@ -8,22 +8,37 @@ import random\nimport string\nclass FastaTest(unittest.TestCase):\n+ maxDiff = None\ndef setUp(self):\nself.fasta_file = 'test.fasta'\n- self.fasta_entries_short = list(fasta.read(self.fasta_file, ignore_comments=True))\n- self.fasta_entries_long = list(fasta.read(self.fasta_file))\n+ self.fasta_entries_long = [\n+ ('test sequence test sequence 2', 'TEST'),\n+ ('test sequence 3', 'TEST'),\n+ ('test sequence 4', 'TEST')\n+ ]\n+ self.fasta_entries_short = [\n+ ('test sequence', 'TEST'),\n+ ('test sequence 3', 'TEST'),\n+ ('test sequence 4', 'TEST')\n+ ]\ndef test_simple_read_long_comments(self):\n- self.assertEqual(self.fasta_entries_long,\n- [('test sequence test sequence 2', 'TEST'),\n- ('test sequence 3', 'TEST'),\n- ('test sequence 4', 'TEST')])\n+ for reader in [fasta.read, fasta.FASTA]:\n+ self.assertEqual(self.fasta_entries_long, list(reader(self.fasta_file)))\ndef test_simple_read_short_comments(self):\n+ for reader in [fasta.read, fasta.FASTA]:\nself.assertEqual(self.fasta_entries_short,\n- [('test sequence', 'TEST'),\n- ('test sequence 3', 'TEST'),\n- ('test sequence 4', 'TEST')])\n+ list(reader(self.fasta_file, ignore_comments=True)))\n+\n+ def test_indexed_read(self):\n+ self.assertEqual(self.fasta_entries_short[1:],\n+ list(fasta.IndexedFASTA(self.fasta_file)))\n+\n+ def test_index_retrieve(self):\n+ key = 'test sequence 4'\n+ with fasta.IndexedFASTA(self.fasta_file) as ir:\n+ self.assertEqual(self.fasta_entries_short[2], ir[key])\ndef test_decoy_sequence_reverse(self):\nsequence = ''.join(random.choice(string.ascii_uppercase)\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | First draft of IndexedFASTA, update fasta tests |
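A usage sketch for the new indexed parser; the file name and header strings follow the updated test fixture and are assumptions here:

    from pyteomics import fasta

    with fasta.IndexedFASTA('test.fasta') as reader:
        for description, sequence in reader:   # sequential iteration over indexed entries
            print(description)
        protein = reader['test sequence 4']    # direct lookup by full header string
        print(protein.sequence)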
377,522 | 10.07.2018 23:45:46 | -10,800 | 1bda3108ea9544b4f12768daf36a727674f851ac | Remove the use of regex in record delimiters | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -192,9 +192,8 @@ class IndexedTextReader(FileReader):\ndef _chunk_iterator(self):\nfh = self._source.file\ndelim = remove_bom(self.delimiter.encode(self.encoding))\n- pattern = re.compile(delim)\nbuff = fh.read(self.block_size)\n- parts = pattern.split(buff)\n+ parts = buff.split(delim)\nstarted_with_delim = buff.startswith(delim)\ntail = parts[-1]\nfront = parts[:-1]\n@@ -218,7 +217,7 @@ class IndexedTextReader(FileReader):\nbuff = tail\nelse:\nbuff = tail + buff\n- parts = pattern.split(buff)\n+ parts = buff.split(delim)\ntail = parts[-1]\nfront = parts[:-1]\nfor part in front:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_mgf.py",
"new_path": "tests/test_mgf.py",
"diff": "@@ -36,8 +36,10 @@ class MGFTest(unittest.TestCase):\ndef test_read_decoding(self):\nfor func in [mgf.read, mgf.MGF, mgf.IndexedMGF]:\n- self.assertEqual(data.mgf_spectra_long_decoded, list(func(self.path, encoding=self._encoding)))\n- self.assertEqual(data.mgf_spectra_short_decoded, list(func(self.path, False, encoding=self._encoding)))\n+ self.assertEqual(data.mgf_spectra_long_decoded,\n+ list(func(self.path, encoding=self._encoding)))\n+ self.assertEqual(data.mgf_spectra_short_decoded,\n+ list(func(self.path, False, encoding=self._encoding)))\nwith func(self.path, encoding=self._encoding) as reader:\nself.assertEqual(data.mgf_spectra_long_decoded, list(reader))\nwith func(self.path, False, encoding=self._encoding) as reader:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Remove the use of regex in record delimiters |
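For a literal delimiter such as 'BEGIN IONS', str.split agrees with regex splitting while avoiding pattern compilation per chunk; a minimal check of the equivalence assumed by this change:

    import re

    buff = b'BEGIN IONS\nTITLE=a\nEND IONS\nBEGIN IONS\nTITLE=b\nEND IONS\n'
    delim = b'BEGIN IONS'

    # equivalent as long as the delimiter contains no regex metacharacters
    assert buff.split(delim) == re.split(re.escape(delim), buff)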
377,522 | 11.07.2018 01:35:48 | -10,800 | 17e572bfeb1ac3f37a5b8a628ffb434147af37eb | Parametrize label group, use it as key in offset index | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/auxiliary/file_helpers.py",
"new_path": "pyteomics/auxiliary/file_helpers.py",
"diff": "@@ -176,9 +176,12 @@ class IndexedTextReader(FileReader):\ndelimiter = None\nlabel = None\nblock_size = 1000000\n+ label_group = 1\n- def __init__(self, source, func, pass_file, args, kwargs, encoding='utf-8', block_size=None, delimiter=None, label=None):\n- # the underlying _file_obj gets None as encoding to avoid transparent decoding of StreamReader on read() calls\n+ def __init__(self, source, func, pass_file, args, kwargs, encoding='utf-8', block_size=None,\n+ delimiter=None, label=None, label_group=None):\n+ # the underlying _file_obj gets None as encoding\n+ # to avoid transparent decoding of StreamReader on read() calls\nsuper(IndexedTextReader, self).__init__(source, 'rb', func, pass_file, args, kwargs, encoding=None)\nself.encoding = encoding\nif delimiter is not None:\n@@ -187,6 +190,8 @@ class IndexedTextReader(FileReader):\nself.label = label\nif block_size is not None:\nself.block_size = block_size\n+ if label_group is not None:\n+ self.label_group = label_group\nself._offset_index = self.build_byte_index()\ndef _chunk_iterator(self):\n@@ -230,7 +235,7 @@ class IndexedTextReader(FileReader):\nfor chunk in self._chunk_iterator():\nmatch = pattern.search(chunk)\nif match:\n- label = match.group(1)\n+ label = match.group(self.label_group)\nyield i, label.decode(self.encoding), match\ni += len(chunk)\nyield i, None, None\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -90,32 +90,6 @@ class FASTABase():\ndef _is_comment(self, line):\nreturn line[0] in self._comments\n- def _read_protein_lines(self, lines):\n- description = []\n- sequence = []\n-\n- for string in lines:\n- stripped_string = string.strip()\n- if not stripped_string:\n- continue\n-\n- is_comment = self._is_comment(stripped_string)\n- if is_comment:\n- if (not description) or (not self._ignore_comments):\n- description.append(stripped_string[1:])\n- elif not description:\n- # we are not reading an entry from the beginning\n- continue\n- else:\n- sequence.append(stripped_string)\n-\n- description = ' '.join(description)\n- sequence = ''.join(sequence)\n- # Drop the translation stop sign.\n- if sequence and sequence[-1] == '*':\n- sequence = sequence[:-1]\n- return Protein(self.parser(description), sequence)\n-\ndef get_entry(self, key):\nraise NotImplementedError\n@@ -169,11 +143,35 @@ class FASTA(aux.FileReader, FASTABase):\nclass IndexedFASTA(aux.IndexedTextReader, FASTABase):\ndelimiter = '>'\nlabel = r'^>(.*)'\n- _comments = {'>'}\n- def __init__(self, source, ignore_comments=False, parser=None, encoding='utf-8', block_size=None):\n- aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding, block_size)\n- FASTABase.__init__(self, source, ignore_comments, parser)\n+ def __init__(self, source, ignore_comments=False, encoding='utf-8', block_size=None,\n+ delimiter=None, label=None, label_group=None):\n+ aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding,\n+ block_size, delimiter, label, label_group)\n+ FASTABase.__init__(self, source, ignore_comments)\n+\n+ def _read_protein_lines(self, lines):\n+ description = []\n+ sequence = []\n+\n+ for string in lines:\n+ stripped_string = string.strip()\n+ if not stripped_string:\n+ continue\n+\n+ is_comment = self._is_comment(stripped_string)\n+ if is_comment:\n+ if not description or not self._ignore_comments:\n+ description.append(stripped_string[1:])\n+ else:\n+ sequence.append(stripped_string)\n+\n+ description = ' '.join(description)\n+ sequence = ''.join(sequence)\n+ # Drop the translation stop sign.\n+ if sequence and sequence[-1] == '*':\n+ sequence = sequence[:-1]\n+ return Protein(self.parser(description), sequence)\ndef _entry_from_offsets(self, start, end):\nlines = self._read_lines_from_offsets(start, end)\n@@ -512,7 +510,7 @@ def write_decoy_db(source=None, output=None, mode='reverse', prefix='DECOY_',\n# auxiliary functions for parsing of FASTA headers\ndef _split_pairs(s):\nreturn dict(map(lambda x: x.strip(), x.split('='))\n- for x in re.split(' (?=\\w+=)', s.strip()))\n+ for x in re.split(r' (?=\\w+=)', s.strip()))\ndef _intify(d, keys):\nfor k in keys:\n@@ -520,10 +518,9 @@ def _intify(d, keys):\nd[k] = int(d[k])\n# definitions for custom parsers\n+_uniprotkb_header = r'^(\\w+)\\|([-\\w]+)\\|(\\w+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\ndef _parse_uniprotkb(header):\n- db, ID, entry, name, pairs, _ = re.match(\n- r'^(\\w+)\\|([-\\w]+)\\|(\\w+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$',\n- header).groups()\n+ db, ID, entry, name, pairs, _ = re.match(_uniprotkb_header, header).groups()\ngid, taxon = entry.split('_')\ninfo = {'db': db, 'id': ID, 'entry': entry,\n'name': name, 'gene_id': gid, 'taxon': taxon}\n@@ -531,29 +528,27 @@ def _parse_uniprotkb(header):\n_intify(info, ('PE', 'SV'))\nreturn info\n+_uniref_header = r'^(\\S+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\ndef _parse_uniref(header):\nassert 'Tax' in header\n- ID, cluster, pairs, _ = 
re.match(\n- r'^(\\S+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$',\n- header).groups()\n+ ID, cluster, pairs, _ = re.match(_uniref_header, header).groups()\ninfo = {'id': ID, 'cluster': cluster}\ninfo.update(_split_pairs(pairs))\ngid, taxon = info['RepID'].split('_')\ntype_, acc = ID.split('_')\n- info.update({'taxon': taxon, 'gene_id': gid,\n- 'type': type_, 'accession': acc})\n+ info.update({'taxon': taxon, 'gene_id': gid, 'type': type_, 'accession': acc})\n_intify(info, ('n',))\nreturn info\n+_uniparc_header = r'(\\S+)\\s+status=(\\w+)\\s*$'\ndef _parse_uniparc(header):\n- ID, status = re.match(r'(\\S+)\\s+status=(\\w+)\\s*$', header).groups()\n+ ID, status = re.match(_uniparc_header, header).groups()\nreturn {'id': ID, 'status': status}\n+_unimes_header = r'^(\\S+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\ndef _parse_unimes(header):\nassert 'OS=' in header and 'SV=' in header and 'PE=' not in header\n- ID, name, pairs, _ = re.match(\n- r'^(\\S+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$',\n- header).groups()\n+ ID, name, pairs, _ = re.match(_unimes_header, header).groups()\ninfo = {'id': ID, 'name': name}\ninfo.update(_split_pairs(pairs))\n_intify(info, ('SV',))\n@@ -572,6 +567,9 @@ std_parsers = {'uniprotkb': _parse_uniprotkb, 'uniref': _parse_uniref,\nformats are those described at\n`UniProt help page <http://www.uniprot.org/help/fasta-headers>`_.\"\"\"\n+_std_patterns = {'uniprotkb': _uniprotkb_header, 'uniref': _uniref_header,\n+ 'uniparc': _uniparc_header, 'unimes': _unimes_header}\n+\ndef parse(header, flavour='auto', parsers=None):\n\"\"\"Parse the FASTA header and return a nice dictionary.\n@@ -599,16 +597,16 @@ def parse(header, flavour='auto', parsers=None):\nflavour.\"\"\"\n# accept strings with and without leading '>'\n- if header.startswith('>'):\n+ if header and header[0] == '>':\nheader = header[1:]\n# choose the format\nknown = parsers or std_parsers\nif flavour.lower() == 'auto':\n- for fl, parser in known.items():\n+ for parser in known.values():\ntry:\nreturn parser(header)\n- except:\n+ except Exception:\npass\nraise aux.PyteomicsError('Unknown FASTA header format: ' + header)\nelif flavour.lower() in known:\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Parametrize label group, use it as key in offset index |
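A sketch of using the new label/label_group hooks to key the offset index by a captured header field; the header layout, pattern and file name are hypothetical:

    from pyteomics import fasta

    # suppose headers look like '>sp|P12345|SOME_NAME description';
    # capture the accession and use it as the index key
    reader = fasta.IndexedFASTA('proteins.fasta',
                                label=r'^>\w+\|(\w+)\|', label_group=1)
    protein = reader['P12345']  # lookup by the captured accession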
377,522 | 11.07.2018 01:40:47 | -10,800 | 318e74d6853bc02fc2daf9b1d938d67bba5d6fd8 | Bring back parser arg in IndexedFASTA | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -144,11 +144,11 @@ class IndexedFASTA(aux.IndexedTextReader, FASTABase):\ndelimiter = '>'\nlabel = r'^>(.*)'\n- def __init__(self, source, ignore_comments=False, encoding='utf-8', block_size=None,\n+ def __init__(self, source, ignore_comments=False, parser=None, encoding='utf-8', block_size=None,\ndelimiter=None, label=None, label_group=None):\naux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding,\nblock_size, delimiter, label, label_group)\n- FASTABase.__init__(self, source, ignore_comments)\n+ FASTABase.__init__(self, source, ignore_comments, parser)\ndef _read_protein_lines(self, lines):\ndescription = []\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Bring back parser arg in IndexedFASTA |
377,522 | 11.07.2018 18:13:45 | -10,800 | 1b19872f850b9294073f040f01196de9ca25a82f | Draft TwoLayerIndexedFASTA class | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -187,6 +187,35 @@ class IndexedFASTA(aux.IndexedTextReader, FASTABase):\nreturn self._entry_from_offsets(*offsets)\n+class TwoLayerIndexedFASTA(IndexedFASTA):\n+ header_group = 1\n+ def __init__(self, source, header_pattern, header_group=None,\n+ ignore_comments=False, parser=None, encoding='utf-8', block_size=None,\n+ delimiter=None, label=None, label_group=None):\n+ super(TwoLayerIndexedFASTA, self).__init__(source, ignore_comments, parser,\n+ encoding, block_size, delimiter, label, label_group)\n+ if header_group is not None:\n+ self.header_group = header_group\n+ self.header_pattern = header_pattern\n+ self.build_second_index()\n+\n+ def build_second_index(self):\n+ index = {}\n+ for key in self._offset_index:\n+ match = re.match(self.header_pattern, key)\n+ if match:\n+ index[match.group(self.header_group)] = key\n+ self._id2header = index\n+\n+ def get_entry(self, key):\n+ raw = super(TwoLayerIndexedFASTA, self).get_entry(key)\n+ if raw is not None:\n+ return raw\n+ header = self._id2header.get(key)\n+ if header is not None:\n+ return super(TwoLayerIndexedFASTA, self).get_entry(header)\n+\n+\n@aux._file_reader()\ndef read(source=None, ignore_comments=False, parser=None):\n\"\"\"Read a FASTA file and return entries iteratively.\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Draft TwoLayerIndexedFASTA class |
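A sketch of two-layer access with the new class; the pattern mirrors the test added in a later commit, and the file name is an assumption:

    from pyteomics import fasta

    reader = fasta.TwoLayerIndexedFASTA('test.fasta', r'test sequence (.*)')
    protein = reader['4']             # extracted field -> full header -> byte offsets
    same = reader['test sequence 4']  # full-header lookup is tried first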
377,522 | 12.07.2018 00:49:59 | -10,800 | 57d099f848bd1ed1c3ad2400ff43216391c37ccb | Fill in some missing docstrings, update tests in fasta | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG",
"new_path": "CHANGELOG",
"diff": "@@ -13,6 +13,20 @@ dev\nThe old, sequential parser is preserved under its name, :py:class:`pyteomics.mgf.MGF`.\nThe function :py:func:`pyteomics.mgf.read` now returns an instance of one of the two classes,\nbased on the `use_index` argument and the type of `source`.\n+ The common ancestor class, :py:class:`pyteomics.mgf.MGFBase`, can be used for type checking.\n+\n+ - New FASTA parsing classes:\n+\n+ - :py:class:`pyteomics.fasta.FASTABase` - common ancestor, suitable for type checking;\n+\n+ - :py:class:`pyteomics.fasta.FASTA` - text-mode, sequential parser. Does\n+ what the old :py:func:`fasta.read` was doing;\n+\n+ - :py:class:`pyteomics.fasta.IndexedFASTA` - binary-mode, indexing parser.\n+ Supports direct indexing by header string;\n+\n+ - :py:class:`pyteomics.fasta.TwoLayerIndexedFASTA` - additionally supports\n+ indexing by extracted header fields;\n3.5.1\n-----\n"
},
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -9,6 +9,34 @@ for the most detailed information on the format.\nData manipulation\n-----------------\n+Classes\n+.......\n+\n+Several classes of FASTA parsers are available. All of them have common features:\n+\n+ - context manager support;\n+\n+ - header parsing;\n+\n+ - direct iteration.\n+\n+Available classes:\n+\n+ :py:class:`pyteomics.fasta.FASTABase` - common ancestor, suitable for type checking.\n+ Abstract class.\n+\n+ :py:class:`pyteomics.fasta.FASTA` - text-mode, sequential parser.\n+ Good for iteration over database entries.\n+\n+ :py:class:`pyteomics.fasta.IndexedFASTA` - binary-mode, indexing parser.\n+ Supports direct indexing by header string.\n+\n+ :py:class:`pyteomics.fasta.TwoLayerIndexedFASTA` - additionally supports\n+ indexing by extracted header fields.\n+\n+Functions\n+.........\n+\n:py:func:`read` - iterate through entries in a FASTA database.\n:py:func:`chain` - read multiple files at once.\n@@ -78,11 +106,14 @@ from . import auxiliary as aux\nProtein = namedtuple('Protein', ('description', 'sequence'))\nclass FASTABase():\n+ \"\"\"Abstract base class for FASTA file parsers.\n+ Can be used for type checking.\n+ \"\"\"\nparser = staticmethod(lambda x: x)\n_ignore_comments = False\n_comments = set('>;')\n- def __init__(self, source, ignore_comments=False, parser=None):\n+ def __init__(self, ignore_comments=False, parser=None):\nself._ignore_comments = ignore_comments\nif parser is not None:\nself.parser = parser\n@@ -98,9 +129,35 @@ class FASTABase():\nclass FASTA(aux.FileReader, FASTABase):\n+ \"\"\"Text-mode, sequential FASTA parser.\n+ Suitable for iteration over the file to obtain all entries in order.\n+ \"\"\"\ndef __init__(self, source, ignore_comments=False, parser=None, encoding=None):\n+ \"\"\"Create a new FASTA parser object. Supports iteration,\n+ yields `(description, sequence)` tuples. Supports `with` syntax.\n+\n+ Parameters\n+ ----------\n+\n+ source : str or file-like\n+ File to read. If file object, it must be opened in *text* mode.\n+ ignore_comments : bool, optional\n+ If :py:const:`True` then ignore the second and subsequent lines of description.\n+ Default is :py:const:`False`, which concatenates multi-line descriptions into\n+ a single string.\n+ parser : function or None, optional\n+ Defines whether the FASTA descriptions should be parsed. 
If it is a\n+ function, that function will be given the description string, and\n+ the returned value will be yielded together with the sequence.\n+ The :py:data:`std_parsers` dict has parsers for several formats.\n+ Hint: specify :py:func:`parse` as the parser to apply automatic\n+ format recognition.\n+ Default is :py:const:`None`, which means return the header \"as is\".\n+ encoding : str or None, optional\n+ File encoding (if it is given by name).\n+ \"\"\"\naux.FileReader.__init__(self, source, 'r', self._read, False, (), {}, encoding)\n- FASTABase.__init__(self, source, ignore_comments, parser)\n+ FASTABase.__init__(self, ignore_comments, parser)\nself.encoding = encoding\ndef _read(self):\n@@ -128,7 +185,7 @@ class FASTA(aux.FileReader, FASTABase):\nsequence = ''.join(accumulated_strings[1:])\n# Drop the translation stop sign.\n- if sequence.endswith('*'):\n+ if sequence and sequence[-1] == '*':\nsequence = sequence[:-1]\nyield Protein(self.parser(description), sequence)\naccumulated_strings = [stripped_string[1:]]\n@@ -139,16 +196,50 @@ class FASTA(aux.FileReader, FASTABase):\nelse:\naccumulated_strings.append(stripped_string)\n+ def get_entry(self, key):\n+ raise aux.PyteomicsError('Direct indexing is not supported. '\n+ 'Use IndexedFASTA and its subclasses')\n+\nclass IndexedFASTA(aux.IndexedTextReader, FASTABase):\n+ \"\"\"Indexed FASTA parser. Supports direct indexing by matched labels.\"\"\"\ndelimiter = '>'\nlabel = r'^>(.*)'\n- def __init__(self, source, ignore_comments=False, parser=None, encoding='utf-8', block_size=None,\n- delimiter=None, label=None, label_group=None):\n- aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, encoding,\n- block_size, delimiter, label, label_group)\n- FASTABase.__init__(self, source, ignore_comments, parser)\n+ def __init__(self, source, ignore_comments=False, parser=None, **kwargs):\n+ \"\"\"Create an indexed FASTA parser object.\n+\n+ Parameters\n+ ----------\n+ source : str or file-like\n+ File to read. If file object, it must be opened in *binary* mode.\n+ ignore_comments : bool, optional\n+ If :py:const:`True` then ignore the second and subsequent lines of description.\n+ Default is :py:const:`False`, which concatenates multi-line descriptions into\n+ a single string.\n+ parser : function or None, optional\n+ Defines whether the FASTA descriptions should be parsed. If it is a\n+ function, that function will be given the description string, and\n+ the returned value will be yielded together with the sequence.\n+ The :py:data:`std_parsers` dict has parsers for several formats.\n+ Hint: specify :py:func:`parse` as the parser to apply automatic\n+ format recognition.\n+ Default is :py:const:`None`, which means return the header \"as is\".\n+ encoding : str or None, optional, keyword only\n+ File encoding. Default is UTF-8.\n+ block_size : int or None, optional, keyword only\n+ Number of bytes to consume at once.\n+ delimiter : str or None, optional, keyword only\n+ Overrides the FASTA record delimiter (default is ``'>'``).\n+ label : str or None, optional, keyword only\n+ Overrides the FASTA record label pattern. 
Default is ``r'^>(.*)'``.\n+ label_group : int or str, optional, keyword only\n+ Overrides the matched group used as key in the byte offset index.\n+ This in combination with `label` can be used to extract fields from headers.\n+ However, consider using :py:class:`TwoLayerIndexedFASTA` for this purpose.\n+ \"\"\"\n+ aux.IndexedTextReader.__init__(self, source, self._read, False, (), {}, **kwargs)\n+ FASTABase.__init__(self, ignore_comments, parser)\ndef _read_protein_lines(self, lines):\ndescription = []\n@@ -188,18 +279,50 @@ class IndexedFASTA(aux.IndexedTextReader, FASTABase):\nclass TwoLayerIndexedFASTA(IndexedFASTA):\n+ \"\"\"Parser with two-layer index. Extracted groups are mapped to full headers (where possible),\n+ full headers are mapped to byte offsets.\n+\n+ When indexed, they key is looked up in both indexes, allowing access by meaningful IDs\n+ (like UniProt accession) and by full header string.\"\"\"\nheader_group = 1\ndef __init__(self, source, header_pattern, header_group=None,\n- ignore_comments=False, parser=None, encoding='utf-8', block_size=None,\n- delimiter=None, label=None, label_group=None):\n- super(TwoLayerIndexedFASTA, self).__init__(source, ignore_comments, parser,\n- encoding, block_size, delimiter, label, label_group)\n+ ignore_comments=False, parser=None, **kwargs):\n+ \"\"\"Open `source` and create a two-layer index for convenient random access\n+ both by full header strings and extracted fields.\n+\n+ Parameters\n+ ----------\n+ source : str or file-like\n+ File to read. If file object, it must be opened in *binary* mode.\n+ header_pattern : str or RE\n+ Pattern to match the header string. Must capture the group used\n+ for the second index.\n+ header_group : int or str or None, optional\n+ Defines which group is used as key in the second-level index.\n+ Default is 1.\n+ ignore_comments : bool, optional\n+ If :py:const:`True` then ignore the second and subsequent lines of description.\n+ Default is :py:const:`False`, which concatenates multi-line descriptions into\n+ a single string.\n+ parser : function or None, optional\n+ Defines whether the FASTA descriptions should be parsed. If it is a\n+ function, that function will be given the description string, and\n+ the returned value will be yielded together with the sequence.\n+ The :py:data:`std_parsers` dict has parsers for several formats.\n+ Hint: specify :py:func:`parse` as the parser to apply automatic\n+ format recognition.\n+ Default is :py:const:`None`, which means return the header \"as is\".\n+\n+ Other arguments : the same as for :py:class:`IndexedFASTA`.\n+ \"\"\"\n+ super(TwoLayerIndexedFASTA, self).__init__(source, ignore_comments, parser, **kwargs)\nif header_group is not None:\nself.header_group = header_group\nself.header_pattern = header_pattern\nself.build_second_index()\ndef build_second_index(self):\n+ \"\"\"Create the mapping from extracted field to whole header string.\"\"\"\nindex = {}\nfor key in self._offset_index:\nmatch = re.match(self.header_pattern, key)\n@@ -208,6 +331,7 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nself._id2header = index\ndef get_entry(self, key):\n+ \"\"\"Get the entry by value of header string or extracted field.\"\"\"\nraw = super(TwoLayerIndexedFASTA, self).get_entry(key)\nif raw is not None:\nreturn raw\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_fasta.py",
"new_path": "tests/test_fasta.py",
"diff": "from os import path\n-import pyteomics\n-pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\nimport tempfile\nimport unittest\n-from pyteomics import fasta\nimport random\nimport string\n+import pyteomics\n+pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]\n+from pyteomics import fasta\nclass FastaTest(unittest.TestCase):\nmaxDiff = None\n@@ -32,14 +32,20 @@ class FastaTest(unittest.TestCase):\nlist(reader(self.fasta_file, ignore_comments=True)))\ndef test_indexed_read(self):\n- self.assertEqual(self.fasta_entries_short[1:],\n- list(fasta.IndexedFASTA(self.fasta_file)))\n+ tlir = fasta.TwoLayerIndexedFASTA(self.fasta_file, r'>(.*)')\n+ ir = fasta.IndexedFASTA(self.fasta_file)\n+ for reader in [ir, tlir]:\n+ self.assertEqual(self.fasta_entries_short[1:], list(reader))\ndef test_index_retrieve(self):\nkey = 'test sequence 4'\nwith fasta.IndexedFASTA(self.fasta_file) as ir:\nself.assertEqual(self.fasta_entries_short[2], ir[key])\n+ def test_two_layer_retrieve(self):\n+ with fasta.TwoLayerIndexedFASTA(self.fasta_file, r'test sequence (.*)') as tlir:\n+ self.assertEqual(self.fasta_entries_short[2], tlir['4'])\n+\ndef test_decoy_sequence_reverse(self):\nsequence = ''.join(random.choice(string.ascii_uppercase)\nfor i in range(random.randint(1, 50)))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Fill in some missing docstrings, update tests in fasta |
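The sequential parser now rejects direct indexing with an explicit error; a sketch of the contrast, with the file name and header assumed from the test fixture:

    from pyteomics import fasta
    from pyteomics.auxiliary import PyteomicsError

    try:
        fasta.FASTA('test.fasta')['test sequence 4']
    except PyteomicsError as err:
        print(err)  # points the user to IndexedFASTA and its subclasses

    with fasta.IndexedFASTA('test.fasta') as reader:
        print(reader['test sequence 4'])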
377,522 | 12.07.2018 19:34:37 | -10,800 | 64ea52d3ebdf6bdce83698cc5315f5dfd34263bd | Make header_pattern optional in TwoLayerIndexedFASTA | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -109,7 +109,7 @@ class FASTABase():\n\"\"\"Abstract base class for FASTA file parsers.\nCan be used for type checking.\n\"\"\"\n- parser = staticmethod(lambda x: x)\n+ parser = None\n_ignore_comments = False\n_comments = set('>;')\n@@ -187,7 +187,9 @@ class FASTA(aux.FileReader, FASTABase):\n# Drop the translation stop sign.\nif sequence and sequence[-1] == '*':\nsequence = sequence[:-1]\n- yield Protein(self.parser(description), sequence)\n+ if self.parser is not None:\n+ description = self.parser(description)\n+ yield Protein(description, sequence)\naccumulated_strings = [stripped_string[1:]]\nelse:\n# accumulated_strings is empty; we're probably reading\n@@ -262,7 +264,9 @@ class IndexedFASTA(aux.IndexedTextReader, FASTABase):\n# Drop the translation stop sign.\nif sequence and sequence[-1] == '*':\nsequence = sequence[:-1]\n- return Protein(self.parser(description), sequence)\n+ if self.parser is not None:\n+ description = self.parser(description)\n+ return Protein(description, sequence)\ndef _entry_from_offsets(self, start, end):\nlines = self._read_lines_from_offsets(start, end)\n@@ -285,7 +289,8 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nWhen indexed, they key is looked up in both indexes, allowing access by meaningful IDs\n(like UniProt accession) and by full header string.\"\"\"\nheader_group = 1\n- def __init__(self, source, header_pattern, header_group=None,\n+ header_pattern = None\n+ def __init__(self, source, header_pattern=None, header_group=None,\nignore_comments=False, parser=None, **kwargs):\n\"\"\"Open `source` and create a two-layer index for convenient random access\nboth by full header strings and extracted fields.\n@@ -294,9 +299,9 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\n----------\nsource : str or file-like\nFile to read. If file object, it must be opened in *binary* mode.\n- header_pattern : str or RE\n+ header_pattern : str or RE or None, optional\nPattern to match the header string. Must capture the group used\n- for the second index.\n+ for the second index. If :py:const:`None` (default), second-level index is not created.\nheader_group : int or str or None, optional\nDefines which group is used as key in the second-level index.\nDefault is 1.\n@@ -318,11 +323,15 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nsuper(TwoLayerIndexedFASTA, self).__init__(source, ignore_comments, parser, **kwargs)\nif header_group is not None:\nself.header_group = header_group\n+ if header_pattern is not None:\nself.header_pattern = header_pattern\nself.build_second_index()\ndef build_second_index(self):\n\"\"\"Create the mapping from extracted field to whole header string.\"\"\"\n+ if self.header_pattern is None:\n+ self._id2header = None\n+ else:\nindex = {}\nfor key in self._offset_index:\nmatch = re.match(self.header_pattern, key)\n@@ -335,11 +344,13 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nraw = super(TwoLayerIndexedFASTA, self).get_entry(key)\nif raw is not None:\nreturn raw\n+ if self._id2header:\nheader = self._id2header.get(key)\nif header is not None:\nreturn super(TwoLayerIndexedFASTA, self).get_entry(header)\n+\n@aux._file_reader()\ndef read(source=None, ignore_comments=False, parser=None):\n\"\"\"Read a FASTA file and return entries iteratively.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_fasta.py",
"new_path": "tests/test_fasta.py",
"diff": "@@ -32,7 +32,7 @@ class FastaTest(unittest.TestCase):\nlist(reader(self.fasta_file, ignore_comments=True)))\ndef test_indexed_read(self):\n- tlir = fasta.TwoLayerIndexedFASTA(self.fasta_file, r'>(.*)')\n+ tlir = fasta.TwoLayerIndexedFASTA(self.fasta_file)\nir = fasta.IndexedFASTA(self.fasta_file)\nfor reader in [ir, tlir]:\nself.assertEqual(self.fasta_entries_short[1:], list(reader))\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Make header_pattern optional in TwoLayerIndexedFASTA |
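When header_pattern is omitted, no second-level index is built: lookup by full header still works and field lookup simply returns None. A sketch, with file name and headers assumed from the test fixture:

    from pyteomics import fasta

    reader = fasta.TwoLayerIndexedFASTA('test.fasta')  # header_pattern defaults to None
    print(reader['test sequence 4'])                   # first-layer lookup by full header
    print(reader['4'])                                 # None: no second-level index exists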
377,522 | 19.07.2018 01:02:21 | -10,800 | 8102351cf14499908973d7d55d2d5410e6bd7cbf | Add format-specific fasta parser classes; change flavour to flavor in parse (sorry) | [
{
"change_type": "MODIFY",
"old_path": "pyteomics/fasta.py",
"new_path": "pyteomics/fasta.py",
"diff": "@@ -350,27 +350,152 @@ class TwoLayerIndexedFASTA(IndexedFASTA):\nreturn super(TwoLayerIndexedFASTA, self).get_entry(header)\n+class FlavoredMixin():\n+ \"\"\"Parser aimed at a specific FASTA flavor.\n+ Subclasses should define `parser` and `header_pattern`.\n+ The `parse` argument in :py:meth:`__init__` defines whether description is\n+ parsed in output.\n+ \"\"\"\n+ def __init__(self, parse=True):\n+ if not parse:\n+ self.parser = None\n-@aux._file_reader()\n-def read(source=None, ignore_comments=False, parser=None):\n- \"\"\"Read a FASTA file and return entries iteratively.\n+\n+class UniProtMixin(FlavoredMixin):\n+ header_pattern = r'^(\\w+)\\|([-\\w]+)\\|(\\w+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\n+ header_group = 2\n+\n+ def parser(self, header):\n+ db, ID, entry, name, pairs, _ = re.match(self.header_pattern, header).groups()\n+ gid, taxon = entry.split('_')\n+ info = {'db': db, 'id': ID, 'entry': entry,\n+ 'name': name, 'gene_id': gid, 'taxon': taxon}\n+ info.update(_split_pairs(pairs))\n+ _intify(info, ('PE', 'SV'))\n+ return info\n+\n+class UniProt(UniProtMixin, FASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ FASTA.__init__(self, source, **kwargs)\n+ UniProtMixin.__init__(self, parse)\n+\n+\n+class IndexedUniProt(UniProtMixin, TwoLayerIndexedFASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n+ UniProtMixin.__init__(self, parse)\n+\n+class UniRefMixin(FlavoredMixin):\n+ header_pattern = r'^(\\S+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\n+\n+ def parser(self, header):\n+ assert 'Tax' in header\n+ ID, cluster, pairs, _ = re.match(self.header_pattern, header).groups()\n+ info = {'id': ID, 'cluster': cluster}\n+ info.update(_split_pairs(pairs))\n+ gid, taxon = info['RepID'].split('_')\n+ type_, acc = ID.split('_')\n+ info.update({'taxon': taxon, 'gene_id': gid, 'type': type_, 'accession': acc})\n+ _intify(info, ('n',))\n+ return info\n+\n+\n+class UniRef(UniRefMixin, FASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ FASTA.__init__(self, source, **kwargs)\n+ UniRefMixin.__init__(self, parse)\n+\n+\n+class IndexedUniRef(UniRefMixin, TwoLayerIndexedFASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n+ UniRefMixin.__init__(self, parse)\n+\n+\n+class UniParcMixin(FlavoredMixin):\n+ header_pattern = r'(\\S+)\\s+status=(\\w+)\\s*$'\n+\n+ def parser(self, header):\n+ ID, status = re.match(self.header_pattern, header).groups()\n+ return {'id': ID, 'status': status}\n+\n+\n+class UniParc(UniParcMixin, FASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ FASTA.__init__(self, source, **kwargs)\n+ UniParcMixin.__init__(self, parse)\n+\n+\n+class IndexedUniParc(UniParcMixin, TwoLayerIndexedFASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n+ UniParcMixin.__init__(self, parse)\n+\n+\n+class UniMesMixin(FlavoredMixin):\n+ header_pattern = r'^(\\S+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\n+\n+ def parser(self, header):\n+ assert 'OS=' in header and 'SV=' in header and 'PE=' not in header\n+ ID, name, pairs, _ = re.match(self.header_pattern, header).groups()\n+ info = {'id': ID, 'name': name}\n+ info.update(_split_pairs(pairs))\n+ _intify(info, ('SV',))\n+ return info\n+\n+\n+class UniMes(UniMesMixin, FASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ FASTA.__init__(self, source, **kwargs)\n+ 
UniMesMixin.__init__(self, parse)\n+\n+\n+class IndexedUniMes(UniMesMixin, TwoLayerIndexedFASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n+ UniMesMixin.__init__(self, parse)\n+\n+\n+class SPDMixin(FlavoredMixin):\n+ header_pattern = r'^([^|]+?)\\s*\\|\\s*(([^|]+?)_([^|]+?))\\s*\\|\\s*([^|]+?)\\s*$'\n+\n+ def parser(self, header):\n+ assert '=' not in header\n+ ID, gene, gid, taxon, d = re.match(self.header_pattern, header).groups()\n+ return {'id': ID, 'gene': gene, 'description': d,\n+ 'taxon': taxon, 'gene_id': gid}\n+\n+\n+class SPD(SPDMixin, FASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ FASTA.__init__(self, source, **kwargs)\n+ SPDMixin.__init__(self, parse)\n+\n+\n+class IndexedSPD(SPDMixin, TwoLayerIndexedFASTA):\n+ def __init__(self, source, parse=True, **kwargs):\n+ TwoLayerIndexedFASTA.__init__(self, source, **kwargs)\n+ SPDMixin.__init__(self, parse)\n+\n+\n+def read(source=None, use_index=False, flavor=None, **kwargs):\n+ \"\"\"Parse a FASTA file. This function serves as a dispatcher between\n+ different parsers available in this module.\nParameters\n----------\nsource : str or file or None, optional\nA file object (or file name) with a FASTA database. Default is\n:py:const:`None`, which means read standard input.\n- ignore_comments : bool, optional\n- If True then ignore the second and subsequent lines of description.\n- Default is :py:const:`False`.\n- parser : function or None, optional\n- Defines whether the fasta descriptions should be parsed. If it is a\n- function, that function will be given the description string, and\n- the returned value will be yielded together with the sequence.\n- The :py:data:`std_parsers` dict has parsers for several formats.\n- Hint: specify :py:func:`parse` as the parser to apply automatic\n- format recognition.\n- Default is :py:const:`None`, which means return the header \"as is\".\n+ use_index : bool, optional\n+ If :py:const:`True`, the created parser object will be an instance of\n+ :py:class:`IndexedFASTA`. If :py:const:`False` (default), it will be\n+ an instance of :py:class:`FASTA`.\n+ flavor : str or None, optional\n+ A supported FASTA header format. If specified, a format-specific\n+ parser instance is returned.\n+\n+ .. note:: See :py:data:`std_parsers` for supported flavors.\nReturns\n-------\n@@ -378,41 +503,13 @@ def read(source=None, ignore_comments=False, parser=None):\nA named 2-tuple with FASTA header (str or dict) and sequence (str).\nAttributes 'description' and 'sequence' are also provided.\n\"\"\"\n- f = parser or (lambda x: x)\n- accumulated_strings = []\n-\n- # Iterate through '>' after the file is over to retrieve the last entry.\n- for string in itertools.chain(source, '>'):\n- stripped_string = string.strip()\n-\n- # Skip empty lines.\n- if not stripped_string:\n- continue\n-\n- is_comment = (stripped_string[0] in '>;')\n- if is_comment:\n- # If it is a continuing comment\n- if len(accumulated_strings) == 1:\n- if not ignore_comments:\n- accumulated_strings[0] += (' '+stripped_string[1:])\n- else:\n- continue\n+ try:\n+ parser = std_parsers[flavor and flavor.lower()]\n+ except KeyError:\n+ raise aux.PyteomicsError('No parser for flavor: {}. 
Supported flavors: {}'.format(\n+ flavor, ', '.join(map(str, std_parsers))))\n+ return parser[use_index](source, **kwargs)\n- elif accumulated_strings:\n- description = accumulated_strings[0]\n- sequence = ''.join(accumulated_strings[1:])\n-\n- # Drop the translation stop sign.\n- if sequence.endswith('*'):\n- sequence = sequence[:-1]\n- yield Protein(f(description), sequence)\n- accumulated_strings = [stripped_string[1:], ]\n- else:\n- # accumulated_strings is empty; we're probably reading\n- # the very first line of the file\n- accumulated_strings.append(stripped_string[1:])\n- else:\n- accumulated_strings.append(stripped_string)\n@aux._file_writer()\ndef write(entries, output=None):\n@@ -681,60 +778,18 @@ def _intify(d, keys):\nif k in d:\nd[k] = int(d[k])\n-# definitions for custom parsers\n-_uniprotkb_header = r'^(\\w+)\\|([-\\w]+)\\|(\\w+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\n-def _parse_uniprotkb(header):\n- db, ID, entry, name, pairs, _ = re.match(_uniprotkb_header, header).groups()\n- gid, taxon = entry.split('_')\n- info = {'db': db, 'id': ID, 'entry': entry,\n- 'name': name, 'gene_id': gid, 'taxon': taxon}\n- info.update(_split_pairs(pairs))\n- _intify(info, ('PE', 'SV'))\n- return info\n-\n-_uniref_header = r'^(\\S+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\n-def _parse_uniref(header):\n- assert 'Tax' in header\n- ID, cluster, pairs, _ = re.match(_uniref_header, header).groups()\n- info = {'id': ID, 'cluster': cluster}\n- info.update(_split_pairs(pairs))\n- gid, taxon = info['RepID'].split('_')\n- type_, acc = ID.split('_')\n- info.update({'taxon': taxon, 'gene_id': gid, 'type': type_, 'accession': acc})\n- _intify(info, ('n',))\n- return info\n-\n-_uniparc_header = r'(\\S+)\\s+status=(\\w+)\\s*$'\n-def _parse_uniparc(header):\n- ID, status = re.match(_uniparc_header, header).groups()\n- return {'id': ID, 'status': status}\n-\n-_unimes_header = r'^(\\S+)\\s+([^=]*\\S)((\\s+\\w+=[^=]+(?!\\w*=))+)\\s*$'\n-def _parse_unimes(header):\n- assert 'OS=' in header and 'SV=' in header and 'PE=' not in header\n- ID, name, pairs, _ = re.match(_unimes_header, header).groups()\n- info = {'id': ID, 'name': name}\n- info.update(_split_pairs(pairs))\n- _intify(info, ('SV',))\n- return info\n-\n-def _parse_spd(header):\n- assert '=' not in header\n- ID, gene, d = map(lambda s: s.strip(), header.split('|'))\n- gid, taxon = gene.split('_')\n- return {'id': ID, 'gene': gene, 'description': d,\n- 'taxon': taxon, 'gene_id': gid}\n-std_parsers = {'uniprotkb': _parse_uniprotkb, 'uniref': _parse_uniref,\n- 'uniparc': _parse_uniparc, 'unimes': _parse_unimes, 'spd': _parse_spd}\n+std_parsers = {'uniprot': (UniProt, IndexedUniProt), 'uniref': (UniRef, IndexedUniRef),\n+ 'uniparc': (UniParc, IndexedUniParc), 'unimes': (UniMes, IndexedUniMes),\n+ 'spd': (SPD, IndexedSPD), None: (FASTA, IndexedFASTA)}\n\"\"\"A dictionary with parsers for known FASTA header formats. 
For now, supported\nformats are those described at\n`UniProt help page <http://www.uniprot.org/help/fasta-headers>`_.\"\"\"\n-_std_patterns = {'uniprotkb': _uniprotkb_header, 'uniref': _uniref_header,\n- 'uniparc': _uniparc_header, 'unimes': _unimes_header}\n+_std_mixins = {'uniprot': UniProtMixin, 'uniref': UniRefMixin,\n+ 'uniparc': UniParcMixin, 'unimes': UniMesMixin, 'spd': SPDMixin}\n-def parse(header, flavour='auto', parsers=None):\n+def parse(header, flavor='auto', parsers=None):\n\"\"\"Parse the FASTA header and return a nice dictionary.\nParameters\n@@ -742,44 +797,47 @@ def parse(header, flavour='auto', parsers=None):\nheader : str\nFASTA header to parse\n- flavour : str, optional\n+ flavor : str, optional\nShort name of the header format (case-insensitive). Valid values are\n:py:const:`'auto'` and keys of the `parsers` dict. Default is\n:py:const:`'auto'`, which means try all formats in turn and return the\nfirst result that can be obtained without an exception.\nparsers : dict, optional\nA dict where keys are format names (lowercased) and values are functions\n- that take a header string and return the parsed header. Default is\n- :py:const:`None`, which means use the default dictionary\n- :py:data:`std_parsers`.\n+ that take a header string and return the parsed header.\nReturns\n-------\nout : dict\nA dictionary with the info from the header. The format depends on the\n- flavour.\"\"\"\n-\n+ flavor.\n+ \"\"\"\n+ parser_function = lambda cls: cls().parser\n+ flavor = flavor.lower()\n# accept strings with and without leading '>'\nif header and header[0] == '>':\nheader = header[1:]\n# choose the format\n- known = parsers or std_parsers\n- if flavour.lower() == 'auto':\n+ known = parsers or _std_mixins\n+\n+ if flavor == 'auto':\nfor parser in known.values():\ntry:\n- return parser(header)\n+ return parser_function(parser)(header)\nexcept Exception:\npass\nraise aux.PyteomicsError('Unknown FASTA header format: ' + header)\n- elif flavour.lower() in known:\n+ elif flavor in known:\ntry:\n- return known[flavour.lower()](header)\n+ return parser_function(known[flavor])(header)\nexcept Exception as e:\nraise aux.PyteomicsError('Could not parse header as \"{}\". '\n'The error message was: {}: {}. Header: \"{}\"'.format(\n- flavour, type(e).__name__, e.args[0], header))\n+ flavor, type(e).__name__, e.args[0], header))\n+ raise aux.PyteomicsError('Unknown flavor: {}'.format(flavor))\n+\nchain = aux._make_chain(read, 'read')\ndecoy_chain = aux._make_chain(decoy_db, 'decoy_db')\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | levitsky/pyteomics | Add format-specific fasta parser classes; change flavour to flavor in parse (sorry) |
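A sketch of the reworked dispatcher and the renamed keyword; the database file name is an assumption, and the header is a representative UniProtKB-style example:

    from pyteomics import fasta

    # flavor and use_index together pick a class from std_parsers
    reader = fasta.read('uniprot_sprot.fasta', use_index=True, flavor='uniprot')
    assert isinstance(reader, fasta.IndexedUniProt)

    # header parsing with the renamed keyword (flavour -> flavor, default 'auto')
    info = fasta.parse('>sp|P27748|ACOX_RALEH Acetoin catabolism protein X '
                       'OS=Ralstonia eutropha GN=acoX PE=4 SV=2')
    print(info['id'], info['gene_id'], info['taxon'])  # P27748 ACOX RALEH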