python_code | repo_name | file_path
---|---|---|
# This is an example of how to access the content of TREC CAR data
# and convert it into a string of content with offset-based entity link annotations.
# Feel free to use the AnnotatedContentBuilder.
# I highly recommend that you implement your own version of `annotate_section_content`,
# because you need to make decisions on which content to include, where to
# further insert newlines, etc.
# Keep in mind: whatever you add to your output needs to go through the
# AnnotatedContentBuilder, or offsets won't match.
# You can add all kinds of semantic annotations on offsets. However, in the current
# implementation they must be non-overlapping.
from trec_car.read_data import *
class Annotation():
"""Wraps a semantic annotation with offset information """
def __init__(self, start, end, annotation):
self.start = start
self.end = end
self.annotation = annotation
class AnnotatedContentBuilder():
"""Builds a string iteratively and keeps track of offsets.
Chunks of plain text and semantic annotations need to be added in order
"""
def __init__(self):
self.content = ""
self.offset = 0
self.annotations = []
def append(self, chunk, optAnnotation=None):
start = self.offset
self.content += chunk
self.offset = len(self.content)
end = self.offset
if optAnnotation:
self.annotations.append( Annotation(start=start, end=end, annotation=optAnnotation))
def get_content(self):
return self.content
def get_annotations(self):
return self.annotations
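# --- Illustration (not part of the original tool): a minimal sketch of how the
# builder keeps offsets aligned. Every chunk must pass through append(), so the
# recorded start/end positions always index into the final content string.
# The entity id used below is a made-up placeholder.
def _builder_offset_example():
    builder = AnnotatedContentBuilder()
    builder.append("Plain text ")
    builder.append("linked text", optAnnotation="enwiki:Some%20Entity")
    content = builder.get_content()
    for ann in builder.get_annotations():
        # the slice recovered from the offsets equals the annotated chunk
        assert content[ann.start:ann.end] == "linked text"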
def annotate_section_content(section):
""" Example implementation to break out the content of a (top-level) section with entity links """
def annotated_content(skel, contentBuilder):
if isinstance(skel, Section):
contentBuilder.append('\n')
contentBuilder.append(skel.heading)
contentBuilder.append('\n')
for child in skel.children:
annotated_content(child, contentBuilder)
# contentBuilder.append('\n')
elif isinstance(skel, List):
annotated_content(skel.body, contentBuilder)
elif isinstance(skel, Para):
for body in skel.paragraph.bodies:
annotated_content_bodies(body, contentBuilder)
contentBuilder.append('\n')
else:
pass
def annotated_content_bodies(body, contentBuilder):
if isinstance(body, ParaLink):
contentBuilder.append(body.get_text(), body)
elif isinstance(body, ParaText):
contentBuilder.append(body.get_text())
else:
pass
contentBuilder = AnnotatedContentBuilder()
for child in section.children:
annotated_content(child, contentBuilder)
return contentBuilder
if __name__ == '__main__':
import sys
if len(sys.argv)<2 or len(sys.argv)>3:
print("usage ",sys.argv[0]," articlefile")
exit()
articles=sys.argv[1]
with open(articles, 'rb') as f:
for p in iter_pages(f):
print('\npagename:', p.page_name)
print('\npageid:', p.page_id)
print("get content of top-level sections, with subsections inlined and broken out entity offsets")
for section in p.child_sections:
print(" == ",section.heading ," ==")
builder = annotate_section_content(section)
print(builder.get_content())
for ann in builder.get_annotations():
print(ann.start, ann.end, ann.annotation)
print()
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/annotated_content.py |
from trec_car.format_runs import *
from trec_car.read_data import *
import itertools
import sys
if len(sys.argv)<4:
print("usage ",sys.argv[0]," outlinefile paragraphfile out")
exit()
query_cbor=sys.argv[1]
psg_cbor=sys.argv[2]
out=sys.argv[3]
pages = []
with open(query_cbor, 'rb') as f:
pages = [p for p in itertools.islice(iter_annotations(f), 0, 1000)]
paragraphs = []
with open(psg_cbor, 'rb') as f:
d = {p.para_id: p for p in itertools.islice(iter_paragraphs(f), 0, 500 ,5)}
paragraphs = d.values()
print("pages: ", len(pages))
print("paragraphs: ", len(paragraphs))
mock_ranking = [(p, 1.0 / (r + 1), (r + 1)) for p, r in zip(paragraphs, range(0, 1000))]
with open(out,mode='w', encoding='UTF-8') as f:
writer = f
numqueries = 0
for page in pages:
for section_path in page.flat_headings_list():
numqueries += 1
query_id = "/".join([page.page_id]+[section.headingId for section in section_path])
ranking = [RankingEntry(query_id, p.para_id, r, s, paragraph_content=p) for p, s, r in mock_ranking]
format_run(writer, ranking, exp_name='test')
f.close()
print("num queries = ", numqueries)
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/format_runs_test.py |
"""__init__ module for trec-car-tools, imports all necessary functions for reading cbor data provided in the TREC CAR"""
__version__ = 1.0
__all__ = ['read_data', 'format_runs']
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/trec_car/__init__.py |
# Use python 3.6 or higher
# obsolete: conda install -c auto cbor=0.1.4
from __future__ import print_function
from abc import abstractmethod
import cbor
import itertools
import typing
PageId = str
PageName = str
class CborElementNotDefinedException(Exception):
def __init__(self, cbor):
self.cbor = cbor
Exception.__init__(self, 'unknown Cbor element encountered: %s' % str(cbor))
class WrongCarFileException(Exception):
def __init__(self, file_type, expected_file_types):
self.file_type = file_type
self.expected_file_types = expected_file_types
Exception.__init__(self, 'Open method does not support CAR file type: %s. Instead expect following CAR file types: %s' % (str(file_type), str(expected_file_types)))
class BrokenCborFileException(Exception):
def __init__(self):
Exception.__init__(self, 'Corrupt, incomplete, or otherwise broken CBOR file. Please re-download or contact the organizers or use appropriate reader to open this file.')
class Page(object):
"""
The name and skeleton of a Wikipedia page.
.. attribute:: page_name
:rtype: PageName
The name of the page.
.. attribute:: skeleton
:rtype: typing.List[PageSkeleton]
The contents of the page
.. attribute:: page_type
:rtype: PageType
The type of the page
.. attribute:: page_meta
:rtype: PageMetadata
Metadata about the page
"""
def __init__(self, page_name, page_id, skeleton, page_type, page_meta):
self.page_name = page_name
self.page_id = page_id
self.skeleton = list(skeleton)
self.child_sections = [child for child in self.skeleton if isinstance(child, Section)]
self.page_type = page_type
self.page_meta = page_meta
def deep_headings_list(self):
return [child.nested_headings() for child in self.child_sections]
def flat_headings_list(self):
""" return
Returns a flat list of headings contained by the :class:`Page`.
:rtype: typing.List[Section]
"""
def flatten(prefix, headings):
for section, children in headings:
new_prefix = prefix + [section]
if len(children)>0 :
yield new_prefix
yield from flatten(new_prefix, children)
else:
yield new_prefix
deep_headings = self.deep_headings_list()
return list(flatten([], deep_headings))
def get_infoboxes(self):
toplevel_infoboxes = [child for child in self.skeleton if isinstance(child, InfoBox)]
section_infoboxes = [section.get_infoboxes()
for sections
in self.flat_headings_list()
for section in sections]
return toplevel_infoboxes + list(itertools.chain.from_iterable(section_infoboxes))
@staticmethod
def from_cbor(cbor):
if not (cbor[0] == 0 or cbor[0] == 1): # tag
raise CborElementNotDefinedException(cbor)
pagename = cbor[1]
pageId = cbor[2].decode('ascii')
if len(cbor)==4:
return Page(pagename, pageId, map(PageSkeleton.from_cbor, cbor[3]), ArticlePage, PageMetadata.default())
else:
page_type = PageType.from_cbor(cbor[4])
return Page(pagename, pageId, map(PageSkeleton.from_cbor, cbor[3]), page_type, PageMetadata.from_cbor(cbor[5]))
def __str__(self):
return "Page(%s)" % self.page_name
def to_string(self):
"""
Render a string representation of the page.
:rtype: str
"""
return self.page_name + str(self.page_meta) +\
'\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + '\n'.join(str(s) for s in self.skeleton)
def nested_headings(self):
"""
Each heading recursively represented by a pair of ``(heading,
list_of_child_sections)``.
:rtype: typing.List[typing.Tuple[Section, typing.List[Section]]]
"""
result = [child.nested_headings() for child in self.child_sections]
return result
def outline(self):
return self.child_sections
def get_text(self):
"""Include all visible text below this elements. Includes Captions of images, but no headings and no infoboxes. See `get_text_with_headings` for a version that includes headings."""
return '\n'.join(skel.get_text() for skel in self.skeleton)
def get_text_with_headings(self, include_heading = False):
"""Include all visible text below this elements. While the heading of this element is excluded, headings of subsections will be included. Captions of images are excluded."""
return '\n'.join(skel.get_text_with_headings(include_heading = True) for skel in self.skeleton)
class PageType(object):
"""
An abstract base class representing the various types of pages.
Subclasses include
* :class:`ArticlePage`
* :class:`CategoryPage`
* :class:`DisambiguationPage`
* :class:`RedirectPage`
"""
@staticmethod
def from_cbor(cbor):
typetag = cbor[0]
if typetag == 0: return ArticlePage()
elif typetag == 1: return CategoryPage()
elif typetag == 2: return DisambiguationPage()
elif typetag == 3:
target = cbor[1]
if type(target) == list: # TODO this is almost certainly wrong
targetPage = target[1]
else:
targetPage = target.decode('ascii')
return RedirectPage(targetPage)
else:
raise CborElementNotDefinedException(cbor)
class ArticlePage(PageType):
''
def __init__(self):
pass
def __str__(self): return "ArticlePage"
class CategoryPage(PageType):
def __init__(self):
pass
def __str__(self): return "CategoryPage"
class DisambiguationPage(PageType):
def __init__(self):
pass
def __str__(self): return "Disambiguation Page"
class RedirectPage(PageType):
"""
.. attribute:: targetPage
:rtype: PageId
The target of the redirect.
"""
def __init__(self, targetPage):
self.targetPage = targetPage
def __str__(self):
return "RedirectPage " + self.targetPage
class PageMetadata(object):
"""
Metadata for a page
.. attribute:: redirectNames
:rtype: PageName
Names of pages which redirect to this page
.. attribute:: disambiguationNames
:rtype: PageName
Names of disambiguation pages which link to this page
.. attribute:: disambiguationIds
:rtype: PageId
Page IDs of disambiguation pages which link to this page
.. attribute:: categoryNames
:rtype: str
Page names of categories to which this page belongs
.. attribute:: categoryIds
:rtype: str
Page IDs of categories to which this page belongs
.. attribute:: inlinkIds
:rtype: str
Page IDs of pages containing inlinks
.. attribute:: inlinkAnchors
inlinkAnchor frequencies
:rtype: str
(Anchor text, frequency) of pages containing inlinks
"""
def __init__(self, redirectNames, disambiguationNames, disambiguationIds, categoryNames, categoryIds, inlinkIds,
inlinkAnchors):
self.inlinkAnchors = inlinkAnchors
self.inlinkIds = inlinkIds
self.categoryIds = categoryIds
self.categoryNames = categoryNames
self.disambiguationIds = disambiguationIds
self.disambiguationNames = disambiguationNames
self.redirectNames = redirectNames
@staticmethod
def default():
return PageMetadata(None, None, None, None, None, None, None)
def __str__(self):
redirStr = ("" if self.redirectNames is None else (" redirected = "+", ".join([name for name in self.redirectNames])))
disamStr = ("" if self.disambiguationNames is None else (" disambiguated = "+", ".join([name for name in self.disambiguationNames])))
catStr = ("" if self.redirectNames is None else (" categories = "+", ".join([name for name in (self.categoryNames or [])])))
inlinkStr = ("" if self.inlinkIds is None else (" inlinks = "+", ".join([name for name in self.inlinkIds])))
# inlinkAnchorStr = str (self.inlinkAnchors)
inlinkAnchorStr = ("" if self.inlinkAnchors is None else
(" inlinkAnchors = "+", ".join(
[ ("%s: %d" % (name, freq)) for (name, freq) in self.inlinkAnchors]
# [ ("%s: " % (name)) for (name, freq) in self.inlinkAnchors] \
)))
return "%s \n%s \n%s \n%s \n%s\n" % (redirStr, disamStr, catStr, inlinkStr, inlinkAnchorStr)
@staticmethod
def from_cbor(cbor):
redirectNames=None
disambiguationNames=None
disambiguationIds=None
categoryNames=None
categoryIds=None
inlinkIds=None
inlinkAnchors=None
def decodeListOfIdList(cbor):
if len(cbor)==0: return None
else:
return [elem.decode('ascii') for elem in cbor]
def decodeListOfNameList(cbor):
if len(cbor)==0: return None
else:
return cbor
def decodeListOfNameIntList(cbor):
if len(cbor)==0: return None
else:
# need to convert list of pair-lists to lists of pair-tuples
return [(elem[0], elem[1]) for elem in cbor]
for i in range(0, len(cbor), 2):
tag = cbor[i][0]
cbor_data = cbor[i+1]
if tag == 0:
redirectNames = decodeListOfNameList(cbor_data)
elif tag == 1:
disambiguationNames=decodeListOfNameList(cbor_data)
elif tag == 2:
disambiguationIds=decodeListOfIdList(cbor_data)
elif tag == 3:
categoryNames=decodeListOfNameList(cbor_data)
elif tag == 4:
categoryIds=decodeListOfIdList(cbor_data)
elif tag == 5:
inlinkIds=decodeListOfIdList(cbor_data)
elif tag == 6:
# compatibility with v1.6
inlinkAnchors = [(anchor, 1) for anchor in decodeListOfNameList(cbor_data)]
elif tag == 7:
# compatibility with v2.0
inlinkAnchors = decodeListOfNameIntList(cbor_data)
i+=2
return PageMetadata(redirectNames, disambiguationNames, disambiguationIds, categoryNames, categoryIds, inlinkIds, inlinkAnchors)
class PageSkeleton(object):
"""
An abstract superclass for the various types of page elements. Subclasses include:
* :class:`Section`
* :class:`Para`
* :class:`Image`
"""
@staticmethod
def from_cbor(cbor):
tag = cbor[0]
if tag == 0: # section
heading = cbor[1]
headingId = cbor[2].decode('ascii')
return Section(heading, headingId, map(PageSkeleton.from_cbor, cbor[3]))
elif tag == 1: # para-wrapper
return Para(Paragraph.from_cbor(cbor[1]))
elif tag == 2: #images
imageUrl = cbor[1]
caption = [PageSkeleton.from_cbor(elem) for elem in cbor[2]]
return Image(imageUrl, caption=caption)
elif tag == 3: # list
level = cbor[1]
body = Paragraph.from_cbor(cbor[2])
return List(level, body)
elif tag == 4: # infobox
infobox_title = cbor[1]
cbor_entries = cbor[2]
entries = [ (kv[0], PageSkeleton.from_cbor(kv[1][0])) for kv in cbor_entries if kv[1] and kv[1][0]] # if no value is defined kv[1] will be null.
return InfoBox(infobox_title, entries)
else:
raise CborElementNotDefinedException(cbor)
def get_text(self):
"""Includes visible text of this element and below. Headings are excluded. Image Captions are included. Infoboxes are ignored. (For a version with headers and no captions see `get_text_with_headings` """
raise NotImplementedError
def get_text_with_headings(self, include_heading = False):
"""Include all visible text below this elements. While the heading of this element is excluded, headings of subsections will be included. Captions of images are excluded."""
raise NotImplementedError
class Section(PageSkeleton):
"""
A section of a Wikipedia page.
.. attribute:: heading
:rtype: str
The section heading.
.. attribute:: headingId
:rtype: str
The unique identifier of a section heading.
.. attribute:: children
:rtype: typing.List[PageSkeleton]
The :class:`PageSkeleton` elements contained by the section.
"""
def __init__(self, heading, headingId, children):
self.heading = heading
self.headingId = headingId
self.children = list(children)
self.child_sections = [child for child in self.children if isinstance(child, Section)]
def str_(self, level):
bar = "".join("="*level)
children = "".join(c.str_(level=level+1) for c in self.children)
return "\n%s %s %s\n\n%s" % (bar, self.heading, bar, children)
def __str__(self):
return self.str_(level=1)
def __getitem__(self, idx):
return self.children[idx]
def nested_headings(self):
return (self, [child.nested_headings() for child in self.child_sections])
def get_text(self):
return '\n'.join(child.get_text() for child in self.children)
def get_text_with_headings(self, include_heading = False):
opt_heading = self.heading + "\n" if include_heading else ""
return opt_heading + '\n'.join(child.get_text_with_headings(include_heading = True) for child in self.children)
def get_infoboxes(self):
return [child for child in self.children if isinstance(child, InfoBox)]
class Para(PageSkeleton):
"""
A paragraph within a Wikipedia page.
.. attribute:: paragraph
:rtype: Paragraph
The content of the Paragraph (which in turn contains a list of :class:`ParaBody`\ s)
"""
def __init__(self, paragraph):
self.paragraph = paragraph
def str_(self, level=None):
return str(self.paragraph)
def __str__(self):
return self.str_()
def get_text(self):
return self.paragraph.get_text()
def get_text_with_headings(self, include_heading = False):
return self.get_text()
class Image(PageSkeleton):
"""
An image within a Wikipedia page.
.. attribute:: caption
:rtype: str
PageSkeleton representing the caption of the image
.. attribute:: imageurl
:rtype: str
URL to the image; spaces need to be replaced with underscores, Wikimedia
Commons namespace needs to be prefixed
"""
def __init__(self, imageurl, caption):
self.caption = caption
self.imageurl = imageurl
def str_(self, level=None):
return str("!["+self.imageurl+"]. Caption: "+(''.join([str(skel) for skel in self.caption])))
def __str__(self):
return self.str_()
def get_text(self):
return '\n'.join(skel.get_text() for skel in self.caption)
def get_text_with_headings(self, include_heading = False):
return ''
class List(PageSkeleton):
"""
A list element within a Wikipedia page.
.. attribute:: level
:rtype: int
The list nesting level
.. attribute:: body
A :class:`Paragraph` containing the list element contents.
"""
def __init__(self, level, body):
self.level = level
self.body = body
def str_(self, level=None):
return str("*" * self.level + " " + str(self.body) + '\n')
def __str__(self):
return self.str_()
def get_text(self):
return self.body.get_text()
def get_text_with_headings(self, include_heading = False):
return self.get_text()
class InfoBox(PageSkeleton):
def __init__(self, infobox_type, entries):
"""
An infobox within a Wikipedia page.
.. attribute:: infobox_type
:rtype: str
The title/type of the infobox
.. attribute:: entries
Key-value pairs, where the key is a string and the value is a :class:`PageSkeleton` holding the entry's content. Values are often paragraphs or images, but they can also be lists.
"""
self.title = infobox_type
self.entries = entries
def str_(self, level=None):
return self.title+ "\n"+ ("\n".join([key+": "+str(values) for (key,values) in self.entries]))
def __str__(self):
return self.str_()
def get_text(self):
return ''
def get_text_with_headings(self, include_heading = False):
return ''
class Paragraph(object):
"""
A paragraph.
"""
def __init__(self, para_id, bodies):
self.para_id = para_id
self.bodies = list(bodies)
@staticmethod
def from_cbor(cbor):
if (not cbor[0] == 0):
raise CborElementNotDefinedException(cbor)
paragraphId = cbor[1].decode('ascii')
return Paragraph(paragraphId, map(ParaBody.from_cbor, cbor[2]))
def get_text(self):
"""
Get all of the contained text.
:rtype: str
"""
return ''.join([body.get_text() for body in self.bodies])
def str_(self, level=None):
return ' '.join(str(body) for body in self.bodies)
def __str__(self):
return self.str_()
class ParaBody(object):
"""
An abstract superclass representing a bit of :class:`Paragraph` content.
"""
@staticmethod
def from_cbor(cbor):
tag = cbor[0]
if tag == 0:
return ParaText(cbor[1])
elif tag == 1:
cbor_ = cbor[1]
linkSection = None
if len(cbor_[2]) == 1:
linkSection = cbor_[2][0]
linkTargetId = cbor_[3].decode('ascii')
return ParaLink(cbor_[1], linkSection, linkTargetId, cbor_[4])
else:
raise CborElementNotDefinedException(cbor)
@abstractmethod
def get_text(self):
"""
Get all of the text within a :class:`ParaBody`.
:rtype: str
"""
raise NotImplementedError
class ParaText(ParaBody):
"""
A bit of plain text from a paragraph.
.. attribute:: text
:rtype: str
The text
"""
def __init__(self, text):
self.text = text
def get_text(self):
return self.text
def str_(self, level=None):
return self.text
def __str__(self):
return self.str_()
class ParaLink(ParaBody):
"""
A link within a paragraph.
.. attribute:: page
:rtype: PageName
The page name of the link target
.. attribute:: pageid
:rtype: PageId
The link target as trec-car identifier
.. attribute:: link_section
:rtype: str
Section anchor of link target (i.e. the part after the ``#`` in the
URL), or ``None``.
.. attribute:: anchor_text
:rtype: str
The anchor text of the link
"""
def __init__(self, page, link_section, pageid, anchor_text):
self.page = page
self.pageid = pageid
self.link_section = link_section
self.anchor_text = anchor_text
def get_text(self):
return self.anchor_text
def str_(self, level=None):
return "[%s](%s)" % (self.anchor_text, self.page)
def __str__(self):
return self.str_()
def _iter_with_header(file, parse, expected_file_types):
maybe_hdr = cbor.load(file)
if isinstance(maybe_hdr, list) and maybe_hdr[0] == 'CAR':
# we have a header
file_type = maybe_hdr[1][0]
if not file_type in expected_file_types:
# print( 'File type tag is expected to be ', (" ".join(expected_file_types)), 'but given file is of type ', file_type)
# print('Did not expect file of type', file_type)
raise WrongCarFileException(file_type, expected_file_types)
# read beginning of variable-length list
if (not file.read(1) == b'\x9f'):
raise BrokenCborFileException()
else:
yield parse(maybe_hdr)
while True:
try:
# Check for break symbol
if (peek_for_break(file)):
break
yield parse(cbor.load(file))
except EOFError:
break
def peek_for_break(cbor):
b = cbor.peek(1)
return b[0:1] == b'\xff'
def iter_annotations(file):
"""
Iterate over the :class:`Page`\ s of an annotations file.
:type file: typing.BinaryIO
:rtype: typing.Iterator[Page]
"""
return _iter_with_header(file, Page.from_cbor, [0,1])
# return TrecCarHeader.from_cbor(file)
def iter_pages(file):
"""
Iterate over the :class:`Page`\ s of a pages file.
:type file: typing.BinaryIO
:rtype: typing.Iterator[Page]
"""
return _iter_with_header(file, Page.from_cbor, [0])
def iter_outlines(file):
"""
Iterate over the :class:`Page`\ s of an outlines file.
:type file: typing.BinaryIO
:rtype: typing.Iterator[Page]
"""
return _iter_with_header(file, Page.from_cbor, [1])
def iter_paragraphs(file):
"""
Iterate over the :class:`Paragraph`\ s of a paragraphs file.
:type file: typing.BinaryIO
:rtype: typing.Iterator[Paragraph]
"""
return _iter_with_header(file, Paragraph.from_cbor, [2])
def dump_annotations(file):
for page in iter_annotations(file):
print(page.to_string())
def with_toc(read_val):
class AnnotationsFile(object):
def __init__(self, fname):
"""
Read annotations from a file.
Arguments:
fname The name of the CBOR file. A table-of-contents file is
also expected to be present.
"""
self.cbor = open(fname, 'rb')
self.toc = cbor.load(open(fname+'.toc', 'rb'))
def keys(self):
""" The page names contained in an annotations file. """
return self.toc.keys()
def get(self, page):
""" Lookup a page by name. Returns a Page or None """
offset = self.toc.get(page)
if offset is not None:
self.cbor.seek(offset)
return read_val(cbor.load(self.cbor))
return None
return AnnotationsFile
AnnotationsFile = with_toc(Page.from_cbor)
ParagraphsFile = with_toc(Paragraph.from_cbor)
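# --- Usage sketch (not part of the library): reading a pages file and printing
# its outline. The default file name 'pages.cbor' is illustrative.
if __name__ == '__main__':
    import sys
    path = sys.argv[1] if len(sys.argv) > 1 else 'pages.cbor'
    with open(path, 'rb') as f:
        for page in iter_pages(f):
            print(page.page_id, '-', page.page_name)
            # top-level sections only; nested headings sit below each section
            for section in page.outline():
                print('   ', section.heading)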
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/trec_car/read_data.py |
import csv
import urllib.parse
from typing import *
def encode_section_path(page_id, section_path):
elements = [page_id] + section_path
return '/'.join([urllib.parse.quote(elem) for elem in elements])
# return urllib.parse.urlencode({'page':page_id, 'sectionpath':section_path})
def encode_page_only(page_id):
return urllib.parse.quote(page_id)
class RankingEntry(object):
"""
A ranking entry for one query/paragraph pair, as written to a trec_eval run file.
Attributes:
paragraph_content The content of the paragraph (which in turn contains a list of ParaBodys)
"""
def __init__(self, query_id:str, paragraph_id:str, rank:int, score:float, exp_name:str=None, paragraph_content:str=None):
assert(rank > 0)
self.query_id = query_id
self.paragraph_id = paragraph_id
self.rank = rank
self.score = score
self.exp_name = exp_name
self.paragraph_content = paragraph_content
def to_trec_eval_row(self, alternative_exp_name=None, page_only=False):
exp_name_ = alternative_exp_name if alternative_exp_name is not None \
else self.exp_name
return [self.query_id, 'Q0', self.paragraph_id, self.rank, self.score, exp_name_]
#
# csv.register_dialect(
# 'trec_eval',
# delimiter = ' ',
# quotechar = '"',
# doublequote = False,
# skipinitialspace = False,
# lineterminator = '\n',
# quoting = csv.QUOTE_NONE)
#
#
# def configure_csv_writer(fileobj):
# 'Convenience method to create a csv writer with the trec_eval_dialect'
# return csv.writer(fileobj, dialect='trec_eval')
#
def format_run(writer, ranking_of_paragraphs, exp_name=None):
'write one ranking to the writer, one trec_eval-formatted row per entry'
for elem in ranking_of_paragraphs:
# query-number Q0 document-id rank score Exp
writer.write(" ".join([str(x) for x in elem.to_trec_eval_row(exp_name)]))
writer.write("\n")
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/trec_car/format_runs.py |
"""__init__ module for trec-car-tools, imports all necessary functions for reading cbor data provided in the TREC CAR"""
__version__ = 1.0
__all__ = ['read_data', 'format_runs']
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/build/lib/trec_car/__init__.py |
# Use python 3.6 or higher
# obsolete: conda install -c auto cbor=0.1.4
from __future__ import print_function
from abc import abstractmethod
import cbor
import itertools
import typing
PageId = str
PageName = str
class CborElementNotDefinedException(Exception):
def __init__(self, cbor):
self.cbor = cbor
Exception.__init__(self, 'unknown Cbor element encountered: %s' % str(cbor))
class WrongCarFileException(Exception):
def __init__(self, file_type, expected_file_types):
self.file_type = file_type
self.expected_file_types = expected_file_types
Exception.__init__(self, 'Open method does not support CAR file type: %s. Instead expect following CAR file types: %s' % (str(file_type), str(expected_file_types)))
class BrokenCborFileException(Exception):
def __init__(self):
Exception.__init__(self, 'Corrupt, incomplete, or otherwise broken CBOR file. Please re-download or contact the organizers or use appropriate reader to open this file.')
class Page(object):
"""
The name and skeleton of a Wikipedia page.
.. attribute:: page_name
:rtype: PageName
The name of the page.
.. attribute:: skeleton
:rtype: typing.List[PageSkeleton]
The contents of the page
.. attribute:: page_type
:rtype: PageType
The type of the page
.. attribute:: page_meta
:rtype: PageMetadata
Metadata about the page
"""
def __init__(self, page_name, page_id, skeleton, page_type, page_meta):
self.page_name = page_name
self.page_id = page_id
self.skeleton = list(skeleton)
self.child_sections = [child for child in self.skeleton if isinstance(child, Section)]
self.page_type = page_type
self.page_meta = page_meta
def deep_headings_list(self):
return [child.nested_headings() for child in self.child_sections]
def flat_headings_list(self):
""" return
Returns a flat list of headings contained by the :class:`Page`.
:rtype: typing.List[Section]
"""
def flatten(prefix, headings):
for section, children in headings:
new_prefix = prefix + [section]
if len(children)>0 :
yield new_prefix
yield from flatten(new_prefix, children)
else:
yield new_prefix
deep_headings = self.deep_headings_list()
return list(flatten([], deep_headings))
def get_infoboxes(self):
toplevel_infoboxes = [child for child in self.skeleton if isinstance(child, InfoBox)]
section_infoboxes = [section.get_infoboxes()
for sections
in self.flat_headings_list()
for section in sections]
return toplevel_infoboxes + list(itertools.chain.from_iterable(section_infoboxes))
@staticmethod
def from_cbor(cbor):
if not (cbor[0] == 0 or cbor[0] == 1): # tag
raise CborElementNotDefinedException(cbor)
pagename = cbor[1]
pageId = cbor[2].decode('ascii')
if len(cbor)==4:
return Page(pagename, pageId, map(PageSkeleton.from_cbor, cbor[3]), ArticlePage, PageMetadata.default())
else:
page_type = PageType.from_cbor(cbor[4])
return Page(pagename, pageId, map(PageSkeleton.from_cbor, cbor[3]), page_type, PageMetadata.from_cbor(cbor[5]))
def __str__(self):
return "Page(%s)" % self.page_name
def to_string(self):
"""
Render a string representation of the page.
:rtype: str
"""
return self.page_name + str(self.page_meta) +\
'\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + '\n'.join(str(s) for s in self.skeleton)
def nested_headings(self):
"""
Each heading recursively represented by a pair of ``(heading,
list_of_child_sections)``.
:rtype: typing.List[typing.Tuple[Section, typing.List[Section]]]
"""
result = [child.nested_headings() for child in self.child_sections]
return result
def outline(self):
return self.child_sections
def get_text(self):
"""Include all visible text below this elements. Includes Captions of images, but no headings and no infoboxes. See `get_text_with_headings` for a version that includes headings."""
return '\n'.join(skel.get_text() for skel in self.skeleton)
def get_text_with_headings(self, include_heading = False):
"""Include all visible text below this elements. While the heading of this element is excluded, headings of subsections will be included. Captions of images are excluded."""
return '\n'.join(skel.get_text_with_headings(include_heading = True) for skel in self.skeleton)
class PageType(object):
"""
An abstract base class representing the various types of pages.
Subclasses include
* :class:`ArticlePage`
* :class:`CategoryPage`
* :class:`DisambiguationPage`
* :class:`RedirectPage`
"""
@staticmethod
def from_cbor(cbor):
typetag = cbor[0]
if typetag == 0: return ArticlePage()
elif typetag == 1: return CategoryPage()
elif typetag == 2: return DisambiguationPage()
elif typetag == 3:
target = cbor[1]
if type(target) == list: # TODO this is almost certainly wrong
targetPage = target[1]
else:
targetPage = target.decode('ascii')
return RedirectPage(targetPage)
else:
raise CborElementNotDefinedException(cbor)
class ArticlePage(PageType):
''
def __init__(self):
pass
def __str__(self): return "ArticlePage"
class CategoryPage(PageType):
def __init__(self):
pass
def __str__(self): return "CategoryPage"
class DisambiguationPage(PageType):
def __init__(self):
pass
def __str__(self): return "Disambiguation Page"
class RedirectPage(PageType):
"""
.. attribute:: targetPage
:rtype: PageId
The target of the redirect.
"""
def __init__(self, targetPage):
self.targetPage = targetPage
def __str__(self):
return "RedirectPage " + self.targetPage
class PageMetadata(object):
"""
Metadata for a page
.. attribute:: redirectNames
:rtype: PageName
Names of pages which redirect to this page
.. attribute:: disambiguationNames
:rtype: PageName
Names of disambiguation pages which link to this page
.. attribute:: disambiguationIds
:rtype: PageId
Page IDs of disambiguation pages which link to this page
.. attribute:: categoryNames
:rtype: str
Page names of categories to which this page belongs
.. attribute:: categoryIds
:rtype: str
Page IDs of categories to which this page belongs
.. attribute:: inlinkIds
:rtype: str
Page IDs of pages containing inlinks
.. attribute:: inlinkAnchors
inlinkAnchor frequencies
:rtype: str
(Anchor text, frequency) of pages containing inlinks
"""
def __init__(self, redirectNames, disambiguationNames, disambiguationIds, categoryNames, categoryIds, inlinkIds,
inlinkAnchors):
self.inlinkAnchors = inlinkAnchors
self.inlinkIds = inlinkIds
self.categoryIds = categoryIds
self.categoryNames = categoryNames
self.disambiguationIds = disambiguationIds
self.disambiguationNames = disambiguationNames
self.redirectNames = redirectNames
@staticmethod
def default():
return PageMetadata(None, None, None, None, None, None, None)
def __str__(self):
redirStr = ("" if self.redirectNames is None else (" redirected = "+", ".join([name for name in self.redirectNames])))
disamStr = ("" if self.disambiguationNames is None else (" disambiguated = "+", ".join([name for name in self.disambiguationNames])))
catStr = ("" if self.redirectNames is None else (" categories = "+", ".join([name for name in (self.categoryNames or [])])))
inlinkStr = ("" if self.inlinkIds is None else (" inlinks = "+", ".join([name for name in self.inlinkIds])))
# inlinkAnchorStr = str (self.inlinkAnchors)
inlinkAnchorStr = ("" if self.inlinkAnchors is None else
(" inlinkAnchors = "+", ".join(
[ ("%s: %d" % (name, freq)) for (name, freq) in self.inlinkAnchors]
# [ ("%s: " % (name)) for (name, freq) in self.inlinkAnchors] \
)))
return "%s \n%s \n%s \n%s \n%s\n" % (redirStr, disamStr, catStr, inlinkStr, inlinkAnchorStr)
@staticmethod
def from_cbor(cbor):
redirectNames=None
disambiguationNames=None
disambiguationIds=None
categoryNames=None
categoryIds=None
inlinkIds=None
inlinkAnchors=None
def decodeListOfIdList(cbor):
if len(cbor)==0: return None
else:
return [elem.decode('ascii') for elem in cbor]
def decodeListOfNameList(cbor):
if len(cbor)==0: return None
else:
return cbor
def decodeListOfNameIntList(cbor):
if len(cbor)==0: return None
else:
# need to convert list of pair-lists to lists of pair-tuples
return [(elem[0], elem[1]) for elem in cbor]
for i in range(0, len(cbor), 2):
tag = cbor[i][0]
cbor_data = cbor[i+1]
if tag == 0:
redirectNames = decodeListOfNameList(cbor_data)
elif tag == 1:
disambiguationNames=decodeListOfNameList(cbor_data)
elif tag == 2:
disambiguationIds=decodeListOfIdList(cbor_data)
elif tag == 3:
categoryNames=decodeListOfNameList(cbor_data)
elif tag == 4:
categoryIds=decodeListOfIdList(cbor_data)
elif tag == 5:
inlinkIds=decodeListOfIdList(cbor_data)
elif tag == 6:
# compatibility with v1.6
inlinkAnchors = [(anchor, 1) for anchor in decodeListOfNameList(cbor_data)]
elif tag == 7:
# compatibility with v2.0
inlinkAnchors = decodeListOfNameIntList(cbor_data)
i+=2
return PageMetadata(redirectNames, disambiguationNames, disambiguationIds, categoryNames, categoryIds, inlinkIds, inlinkAnchors)
class PageSkeleton(object):
"""
An abstract superclass for the various types of page elements. Subclasses include:
* :class:`Section`
* :class:`Para`
* :class:`Image`
"""
@staticmethod
def from_cbor(cbor):
tag = cbor[0]
if tag == 0: # section
heading = cbor[1]
headingId = cbor[2].decode('ascii')
return Section(heading, headingId, map(PageSkeleton.from_cbor, cbor[3]))
elif tag == 1: # para-wrapper
return Para(Paragraph.from_cbor(cbor[1]))
elif tag == 2: #images
imageUrl = cbor[1]
caption = [PageSkeleton.from_cbor(elem) for elem in cbor[2]]
return Image(imageUrl, caption=caption)
elif tag == 3: # list
level = cbor[1]
body = Paragraph.from_cbor(cbor[2])
return List(level, body)
elif tag == 4: # infobox
infobox_title = cbor[1]
cbor_entries = cbor[2]
entries = [ (kv[0], PageSkeleton.from_cbor(kv[1][0])) for kv in cbor_entries if kv[1] and kv[1][0]] # if no value is defined kv[1] will be null.
return InfoBox(infobox_title, entries)
else:
raise CborElementNotDefinedException(cbor)
def get_text(self):
"""Includes visible text of this element and below. Headings are excluded. Image Captions are included. Infoboxes are ignored. (For a version with headers and no captions see `get_text_with_headings` """
raise NotImplementedError
def get_text_with_headings(self, include_heading = False):
"""Include all visible text below this elements. While the heading of this element is excluded, headings of subsections will be included. Captions of images are excluded."""
raise NotImplementedError
class Section(PageSkeleton):
"""
A section of a Wikipedia page.
.. attribute:: heading
:rtype: str
The section heading.
.. attribute:: headingId
:rtype: str
The unique identifier of a section heading.
.. attribute:: children
:rtype: typing.List[PageSkeleton]
The :class:`PageSkeleton` elements contained by the section.
"""
def __init__(self, heading, headingId, children):
self.heading = heading
self.headingId = headingId
self.children = list(children)
self.child_sections = [child for child in self.children if isinstance(child, Section)]
def str_(self, level):
bar = "".join("="*level)
children = "".join(c.str_(level=level+1) for c in self.children)
return "\n%s %s %s\n\n%s" % (bar, self.heading, bar, children)
def __str__(self):
return self.str_(level=1)
def __getitem__(self, idx):
return self.children[idx]
def nested_headings(self):
return (self, [child.nested_headings() for child in self.child_sections])
def get_text(self):
return '\n'.join(child.get_text() for child in self.children)
def get_text_with_headings(self, include_heading = False):
opt_heading = self.heading + "\n" if include_heading else ""
return opt_heading + '\n'.join(child.get_text_with_headings(include_heading = True) for child in self.children)
def get_infoboxes(self):
return [child for child in self.children if isinstance(child, InfoBox)]
class Para(PageSkeleton):
"""
A paragraph within a Wikipedia page.
.. attribute:: paragraph
:rtype: Paragraph
The content of the Paragraph (which in turn contains a list of :class:`ParaBody`\ s)
"""
def __init__(self, paragraph):
self.paragraph = paragraph
def str_(self, level=None):
return str(self.paragraph)
def __str__(self):
return self.str_()
def get_text(self):
return self.paragraph.get_text()
def get_text_with_headings(self, include_heading = False):
return self.get_text()
class Image(PageSkeleton):
"""
An image within a Wikipedia page.
.. attribute:: caption
:rtype: str
PageSkeleton representing the caption of the image
.. attribute:: imageurl
:rtype: str
URL to the image; spaces need to be replaced with underscores, Wikimedia
Commons namespace needs to be prefixed
"""
def __init__(self, imageurl, caption):
self.caption = caption
self.imageurl = imageurl
def str_(self, level=None):
return str("!["+self.imageurl+"]. Caption: "+(''.join([str(skel) for skel in self.caption])))
def __str__(self):
return self.str_()
def get_text(self):
return '\n'.join(skel.get_text() for skel in self.caption)
def get_text_with_headings(self, include_heading = False):
return ''
class List(PageSkeleton):
"""
A list element within a Wikipedia page.
.. attribute:: level
:rtype: int
The list nesting level
.. attribute:: body
A :class:`Paragraph` containing the list element contents.
"""
def __init__(self, level, body):
self.level = level
self.body = body
def str_(self, level=None):
return str("*" * self.level + " " + str(self.body) + '\n')
def __str__(self):
return self.str_()
def get_text(self):
return self.body.get_text()
def get_text_with_headings(self, include_heading = False):
return self.get_text()
class InfoBox(PageSkeleton):
def __init__(self, infobox_type, entries):
"""
An infobox within a Wikipedia page.
.. attribute:: infobox_type
:rtype: str
The title/type of the infobox
.. attribute:: entries
Key-value pairs, where the key is a string and the value is a :class:`PageSkeleton` holding the entry's content. Values are often paragraphs or images, but they can also be lists.
"""
self.title = infobox_type
self.entries = entries
def str_(self, level=None):
return self.title+ "\n"+ ("\n".join([key+": "+str(values) for (key,values) in self.entries]))
def __str__(self):
return self.str_()
def get_text(self):
return ''
def get_text_with_headings(self, include_heading = False):
return ''
class Paragraph(object):
"""
A paragraph.
"""
def __init__(self, para_id, bodies):
self.para_id = para_id
self.bodies = list(bodies)
@staticmethod
def from_cbor(cbor):
if (not cbor[0] == 0):
raise CborElementNotDefinedException(cbor)
paragraphId = cbor[1].decode('ascii')
return Paragraph(paragraphId, map(ParaBody.from_cbor, cbor[2]))
def get_text(self):
"""
Get all of the contained text.
:rtype: str
"""
return ''.join([body.get_text() for body in self.bodies])
def str_(self, level=None):
return ' '.join(str(body) for body in self.bodies)
def __str__(self):
return self.str_()
class ParaBody(object):
"""
An abstract superclass representing a bit of :class:`Paragraph` content.
"""
@staticmethod
def from_cbor(cbor):
tag = cbor[0]
if tag == 0:
return ParaText(cbor[1])
elif tag == 1:
cbor_ = cbor[1]
linkSection = None
if len(cbor_[2]) == 1:
linkSection = cbor_[2][0]
linkTargetId = cbor_[3].decode('ascii')
return ParaLink(cbor_[1], linkSection, linkTargetId, cbor_[4])
else:
raise CborElementNotDefinedException(cbor)
@abstractmethod
def get_text(self):
"""
Get all of the text within a :class:`ParaBody`.
:rtype: str
"""
raise NotImplementedError
class ParaText(ParaBody):
"""
A bit of plain text from a paragraph.
.. attribute:: text
:rtype: str
The text
"""
def __init__(self, text):
self.text = text
def get_text(self):
return self.text
def str_(self, level=None):
return self.text
def __str__(self):
return self.str_()
class ParaLink(ParaBody):
"""
A link within a paragraph.
.. attribute:: page
:rtype: PageName
The page name of the link target
.. attribute:: pageid
:rtype: PageId
The link target as trec-car identifier
.. attribute:: link_section
:rtype: str
Section anchor of link target (i.e. the part after the ``#`` in the
URL), or ``None``.
.. attribute:: anchor_text
:rtype: str
The anchor text of the link
"""
def __init__(self, page, link_section, pageid, anchor_text):
self.page = page
self.pageid = pageid
self.link_section = link_section
self.anchor_text = anchor_text
def get_text(self):
return self.anchor_text
def str_(self, level=None):
return "[%s](%s)" % (self.anchor_text, self.page)
def __str__(self):
return self.str_()
def _iter_with_header(file, parse, expected_file_types):
maybe_hdr = cbor.load(file)
if isinstance(maybe_hdr, list) and maybe_hdr[0] == 'CAR':
# we have a header
file_type = maybe_hdr[1][0]
if not file_type in expected_file_types:
# print( 'File type tag is expected to be ', (" ".join(expected_file_types)), 'but given file is of type ', file_type)
# print('Did not expect file of type', file_type)
raise WrongCarFileException(file_type, expected_file_types)
# read beginning of variable-length list
if (not file.read(1) == b'\x9f'):
raise BrokenCborFileException()
else:
yield parse(maybe_hdr)
while True:
try:
# Check for break symbol
if (peek_for_break(file)):
break
yield parse(cbor.load(file))
except EOFError:
break
def peek_for_break(cbor):
b = cbor.peek(1)
return b[0:1] == b'\xff'
def iter_annotations(file):
"""
Iterate over the :class:`Page`\ s of an annotations file.
:type file: typing.BinaryIO
:rtype: typing.Iterator[Page]
"""
return _iter_with_header(file, Page.from_cbor, [0,1])
# return TrecCarHeader.from_cbor(file)
def iter_pages(file):
"""
Iterate over the :class:`Page`\ s of a pages file.
:type file: typing.BinaryIO
:rtype: typing.Iterator[Page]
"""
return _iter_with_header(file, Page.from_cbor, [0])
def iter_outlines(file):
"""
Iterate over the :class:`Page`\ s of an outlines file.
:type file: typing.BinaryIO
:rtype: typing.Iterator[Page]
"""
return _iter_with_header(file, Page.from_cbor, [1])
def iter_paragraphs(file):
"""
Iterate over the :class:`Paragraph`\ s of a paragraphs file.
:type file: typing.BinaryIO
:rtype: typing.Iterator[Paragraph]
"""
return _iter_with_header(file, Paragraph.from_cbor, [2])
def dump_annotations(file):
for page in iter_annotations(file):
print(page.to_string())
def with_toc(read_val):
class AnnotationsFile(object):
def __init__(self, fname):
"""
Read annotations from a file.
Arguments:
fname The name of the CBOR file. A table-of-contents file is
also expected to be present.
"""
self.cbor = open(fname, 'rb')
self.toc = cbor.load(open(fname+'.toc', 'rb'))
def keys(self):
""" The page names contained in an annotations file. """
return self.toc.keys()
def get(self, page):
""" Lookup a page by name. Returns a Page or None """
offset = self.toc.get(page)
if offset is not None:
self.cbor.seek(offset)
return read_val(cbor.load(self.cbor))
return None
return AnnotationsFile
AnnotationsFile = with_toc(Page.from_cbor)
ParagraphsFile = with_toc(Paragraph.from_cbor)
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/build/lib/trec_car/read_data.py |
import csv
import urllib.parse
from typing import *
def encode_section_path(page_id, section_path):
elements = [page_id] + section_path
return '/'.join([urllib.parse.quote(elem) for elem in elements])
# return urllib.parse.urlencode({'page':page_id, 'sectionpath':section_path})
def encode_page_only(page_id):
return urllib.parse.quote(page_id)
class RankingEntry(object):
"""
A ranking entry for one query/paragraph pair, as written to a trec_eval run file.
Attributes:
paragraph_content The content of the paragraph (which in turn contains a list of ParaBodys)
"""
def __init__(self, query_id:str, paragraph_id:str, rank:int, score:float, exp_name:str=None, paragraph_content:str=None):
assert(rank > 0)
self.query_id = query_id
self.paragraph_id = paragraph_id
self.rank = rank
self.score = score
self.exp_name = exp_name
self.paragraph_content = paragraph_content
def to_trec_eval_row(self, alternative_exp_name=None, page_only=False):
exp_name_ = alternative_exp_name if alternative_exp_name is not None \
else self.exp_name
return [self.query_id, 'Q0', self.paragraph_id, self.rank, self.score, exp_name_]
#
# csv.register_dialect(
# 'trec_eval',
# delimiter = ' ',
# quotechar = '"',
# doublequote = False,
# skipinitialspace = False,
# lineterminator = '\n',
# quoting = csv.QUOTE_NONE)
#
#
# def configure_csv_writer(fileobj):
# 'Convenience method to create a csv writer with the trec_eval_dialect'
# return csv.writer(fileobj, dialect='trec_eval')
#
def format_run(writer, ranking_of_paragraphs, exp_name=None):
'write one ranking to the writer, one trec_eval-formatted row per entry'
for elem in ranking_of_paragraphs:
# query-number Q0 document-id rank score Exp
writer.write(" ".join([str(x) for x in elem.to_trec_eval_row(exp_name)]))
writer.write("\n")
| datasets-server-main | services/worker/vendors/trec-car-tools/python3/build/lib/trec_car/format_runs.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import datasets
import pytest
from pytest import TempPathFactory
from worker.resources import LibrariesResource
@pytest.mark.parametrize(
"define_init_hf_datasets_cache,define_numba_path",
[(False, False), (False, True), (True, False), (True, True)],
)
def test_libraries(
tmp_path_factory: TempPathFactory, define_init_hf_datasets_cache: bool, define_numba_path: bool
) -> None:
hf_endpoint = "https://another.endpoint"
init_hf_datasets_cache = (
str(tmp_path_factory.mktemp("hf_datasets_cache")) if define_init_hf_datasets_cache else None
)
numba_path = str(tmp_path_factory.mktemp("numba_path")) if define_numba_path else None
assert datasets.config.HF_ENDPOINT != hf_endpoint
resource = LibrariesResource(
hf_endpoint=hf_endpoint, init_hf_datasets_cache=init_hf_datasets_cache, numba_path=numba_path
)
assert datasets.config.HF_ENDPOINT == hf_endpoint
assert (numba_path in resource.storage_paths) == define_numba_path
assert str(resource.hf_datasets_cache) in resource.storage_paths
assert str(datasets.config.HF_MODULES_CACHE) in resource.storage_paths
assert not datasets.config.HF_UPDATE_DOWNLOAD_COUNTS
assert (str(resource.hf_datasets_cache) == init_hf_datasets_cache) == define_init_hf_datasets_cache
resource.release()
assert datasets.config.HF_ENDPOINT != hf_endpoint
def test_libraries_context_manager(tmp_path_factory: TempPathFactory) -> None:
hf_endpoint = "https://another.endpoint"
init_hf_datasets_cache = str(tmp_path_factory.mktemp("hf_datasets_cache"))
numba_path = str(tmp_path_factory.mktemp("numba_path"))
assert datasets.config.HF_ENDPOINT != hf_endpoint
with LibrariesResource(
hf_endpoint=hf_endpoint, init_hf_datasets_cache=init_hf_datasets_cache, numba_path=numba_path
):
assert datasets.config.HF_ENDPOINT == hf_endpoint
assert datasets.config.HF_ENDPOINT != hf_endpoint
| datasets-server-main | services/worker/tests/test_resources.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from http import HTTPStatus
from typing import Optional
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.storage import StrPath
from libcommon.utils import JobInfo, Priority
from worker.config import AppConfig
from worker.job_runner_factory import JobRunnerFactory
from worker.resources import LibrariesResource
@pytest.fixture(autouse=True)
def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
return cache_mongo_resource
@pytest.fixture()
def processing_graph(app_config: AppConfig) -> ProcessingGraph:
return ProcessingGraph(app_config.processing_graph.specification)
@pytest.mark.parametrize(
"level,job_type,expected_job_runner",
[
("dataset", "dataset-config-names", "DatasetConfigNamesJobRunner"),
("split", "split-first-rows-from-streaming", "SplitFirstRowsFromStreamingJobRunner"),
("config", "config-parquet-and-info", "ConfigParquetAndInfoJobRunner"),
("config", "config-parquet", "ConfigParquetJobRunner"),
("dataset", "dataset-parquet", "DatasetParquetJobRunner"),
("config", "config-info", "ConfigInfoJobRunner"),
("dataset", "dataset-info", "DatasetInfoJobRunner"),
("config", "config-size", "ConfigSizeJobRunner"),
("dataset", "dataset-size", "DatasetSizeJobRunner"),
(None, "/unknown", None),
],
)
def test_create_job_runner(
app_config: AppConfig,
processing_graph: ProcessingGraph,
libraries_resource: LibrariesResource,
assets_directory: StrPath,
parquet_metadata_directory: StrPath,
duckdb_index_cache_directory: StrPath,
statistics_cache_directory: StrPath,
level: Optional[str],
job_type: str,
expected_job_runner: Optional[str],
) -> None:
factory = JobRunnerFactory(
app_config=app_config,
processing_graph=processing_graph,
hf_datasets_cache=libraries_resource.hf_datasets_cache,
assets_directory=assets_directory,
parquet_metadata_directory=parquet_metadata_directory,
duckdb_index_cache_directory=duckdb_index_cache_directory,
statistics_cache_directory=statistics_cache_directory,
)
dataset, config, split = "dataset", "config", "split"
job_info: JobInfo = {
"type": job_type,
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
}
if level in {"split", "config"}:
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
if level == "split":
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
http_status=HTTPStatus.OK,
)
if expected_job_runner is None:
with pytest.raises(KeyError):
factory.create_job_runner(job_info=job_info)
else:
job_runner = factory.create_job_runner(job_info=job_info)
assert job_runner.__class__.__name__ == expected_job_runner
| datasets-server-main | services/worker/tests/test_job_runner_factory.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Iterator
from pathlib import Path
from libcommon.processing_graph import ProcessingGraph, ProcessingStep
from libcommon.queue import _clean_queue_database
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import _clean_cache_database
from libcommon.storage import (
StrPath,
init_assets_dir,
init_duckdb_index_cache_dir,
init_parquet_metadata_dir,
init_statistics_cache_dir,
)
from pytest import MonkeyPatch, fixture
from worker.config import AppConfig
from worker.main import WORKER_STATE_FILE_NAME
from worker.resources import LibrariesResource
from .constants import (
CI_APP_TOKEN,
CI_HUB_ENDPOINT,
CI_PARQUET_CONVERTER_APP_TOKEN,
CI_URL_TEMPLATE,
)
@fixture
def datasets_cache_directory(tmp_path: Path) -> Path:
return tmp_path / "datasets"
@fixture
def modules_cache_directory(tmp_path: Path) -> Path:
return tmp_path / "modules"
@fixture
def worker_state_file_path(tmp_path: Path) -> str:
return str(tmp_path / WORKER_STATE_FILE_NAME)
@fixture
def statistics_cache_directory(app_config: AppConfig) -> StrPath:
return init_statistics_cache_dir(app_config.descriptive_statistics.cache_directory)
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture(scope="session", autouse=True)
def monkeypatch_session() -> Iterator[MonkeyPatch]:
mp = MonkeyPatch()
mp.setattr("huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_URL_TEMPLATE)
# ^ see https://github.com/huggingface/datasets/pull/5196#issuecomment-1322191056
mp.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
mp.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
yield mp
mp.undo()
# see https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
@fixture
def set_env_vars(
datasets_cache_directory: Path, modules_cache_directory: Path, worker_state_file_path: str
) -> Iterator[MonkeyPatch]:
mp = MonkeyPatch()
mp.setenv("CACHE_MONGO_DATABASE", "datasets_server_cache_test")
mp.setenv("QUEUE_MONGO_DATABASE", "datasets_server_queue_test")
mp.setenv("COMMON_HF_ENDPOINT", CI_HUB_ENDPOINT)
mp.setenv("COMMON_HF_TOKEN", CI_APP_TOKEN)
mp.setenv("ASSETS_BASE_URL", "http://localhost/assets")
mp.setenv("FIRST_ROWS_MAX_NUMBER", "7")
mp.setenv("PARQUET_AND_INFO_MAX_DATASET_SIZE", "10_000")
mp.setenv("DESCRIPTIVE_STATISTICS_MAX_PARQUET_SIZE_BYTES", "10_000")
mp.setenv("PARQUET_AND_INFO_MAX_EXTERNAL_DATA_FILES", "10")
mp.setenv("PARQUET_AND_INFO_COMMITTER_HF_TOKEN", CI_PARQUET_CONVERTER_APP_TOKEN)
mp.setenv("DUCKDB_INDEX_COMMITTER_HF_TOKEN", CI_PARQUET_CONVERTER_APP_TOKEN)
mp.setenv("DATASETS_BASED_HF_DATASETS_CACHE", str(datasets_cache_directory))
mp.setenv("HF_MODULES_CACHE", str(modules_cache_directory))
mp.setenv("WORKER_CONTENT_MAX_BYTES", "10_000_000")
mp.setenv("WORKER_STATE_FILE_PATH", worker_state_file_path)
mp.setenv("WORKER_HEARTBEAT_INTERVAL_SECONDS", "1")
mp.setenv("WORKER_KILL_ZOMBIES_INTERVAL_SECONDS", "1")
mp.setenv("WORKER_KILL_LONG_JOB_INTERVAL_SECONDS", "1")
mp.setenv("OPT_IN_OUT_URLS_SCAN_SPAWNING_TOKEN", "dummy_spawning_token")
yield mp
mp.undo()
@fixture
def app_config(set_env_vars: MonkeyPatch) -> Iterator[AppConfig]:
app_config = AppConfig.from_env()
if "test" not in app_config.cache.mongo_database or "test" not in app_config.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
yield app_config
@fixture
def cache_mongo_resource(app_config: AppConfig) -> Iterator[CacheMongoResource]:
with CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as resource:
yield resource
_clean_cache_database()
@fixture
def queue_mongo_resource(app_config: AppConfig) -> Iterator[QueueMongoResource]:
with QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) as resource:
yield resource
_clean_queue_database()
@fixture
def libraries_resource(app_config: AppConfig) -> Iterator[LibrariesResource]:
with LibrariesResource(
hf_endpoint=app_config.common.hf_endpoint,
init_hf_datasets_cache=app_config.datasets_based.hf_datasets_cache,
numba_path=app_config.numba.path,
) as libraries_resource:
yield libraries_resource
@fixture
def assets_directory(app_config: AppConfig) -> StrPath:
return init_assets_dir(app_config.assets.storage_directory)
@fixture
def parquet_metadata_directory(app_config: AppConfig) -> StrPath:
return init_parquet_metadata_dir(app_config.parquet_metadata.storage_directory)
@fixture
def duckdb_index_cache_directory(app_config: AppConfig) -> StrPath:
return init_duckdb_index_cache_dir(app_config.duckdb_index.cache_directory)
@fixture
def test_processing_graph() -> ProcessingGraph:
return ProcessingGraph(
{
"dummy": {"input_type": "dataset"},
"dummy2": {"input_type": "dataset"},
}
)
@fixture
def test_processing_step(test_processing_graph: ProcessingGraph) -> ProcessingStep:
return test_processing_graph.get_processing_step("dummy")
@fixture
def another_processing_step(test_processing_graph: ProcessingGraph) -> ProcessingStep:
return test_processing_graph.get_processing_step("dummy2")
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.datasets", "tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
| datasets-server-main | services/worker/tests/conftest.py |
from dataclasses import dataclass
from http import HTTPStatus
from typing import Optional
import pytest
from libcommon.exceptions import CustomError
from libcommon.processing_graph import ProcessingGraph, ProcessingStep
from libcommon.queue import JobDocument, Queue
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CachedResponseDocument, get_response, upsert_response
from libcommon.utils import JobInfo, Priority, Status
from worker.config import AppConfig
from worker.dtos import CompleteJobResult
from worker.job_manager import JobManager
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
from .fixtures.hub import get_default_config_split
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> None:
# prepare the database before each test, and clean it afterwards
pass
class DummyJobRunner(DatasetJobRunner):
@staticmethod
def get_job_runner_version() -> int:
return 1
@staticmethod
def get_job_type() -> str:
return "dummy"
def compute(self) -> CompleteJobResult:
return CompleteJobResult({"key": "value"})
@dataclass
class CacheEntry:
error_code: Optional[str]
job_runner_version: Optional[int]
dataset_git_revision: Optional[str]
progress: Optional[float] = None
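# The runner/manager must be created with a job type that matches the processing step:
# both the mismatched job type (first case) and the mismatched processing step (second case)
# are expected to raise ValueError.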
def test_check_type(
test_processing_graph: ProcessingGraph,
another_processing_step: ProcessingStep,
test_processing_step: ProcessingStep,
app_config: AppConfig,
) -> None:
job_id = "job_id"
dataset = "dataset"
revision = "revision"
config = "config"
split = "split"
job_type = f"not-{test_processing_step.job_type}"
job_info = JobInfo(
job_id=job_id,
type=job_type,
params={
"dataset": dataset,
"revision": revision,
"config": config,
"split": split,
},
priority=Priority.NORMAL,
difficulty=50,
)
with pytest.raises(ValueError):
job_runner = DummyJobRunner(
job_info=job_info,
processing_step=test_processing_step,
app_config=app_config,
)
JobManager(
job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
)
job_info = JobInfo(
job_id=job_id,
type=test_processing_step.job_type,
params={
"dataset": dataset,
"revision": revision,
"config": config,
"split": split,
},
priority=Priority.NORMAL,
difficulty=50,
)
with pytest.raises(ValueError):
job_runner = DummyJobRunner(
job_info=job_info,
processing_step=another_processing_step,
app_config=app_config,
)
JobManager(
job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
)
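# End-to-end check of the backfill mechanism: running and finishing a dataset-level "dummy" job
# should write a cache entry and enqueue the jobs of the steps it triggers (here "dataset-child"),
# while unrelated steps and config-level steps (whose config names are still unknown) stay empty.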
@pytest.mark.parametrize(
"priority",
[
Priority.LOW,
Priority.NORMAL,
],
)
def test_backfill(priority: Priority, app_config: AppConfig) -> None:
graph = ProcessingGraph(
{
"dummy": {"input_type": "dataset"},
"dataset-child": {"input_type": "dataset", "triggered_by": "dummy"},
"config-child": {"input_type": "config", "triggered_by": "dummy"},
"dataset-unrelated": {"input_type": "dataset"},
}
)
root_step = graph.get_processing_step("dummy")
queue = Queue()
assert JobDocument.objects().count() == 0
queue.add_job(
job_type=root_step.job_type,
dataset="dataset",
revision="revision",
config=None,
split=None,
priority=priority,
difficulty=50,
)
job_info = queue.start_job()
assert job_info["priority"] == priority
job_runner = DummyJobRunner(
job_info=job_info,
processing_step=root_step,
app_config=app_config,
)
job_manager = JobManager(job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=graph)
assert job_manager.priority == priority
job_result = job_manager.run_job()
assert job_result["is_success"]
assert job_result["output"] is not None
assert job_result["output"]["content"] == {"key": "value"}
job_manager.finish(job_result=job_result)
# check that the job has been finished
job = queue.get_job_with_id(job_id=job_info["job_id"])
assert job.status in [Status.SUCCESS, Status.ERROR, Status.CANCELLED]
assert job.priority == priority
    # check that the cache entry has been created
cached_response = get_response(kind=root_step.cache_kind, dataset="dataset", config=None, split=None)
assert cached_response is not None
assert cached_response["http_status"] == HTTPStatus.OK
assert cached_response["error_code"] is None
assert cached_response["content"] == {"key": "value"}
assert cached_response["dataset_git_revision"] == "revision"
assert cached_response["job_runner_version"] == 1
assert cached_response["progress"] == 1.0
dataset_child_jobs = queue.get_dump_with_status(job_type="dataset-child", status=Status.WAITING)
assert len(dataset_child_jobs) == 1
assert dataset_child_jobs[0]["dataset"] == "dataset"
assert dataset_child_jobs[0]["revision"] == "revision"
assert dataset_child_jobs[0]["config"] is None
assert dataset_child_jobs[0]["split"] is None
assert dataset_child_jobs[0]["priority"] is priority.value
dataset_unrelated_jobs = queue.get_dump_with_status(job_type="dataset-unrelated", status=Status.WAITING)
assert len(dataset_unrelated_jobs) == 0
# ^ the dataset-unrelated job is not triggered by the dummy job, so it should not be created
# check that no config level jobs have been created, because the config names are not known
config_child_jobs = queue.get_dump_with_status(job_type="config-child", status=Status.WAITING)
assert len(config_child_jobs) == 0
def test_job_runner_set_crashed(
test_processing_graph: ProcessingGraph,
test_processing_step: ProcessingStep,
app_config: AppConfig,
) -> None:
dataset = "dataset"
revision = "revision"
config = "config"
split = "split"
message = "I'm crashed :("
queue = Queue()
assert JobDocument.objects().count() == 0
queue.add_job(
job_type=test_processing_step.job_type,
dataset=dataset,
revision=revision,
config=config,
split=split,
priority=Priority.NORMAL,
difficulty=50,
)
job_info = queue.start_job()
job_runner = DummyJobRunner(
job_info=job_info,
processing_step=test_processing_step,
app_config=app_config,
)
job_manager = JobManager(
job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
)
job_manager.set_crashed(message=message)
response = CachedResponseDocument.objects()[0]
expected_error = {"error": message}
assert response.http_status == HTTPStatus.NOT_IMPLEMENTED
assert response.error_code == "JobManagerCrashedError"
assert response.dataset == dataset
assert response.dataset_git_revision == revision
assert response.config == config
assert response.split == split
assert response.content == expected_error
assert response.details == expected_error
# TODO: check if it stores the correct dataset git sha and job version when it's implemented
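# A "parallel" step is another step that computes the same artifact; when a response for it is
# already cached (here with the same job runner version), the manager refuses to recompute it and
# raises ResponseAlreadyComputedError, surfaced as a CustomError with a 500 status.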
def test_raise_if_parallel_response_exists(
test_processing_graph: ProcessingGraph,
test_processing_step: ProcessingStep,
app_config: AppConfig,
) -> None:
dataset = "dataset"
revision = "revision"
config = "config"
split = "split"
upsert_response(
kind="dummy-parallel",
dataset=dataset,
config=config,
split=split,
content={},
dataset_git_revision=revision,
job_runner_version=1,
progress=1.0,
http_status=HTTPStatus.OK,
)
job_info = JobInfo(
job_id="job_id",
type="dummy",
params={
"dataset": dataset,
"revision": revision,
"config": config,
"split": split,
},
priority=Priority.NORMAL,
difficulty=50,
)
job_runner = DummyJobRunner(
job_info=job_info,
processing_step=test_processing_step,
app_config=app_config,
)
job_manager = JobManager(
job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
)
with pytest.raises(CustomError) as exc_info:
job_manager.raise_if_parallel_response_exists(parallel_cache_kind="dummy-parallel", parallel_job_version=1)
assert exc_info.value.status_code == HTTPStatus.INTERNAL_SERVER_ERROR
assert exc_info.value.code == "ResponseAlreadyComputedError"
def test_doesnotexist(app_config: AppConfig) -> None:
dataset = "doesnotexist"
revision = "revision"
config, split = get_default_config_split()
job_info = JobInfo(
job_id="job_id",
type="dummy",
params={
"dataset": dataset,
"revision": revision,
"config": config,
"split": split,
},
priority=Priority.NORMAL,
difficulty=50,
)
processing_step_name = "dummy"
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DummyJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
processing_step = processing_graph.get_processing_step(processing_step_name)
job_runner = DummyJobRunner(
job_info=job_info,
processing_step=processing_step,
app_config=app_config,
)
job_manager = JobManager(
job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=processing_graph
)
job_result = job_manager.process()
# ^ the job is processed, since we don't contact the Hub to check if the dataset exists
assert job_result["output"] is not None
assert job_result["output"]["content"] == {"key": "value"}
| datasets-server-main | services/worker/tests/test_job_manager.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import os
# see https://github.com/huggingface/moon-landing/blob/main/server/scripts/staging-seed-db.ts
CI_APP_TOKEN = "hf_app_datasets-server_token"
CI_PARQUET_CONVERTER_APP_TOKEN = "hf_app_datasets-server-parquet-converter_token"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_USER = "__DUMMY_DATASETS_SERVER_USER__"
CI_USER_TOKEN = "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"
CI_SPAWNING_TOKEN = os.getenv("CI_SPAWNING_TOKEN", "unset")
| datasets-server-main | services/worker/tests/constants.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/tests/__init__.py |
from dataclasses import replace
from libcommon.processing_graph import ProcessingGraph, ProcessingStep
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.utils import JobInfo
from worker.config import AppConfig
from worker.dtos import CompleteJobResult
from worker.job_runner import JobRunner
from worker.job_runner_factory import BaseJobRunnerFactory
from worker.loop import Loop
from worker.resources import LibrariesResource
class DummyJobRunner(JobRunner):
@staticmethod
def get_job_type() -> str:
return "dummy"
@staticmethod
def get_job_runner_version() -> int:
return 1
def compute(self) -> CompleteJobResult:
return CompleteJobResult({"key": "value"})
class DummyJobRunnerFactory(BaseJobRunnerFactory):
def __init__(
self, processing_graph: ProcessingGraph, processing_step: ProcessingStep, app_config: AppConfig
) -> None:
self.processing_step = processing_step
self.processing_graph = processing_graph
self.app_config = app_config
def _create_job_runner(self, job_info: JobInfo) -> JobRunner:
return DummyJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=self.processing_step,
)
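# Loop.process_next_job returns False when there is nothing to process; once a job is added to the
# queue it is picked up, processed with the dummy factory above, and leaves the "in process" set.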
def test_process_next_job(
test_processing_graph: ProcessingGraph,
test_processing_step: ProcessingStep,
app_config: AppConfig,
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
worker_state_file_path: str,
) -> None:
job_type = test_processing_step.job_type
app_config = replace(app_config, worker=replace(app_config.worker, job_types_only=[job_type]))
factory = DummyJobRunnerFactory(
processing_step=test_processing_step, processing_graph=test_processing_graph, app_config=app_config
)
loop = Loop(
job_runner_factory=factory,
library_cache_paths=libraries_resource.storage_paths,
app_config=app_config,
state_file_path=worker_state_file_path,
processing_graph=test_processing_graph,
)
assert not loop.process_next_job()
dataset = "dataset"
revision = "revision"
config = "config"
split = "split"
loop.queue.add_job(
job_type=job_type, dataset=dataset, revision=revision, config=config, split=split, difficulty=50
)
assert loop.queue.is_job_in_process(
job_type=job_type, dataset=dataset, revision=revision, config=config, split=split
)
assert loop.process_next_job()
assert not loop.queue.is_job_in_process(
job_type=job_type, dataset=dataset, revision=revision, config=config, split=split
)
| datasets-server-main | services/worker/tests/test_loop.py |
import os
import sys
import time
from collections.abc import Callable, Iterator
from datetime import timedelta
from http import HTTPStatus
from pathlib import Path
from unittest.mock import patch
import orjson
import pytest
import pytz
from filelock import FileLock
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import JobDocument, JobDoesNotExistError, Queue
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CachedResponseDocument
from libcommon.storage import StrPath
from libcommon.utils import JobInfo, Priority, Status, get_datetime
from mirakuru import ProcessExitedWithError, TimeoutExpired
from pytest import fixture
from worker.config import AppConfig
from worker.executor import WorkerExecutor
from worker.job_runner_factory import JobRunnerFactory
from worker.loop import WorkerState
from worker.resources import LibrariesResource
_TIME = int(os.environ.get("WORKER_TEST_TIME", int(time.time() * 10e3)))
def get_job_info(prefix: str = "base") -> JobInfo:
job_id = prefix.encode().hex()
assert len(job_id) <= 24, "please choose a smaller prefix"
return JobInfo(
job_id=job_id + "0" * (24 - len(job_id)),
type="dataset-config-names",
params={
"dataset": f"__DUMMY_DATASETS_SERVER_USER__/{prefix}_dataset_{_TIME}",
"revision": "revision",
"config": "default",
"split": "train",
},
priority=Priority.LOW,
difficulty=50,
)
def write_worker_state(worker_state: WorkerState, worker_state_file_path: str) -> None:
with FileLock(worker_state_file_path + ".lock"):
with open(worker_state_file_path, "wb") as worker_state_f:
worker_state_f.write(orjson.dumps(worker_state))
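# The functions below are alternative entry points for the subprocess started by WorkerExecutor:
# the tests patch START_WORKER_LOOP_PATH to this very file (see the __main__ block at the bottom),
# and WORKER_LOOP_TYPE selects which behavior to simulate (normal loop, crash, timeout, long job).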
def start_worker_loop() -> None:
app_config = AppConfig.from_env()
if not app_config.worker.state_file_path:
raise ValueError("Failed to get worker state because 'state_file_path' is missing.")
if "--print-worker-state-path" in sys.argv:
print(app_config.worker.state_file_path, flush=True)
current_job_info = get_job_info()
worker_state = WorkerState(current_job_info=current_job_info, last_updated=get_datetime())
write_worker_state(worker_state, app_config.worker.state_file_path)
def start_worker_loop_that_crashes() -> None:
app_config = AppConfig.from_env()
if not app_config.worker.state_file_path:
raise ValueError("Failed to get worker state because 'state_file_path' is missing.")
if "--print-worker-state-path" in sys.argv:
print(app_config.worker.state_file_path, flush=True)
raise RuntimeError("Tried to run a bad worker loop")
def start_worker_loop_that_times_out() -> None:
time.sleep(20)
def start_worker_loop_with_long_job() -> None:
app_config = AppConfig.from_env()
if not app_config.worker.state_file_path:
raise ValueError("Failed to get worker state because 'state_file_path' is missing.")
if "--print-worker-state-path" in sys.argv:
print(app_config.worker.state_file_path, flush=True)
current_job_info = get_job_info("long")
with QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url):
current_job = JobDocument.objects(pk=current_job_info["job_id"]).get()
assert current_job.started_at is not None
worker_state = WorkerState(
current_job_info=current_job_info, last_updated=pytz.UTC.localize(current_job.started_at)
)
if current_job.status == Status.STARTED:
write_worker_state(worker_state, app_config.worker.state_file_path)
time.sleep(20)
Queue().finish_job(current_job_info["job_id"], is_success=True)
@fixture
def set_worker_state(worker_state_file_path: str) -> Iterator[WorkerState]:
job_info = get_job_info()
worker_state = WorkerState(current_job_info=job_info, last_updated=get_datetime())
write_worker_state(worker_state, worker_state_file_path)
yield worker_state
os.remove(worker_state_file_path)
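# The three fixtures below insert STARTED jobs directly in the queue collection to simulate:
# a job that just started, a long-running job with a recent heartbeat, and a "zombie" job whose
# last heartbeat is stale and should be killed by the executor.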
@fixture
def set_just_started_job_in_queue(queue_mongo_resource: QueueMongoResource) -> Iterator[JobDocument]:
if not queue_mongo_resource.is_available():
raise RuntimeError("Mongo resource is not available")
job_info = get_job_info()
try:
JobDocument.get(job_id=job_info["job_id"]).delete()
except JobDoesNotExistError:
pass
created_at = get_datetime()
job = JobDocument(
pk=job_info["job_id"],
type=job_info["type"],
dataset=job_info["params"]["dataset"],
revision=job_info["params"]["revision"],
config=job_info["params"]["config"],
split=job_info["params"]["split"],
unicity_id="unicity_id",
namespace="user",
priority=job_info["priority"],
status=Status.STARTED,
created_at=created_at,
started_at=created_at + timedelta(microseconds=1),
difficulty=job_info["difficulty"],
)
job.save()
yield job
job.delete()
@fixture
def set_long_running_job_in_queue(
app_config: AppConfig, queue_mongo_resource: QueueMongoResource
) -> Iterator[JobDocument]:
if not queue_mongo_resource.is_available():
raise RuntimeError("Mongo resource is not available")
job_info = get_job_info("long")
try:
JobDocument.get(job_id=job_info["job_id"]).delete()
except JobDoesNotExistError:
pass
created_at = get_datetime() - timedelta(days=1)
last_heartbeat = get_datetime() - timedelta(seconds=app_config.worker.heartbeat_interval_seconds)
job = JobDocument(
pk=job_info["job_id"],
type=job_info["type"],
dataset=job_info["params"]["dataset"],
revision=job_info["params"]["revision"],
config=job_info["params"]["config"],
split=job_info["params"]["split"],
unicity_id="unicity_id",
namespace="user",
priority=job_info["priority"],
status=Status.STARTED,
created_at=created_at,
started_at=created_at + timedelta(milliseconds=1),
last_heartbeat=last_heartbeat,
difficulty=job_info["difficulty"],
)
job.save()
yield job
job.delete()
@fixture
def set_zombie_job_in_queue(queue_mongo_resource: QueueMongoResource) -> Iterator[JobDocument]:
if not queue_mongo_resource.is_available():
raise RuntimeError("Mongo resource is not available")
job_info = get_job_info("zombie")
try:
JobDocument.get(job_id=job_info["job_id"]).delete()
except JobDoesNotExistError:
pass
created_at = get_datetime() - timedelta(days=1)
job = JobDocument(
pk=job_info["job_id"],
type=job_info["type"],
dataset=job_info["params"]["dataset"],
revision=job_info["params"]["revision"],
config=job_info["params"]["config"],
split=job_info["params"]["split"],
unicity_id="unicity_id",
namespace="user",
priority=job_info["priority"],
status=Status.STARTED,
created_at=created_at,
started_at=created_at + timedelta(milliseconds=1),
last_heartbeat=created_at + timedelta(milliseconds=2),
difficulty=job_info["difficulty"],
)
job.save()
yield job
job.delete()
@fixture
def job_runner_factory(
app_config: AppConfig,
libraries_resource: LibrariesResource,
assets_directory: StrPath,
parquet_metadata_directory: StrPath,
duckdb_index_cache_directory: StrPath,
statistics_cache_directory: StrPath,
) -> JobRunnerFactory:
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
return JobRunnerFactory(
app_config=app_config,
processing_graph=processing_graph,
hf_datasets_cache=libraries_resource.hf_datasets_cache,
assets_directory=assets_directory,
parquet_metadata_directory=parquet_metadata_directory,
duckdb_index_cache_directory=duckdb_index_cache_directory,
statistics_cache_directory=statistics_cache_directory,
)
@fixture
def executor(
app_config: AppConfig, job_runner_factory: JobRunnerFactory, worker_state_file_path: str
) -> WorkerExecutor:
return WorkerExecutor(app_config, job_runner_factory, state_file_path=worker_state_file_path)
def test_executor_get_state(executor: WorkerExecutor, set_worker_state: WorkerState) -> None:
assert executor.get_state() == set_worker_state
def test_executor_get_empty_state(
executor: WorkerExecutor,
) -> None:
assert executor.get_state() is None
def test_executor_heartbeat(
executor: WorkerExecutor,
set_just_started_job_in_queue: JobDocument,
set_worker_state: WorkerState,
) -> None:
current_job = set_just_started_job_in_queue
assert current_job.last_heartbeat is None
executor.heartbeat()
current_job.reload()
assert current_job.last_heartbeat is not None
last_heartbeat_datetime = pytz.UTC.localize(current_job.last_heartbeat)
assert last_heartbeat_datetime >= get_datetime() - timedelta(seconds=1)
def test_executor_kill_zombies(
executor: WorkerExecutor,
set_just_started_job_in_queue: JobDocument,
set_long_running_job_in_queue: JobDocument,
set_zombie_job_in_queue: JobDocument,
tmp_dataset_repo_factory: Callable[[str], str],
cache_mongo_resource: CacheMongoResource,
) -> None:
zombie = set_zombie_job_in_queue
normal_job = set_just_started_job_in_queue
tmp_dataset_repo_factory(zombie.dataset)
try:
executor.kill_zombies()
assert JobDocument.objects(pk=zombie.pk).get().status in [Status.ERROR, Status.CANCELLED, Status.SUCCESS]
assert JobDocument.objects(pk=normal_job.pk).get().status == Status.STARTED
response = CachedResponseDocument.objects()[0]
expected_error = {
"error": "Job manager crashed while running this job (missing heartbeats).",
}
assert response.http_status == HTTPStatus.NOT_IMPLEMENTED
assert response.error_code == "JobManagerCrashedError"
assert response.dataset == zombie.dataset
assert response.config == zombie.config
assert response.split == zombie.split
assert response.content == expected_error
assert response.details == expected_error
finally:
CachedResponseDocument.objects().delete()
def test_executor_start(
executor: WorkerExecutor,
queue_mongo_resource: QueueMongoResource,
set_just_started_job_in_queue: JobDocument,
set_zombie_job_in_queue: JobDocument,
tmp_dataset_repo_factory: Callable[[str], str],
cache_mongo_resource: CacheMongoResource,
) -> None:
if not queue_mongo_resource.is_available():
raise RuntimeError("Mongo resource is not available")
zombie = set_zombie_job_in_queue
tmp_dataset_repo_factory(zombie.dataset)
with patch.object(executor, "heartbeat", wraps=executor.heartbeat) as heartbeat_mock:
with patch.object(executor, "kill_zombies", wraps=executor.kill_zombies) as kill_zombies_mock:
with patch("worker.executor.START_WORKER_LOOP_PATH", __file__), patch.dict(
os.environ, {"WORKER_TEST_TIME": str(_TIME)}
):
executor.start()
current_job = set_just_started_job_in_queue
assert current_job is not None
assert str(current_job.pk) == get_job_info()["job_id"]
assert heartbeat_mock.call_count > 0
assert JobDocument.objects(pk=set_just_started_job_in_queue.pk).get().last_heartbeat is not None
assert kill_zombies_mock.call_count > 0
assert JobDocument.objects(pk=set_zombie_job_in_queue.pk).get().status in [
Status.ERROR,
Status.CANCELLED,
Status.SUCCESS,
]
@pytest.mark.parametrize(
"bad_worker_loop_type", ["start_worker_loop_that_crashes", "start_worker_loop_that_times_out"]
)
def test_executor_raises_on_bad_worker(
executor: WorkerExecutor, queue_mongo_resource: QueueMongoResource, tmp_path: Path, bad_worker_loop_type: str
) -> None:
if not queue_mongo_resource.is_available():
raise RuntimeError("Mongo resource is not available")
bad_start_worker_loop_path = tmp_path / "bad_start_worker_loop.py"
with bad_start_worker_loop_path.open("w") as bad_start_worker_loop_f:
bad_start_worker_loop_f.write("raise RuntimeError('Tried to start a bad worker loop.')")
with patch.dict(os.environ, {"WORKER_LOOP_TYPE": bad_worker_loop_type}):
with patch("worker.executor.START_WORKER_LOOP_PATH", __file__), patch.dict(
os.environ, {"WORKER_TEST_TIME": str(_TIME)}
):
with pytest.raises((ProcessExitedWithError, TimeoutExpired)):
executor.start()
def test_executor_stops_on_long_job(
executor: WorkerExecutor,
queue_mongo_resource: QueueMongoResource,
cache_mongo_resource: CacheMongoResource,
tmp_dataset_repo_factory: Callable[[str], str],
set_long_running_job_in_queue: JobDocument,
set_just_started_job_in_queue: JobDocument,
) -> None:
if not queue_mongo_resource.is_available():
raise RuntimeError("Mongo resource is not available")
long_job = set_long_running_job_in_queue
normal_job = set_just_started_job_in_queue
tmp_dataset_repo_factory(long_job.dataset)
try:
with patch.dict(os.environ, {"WORKER_LOOP_TYPE": "start_worker_loop_with_long_job"}):
with patch.object(executor, "max_seconds_without_heartbeat_for_zombies", -1): # don't kill normal_job
with patch.object(
executor, "kill_long_job_interval_seconds", 0.1
): # make sure it has the time to kill the job
with patch("worker.executor.START_WORKER_LOOP_PATH", __file__), patch.dict(
os.environ, {"WORKER_TEST_TIME": str(_TIME)}
):
executor.start()
assert long_job is not None
assert str(long_job.pk) == get_job_info("long")["job_id"]
long_job.reload()
assert long_job.status in [Status.ERROR, Status.CANCELLED, Status.SUCCESS], "must be finished because too long"
responses = CachedResponseDocument.objects()
assert len(responses) == 1
response = responses[0]
expected_error = {
"error": "Job manager was killed while running this job (job exceeded maximum duration).",
}
assert response.http_status == HTTPStatus.NOT_IMPLEMENTED
assert response.error_code == "JobManagerExceededMaximumDurationError"
assert response.dataset == long_job.dataset
assert response.config == long_job.config
assert response.split == long_job.split
assert response.content == expected_error
assert response.details == expected_error
normal_job.reload()
assert normal_job.status == Status.STARTED, "must stay untouched"
finally:
CachedResponseDocument.objects().delete()
if __name__ == "__main__":
worker_loop_type = os.environ.get("WORKER_LOOP_TYPE", "start_worker_loop")
if worker_loop_type == "start_worker_loop_that_crashes":
start_worker_loop_that_crashes()
elif worker_loop_type == "start_worker_loop_that_times_out":
start_worker_loop_that_times_out()
elif worker_loop_type == "start_worker_loop_with_long_job":
start_worker_loop_with_long_job()
else:
start_worker_loop()
| datasets-server-main | services/worker/tests/test_executor.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/tests/job_runners/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Mapping
from http import HTTPStatus
from typing import Any, Optional, TypedDict
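# The required keys live in _UpstreamResponse and the optional ones in the subclass declared with
# total=False: the usual way to mix required and optional keys in a TypedDict.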
class _UpstreamResponse(TypedDict):
kind: str
dataset: str
http_status: HTTPStatus
content: Mapping[str, Any]
class UpstreamResponse(_UpstreamResponse, total=False):
config: Optional[str]
split: Optional[str]
progress: Optional[float]
| datasets-server-main | services/worker/tests/job_runners/utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from pathlib import Path
from typing import Optional
import datasets.config
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.dtos import CompleteJobResult
from worker.job_runners._job_runner_with_datasets_cache import (
JobRunnerWithDatasetsCache,
)
from worker.resources import LibrariesResource
from ..fixtures.hub import get_default_config_split
class DummyJobRunner(JobRunnerWithDatasetsCache):
@staticmethod
def get_job_type() -> str:
return "dummy-job-runner"
# ^ borrowing the type, so that the processing step exists and the job runner can be initialized
    # refactoring libcommon.processing_graph might help avoid this
@staticmethod
def get_job_runner_version() -> int:
return 1
def compute(self) -> CompleteJobResult:
return CompleteJobResult({"col1": "a" * 200})
GetJobRunner = Callable[[str, Optional[str], Optional[str], AppConfig], DummyJobRunner]
@pytest.fixture
def get_job_runner(
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: Optional[str],
split: Optional[str],
app_config: AppConfig,
) -> DummyJobRunner:
processing_step_name = DummyJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DummyJobRunner.get_job_runner_version(),
}
}
)
return DummyJobRunner(
job_info={
"type": DummyJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
hf_datasets_cache=libraries_resource.hf_datasets_cache,
)
return _get_job_runner
def test_set_datasets_cache(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "dataset"
config, split = get_default_config_split()
job_runner = get_job_runner(dataset, config, split, app_config)
base_path = job_runner.base_cache_directory
dummy_path = base_path / "dummy"
job_runner.set_datasets_cache(dummy_path)
assert str(datasets.config.HF_DATASETS_CACHE).startswith(str(dummy_path))
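# pre_compute must point the datasets library cache (HF_DATASETS_CACHE) to a job-specific
# subdirectory, and post_compute must remove that subdirectory while keeping the base path.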
def test_pre_compute_post_compute(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "user/dataset"
config, split = get_default_config_split()
job_runner = get_job_runner(dataset, config, split, app_config)
datasets_base_path = job_runner.base_cache_directory
job_runner.pre_compute()
datasets_cache_subdirectory = job_runner.cache_subdirectory
assert_datasets_cache_path(path=datasets_cache_subdirectory, exists=True)
assert str(datasets.config.HF_DATASETS_CACHE).startswith(str(datasets_base_path))
assert "dummy-job-runner-user-dataset" in str(datasets.config.HF_DATASETS_CACHE)
job_runner.post_compute()
assert_datasets_cache_path(path=datasets_base_path, exists=True)
assert_datasets_cache_path(path=datasets_cache_subdirectory, exists=False, equals=False)
def assert_datasets_cache_path(path: Optional[Path], exists: bool, equals: bool = True) -> None:
assert path is not None
assert path.exists() is exists
assert (datasets.config.HF_DATASETS_CACHE == path) is equals
assert (datasets.config.DOWNLOADED_DATASETS_PATH == path / datasets.config.DOWNLOADED_DATASETS_DIR) is equals
assert (datasets.config.EXTRACTED_DATASETS_PATH == path / datasets.config.EXTRACTED_DATASETS_DIR) is equals
| datasets-server-main | services/worker/tests/job_runners/test__job_runner_with_datasets_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import random
from collections.abc import Callable
from pathlib import Path
from typing import Optional
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.dtos import CompleteJobResult
from worker.job_runners._job_runner_with_cache import JobRunnerWithCache
from worker.resources import LibrariesResource
from ..fixtures.hub import get_default_config_split
class DummyJobRunner(JobRunnerWithCache):
@staticmethod
def get_job_type() -> str:
return "dummy-job-runner"
@staticmethod
def get_job_runner_version() -> int:
return 1
def compute(self) -> CompleteJobResult:
return CompleteJobResult({"col1": "a" * 200})
GetJobRunner = Callable[[str, Optional[str], Optional[str], AppConfig], DummyJobRunner]
@pytest.fixture
def get_job_runner(
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: Optional[str],
split: Optional[str],
app_config: AppConfig,
) -> DummyJobRunner:
processing_step_name = DummyJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DummyJobRunner.get_job_runner_version(),
}
}
)
return DummyJobRunner(
job_info={
"type": DummyJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
cache_directory=libraries_resource.hf_datasets_cache,
)
return _get_job_runner
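# The expected subdirectory names below assume random.seed(0) (set in the test), which makes the
# leading numeric prefix deterministic; the trailing part is a hash of the job parameters.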
@pytest.mark.parametrize(
"dataset,config,split,expected",
[
("user/dataset", "config", "split", "64218998941645-dummy-job-runner-user-dataset-da67625f"),
# Every parameter variation changes the hash, hence the subdirectory
("user/dataset", None, "split", "64218998941645-dummy-job-runner-user-dataset-498c21fa"),
("user/dataset", "config2", "split", "64218998941645-dummy-job-runner-user-dataset-1c4f24f2"),
("user/dataset", "config", None, "64218998941645-dummy-job-runner-user-dataset-a87e8dc2"),
("user/dataset", "config", "split2", "64218998941645-dummy-job-runner-user-dataset-f169bd48"),
# The subdirectory length is truncated, and it always finishes with the hash
(
"very_long_dataset_name_0123456789012345678901234567890123456789012345678901234567890123456789",
"config",
"split",
"64218998941645-dummy-job-runner-very_long_dataset_name_012345678-25cb8442",
),
],
)
def test_get_cache_subdirectory(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
config: Optional[str],
split: Optional[str],
expected: str,
) -> None:
job_runner = get_job_runner(dataset, config, split, app_config)
random.seed(0)
assert job_runner.get_cache_subdirectory() == expected
def test_pre_compute_post_compute(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "user/dataset"
config, split = get_default_config_split()
job_runner = get_job_runner(dataset, config, split, app_config)
datasets_base_path = job_runner.base_cache_directory
job_runner.pre_compute()
datasets_cache_subdirectory = job_runner.cache_subdirectory
assert_datasets_cache_path(path=datasets_cache_subdirectory, exists=True)
job_runner.post_compute()
assert_datasets_cache_path(path=datasets_base_path, exists=True)
assert_datasets_cache_path(path=datasets_cache_subdirectory, exists=False)
def assert_datasets_cache_path(path: Optional[Path], exists: bool) -> None:
assert path is not None
assert path.exists() is exists
| datasets-server-main | services/worker/tests/job_runners/test__job_runner_with_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from dataclasses import replace
from http import HTTPStatus
import pytest
from datasets.packaged_modules import csv
from libcommon.exceptions import CustomError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.storage import StrPath
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.split.first_rows_from_streaming import (
SplitFirstRowsFromStreamingJobRunner,
)
from worker.resources import LibrariesResource
from worker.utils import get_json_size
from ...fixtures.hub import HubDatasetTest, get_default_config_split
GetJobRunner = Callable[[str, str, str, AppConfig], SplitFirstRowsFromStreamingJobRunner]
@pytest.fixture
def get_job_runner(
assets_directory: StrPath,
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
split: str,
app_config: AppConfig,
) -> SplitFirstRowsFromStreamingJobRunner:
processing_step_name = SplitFirstRowsFromStreamingJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
"config-level": {"input_type": "dataset", "triggered_by": "dataset-level"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": SplitFirstRowsFromStreamingJobRunner.get_job_runner_version(),
"triggered_by": "config-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
http_status=HTTPStatus.OK,
)
return SplitFirstRowsFromStreamingJobRunner(
job_info={
"type": SplitFirstRowsFromStreamingJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
hf_datasets_cache=libraries_resource.hf_datasets_cache,
assets_directory=assets_directory,
)
return _get_job_runner
def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public_csv: str) -> None:
dataset = hub_public_csv
config, split = get_default_config_split()
job_runner = get_job_runner(dataset, config, split, app_config)
response = job_runner.compute()
assert response
content = response.content
assert content
assert content["features"][0]["feature_idx"] == 0
assert content["features"][0]["name"] == "col_1"
assert content["features"][0]["type"]["_type"] == "Value"
assert content["features"][0]["type"]["dtype"] == "int64" # <---|
assert content["features"][1]["type"]["dtype"] == "int64" # <---|- auto-detected by the datasets library
assert content["features"][2]["type"]["dtype"] == "float64" # <-|
@pytest.mark.parametrize(
"name,use_token,exception_name,cause",
[
("public", False, None, None),
("audio", False, None, None),
("image", False, None, None),
("images_list", False, None, None),
("jsonl", False, None, None),
("gated", True, None, None),
("private", True, None, None),
# should we really test the following cases?
# The assumption is that the dataset exists and is accessible with the token
("gated", False, "InfoError", "FileNotFoundError"),
("private", False, "InfoError", "FileNotFoundError"),
],
)
def test_number_rows(
hub_responses_public: HubDatasetTest,
hub_responses_audio: HubDatasetTest,
hub_responses_image: HubDatasetTest,
hub_responses_images_list: HubDatasetTest,
hub_reponses_jsonl: HubDatasetTest,
hub_responses_gated: HubDatasetTest,
hub_responses_private: HubDatasetTest,
hub_responses_empty: HubDatasetTest,
hub_responses_does_not_exist_config: HubDatasetTest,
hub_responses_does_not_exist_split: HubDatasetTest,
get_job_runner: GetJobRunner,
name: str,
use_token: bool,
exception_name: str,
cause: str,
app_config: AppConfig,
) -> None:
# temporary patch to remove the effect of
# https://github.com/huggingface/datasets/issues/4875#issuecomment-1280744233
# note: it fixes the tests, but it does not fix the bug in the "real world"
if hasattr(csv, "_patched_for_streaming") and csv._patched_for_streaming:
csv._patched_for_streaming = False
hub_datasets = {
"public": hub_responses_public,
"audio": hub_responses_audio,
"image": hub_responses_image,
"images_list": hub_responses_images_list,
"jsonl": hub_reponses_jsonl,
"gated": hub_responses_gated,
"private": hub_responses_private,
"empty": hub_responses_empty,
"does_not_exist_config": hub_responses_does_not_exist_config,
"does_not_exist_split": hub_responses_does_not_exist_split,
}
dataset = hub_datasets[name]["name"]
expected_first_rows_response = hub_datasets[name]["first_rows_response"]
config, split = get_default_config_split()
job_runner = get_job_runner(
dataset,
config,
split,
app_config if use_token else replace(app_config, common=replace(app_config.common, hf_token=None)),
)
if exception_name is None:
job_runner.validate()
result = job_runner.compute().content
assert result == expected_first_rows_response
else:
with pytest.raises(Exception) as exc_info:
job_runner.validate()
job_runner.compute()
assert exc_info.typename == exception_name
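# Truncation: when the serialized rows exceed rows_max_bytes, the response is truncated (and flagged
# as such); if even the truncated content is too big, or there are too many columns, an error is raised.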
@pytest.mark.parametrize(
"name,rows_max_bytes,columns_max_number,error_code,truncated",
[
# not-truncated public response is 687 bytes
("public", 10, 1_000, "TooBigContentError", False), # too small limit, even with truncation
("public", 1_000, 1_000, None, False), # not truncated
("public", 1_000, 1, "TooManyColumnsError", False), # too small columns limit
# not-truncated big response is 5_885_989 bytes
("big", 10, 1_000, "TooBigContentError", False), # too small limit, even with truncation
("big", 1_000, 1_000, None, True), # truncated successfully
("big", 10_000_000, 1_000, None, False), # not truncated
],
)
def test_from_streaming_truncation(
hub_public_csv: str,
hub_public_big: str,
get_job_runner: GetJobRunner,
app_config: AppConfig,
name: str,
rows_max_bytes: int,
columns_max_number: int,
error_code: str,
truncated: bool,
) -> None:
dataset = hub_public_csv if name == "public" else hub_public_big
config, split = get_default_config_split()
job_runner = get_job_runner(
dataset,
config,
split,
replace(
app_config,
common=replace(app_config.common, hf_token=None),
first_rows=replace(
app_config.first_rows,
max_number=1_000_000,
min_number=10,
max_bytes=rows_max_bytes,
min_cell_bytes=10,
columns_max_number=columns_max_number,
),
),
)
if error_code:
with pytest.raises(CustomError) as error_info:
job_runner.compute()
assert error_info.value.code == error_code
else:
response = job_runner.compute().content
assert get_json_size(response) <= rows_max_bytes
assert response["truncated"] == truncated
| datasets-server-main | services/worker/tests/job_runners/split/test_first_rows_from_streaming.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import os
from collections.abc import Callable
from dataclasses import replace
from http import HTTPStatus
from typing import Optional
import duckdb
import pandas as pd
import pytest
import requests
from datasets import Features, Image, Sequence, Value
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.storage import StrPath
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.config.parquet_and_info import ConfigParquetAndInfoJobRunner
from worker.job_runners.split.duckdb_index import (
CREATE_INDEX_COMMAND,
CREATE_SEQUENCE_COMMAND,
CREATE_TABLE_COMMAND,
SplitDuckDbIndexJobRunner,
get_indexable_columns,
)
from worker.resources import LibrariesResource
from ...fixtures.hub import HubDatasetTest
GetJobRunner = Callable[[str, str, str, AppConfig], SplitDuckDbIndexJobRunner]
GetParquetJobRunner = Callable[[str, str, AppConfig], ConfigParquetAndInfoJobRunner]
@pytest.fixture
def get_job_runner(
duckdb_index_cache_directory: StrPath,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
split: str,
app_config: AppConfig,
) -> SplitDuckDbIndexJobRunner:
processing_step_name = SplitDuckDbIndexJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-step": {"input_type": "dataset"},
"config-parquet": {
"input_type": "config",
"triggered_by": "dataset-step",
"provides_config_parquet": True,
},
"config-split-names-from-streaming": {
"input_type": "config",
"triggered_by": "dataset-step",
},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": SplitDuckDbIndexJobRunner.get_job_runner_version(),
"triggered_by": ["config-parquet", "config-split-names-from-streaming"],
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
http_status=HTTPStatus.OK,
)
return SplitDuckDbIndexJobRunner(
job_info={
"type": SplitDuckDbIndexJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
duckdb_index_cache_directory=duckdb_index_cache_directory,
)
return _get_job_runner
@pytest.fixture
def get_parquet_job_runner(
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetParquetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigParquetAndInfoJobRunner:
processing_step_name = ConfigParquetAndInfoJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "config",
"job_runner_version": ConfigParquetAndInfoJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigParquetAndInfoJobRunner(
job_info={
"type": ConfigParquetAndInfoJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
hf_datasets_cache=libraries_resource.hf_datasets_cache,
)
return _get_job_runner
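# Full flow: build the parquet files with the config-parquet-and-info job runner, cache its response,
# run the duckdb index job runner, then download the produced .duckdb file and check the row count
# and the full-text-search results.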
@pytest.mark.parametrize(
"hub_dataset_name,max_parquet_size_bytes,expected_error_code",
[
("duckdb_index", None, None),
("partial_duckdb_index", None, None),
("gated", None, None),
("duckdb_index", 1_000, "SplitWithTooBigParquetError"), # parquet size is 2812
("public", None, "NoIndexableColumnsError"), # dataset does not have string columns to index
],
)
def test_compute(
get_parquet_job_runner: GetParquetJobRunner,
get_job_runner: GetJobRunner,
app_config: AppConfig,
hub_responses_public: HubDatasetTest,
hub_responses_duckdb_index: HubDatasetTest,
hub_responses_gated_duckdb_index: HubDatasetTest,
hub_dataset_name: str,
max_parquet_size_bytes: Optional[int],
expected_error_code: str,
) -> None:
hub_datasets = {
"public": hub_responses_public,
"duckdb_index": hub_responses_duckdb_index,
"partial_duckdb_index": hub_responses_duckdb_index,
"gated": hub_responses_gated_duckdb_index,
}
dataset = hub_datasets[hub_dataset_name]["name"]
config = hub_datasets[hub_dataset_name]["config_names_response"]["config_names"][0]["config"]
split = "train"
partial = hub_dataset_name.startswith("partial_")
app_config = (
app_config
if max_parquet_size_bytes is None
else replace(
app_config, duckdb_index=replace(app_config.duckdb_index, max_parquet_size_bytes=max_parquet_size_bytes)
)
)
app_config = (
app_config
if not partial
else replace(
app_config,
parquet_and_info=replace(
app_config.parquet_and_info, max_dataset_size=1, max_row_group_byte_size_for_copy=1
),
)
)
parquet_job_runner = get_parquet_job_runner(dataset, config, app_config)
parquet_response = parquet_job_runner.compute()
config_parquet = parquet_response.content
assert config_parquet["partial"] is partial
# TODO: simulate more than one parquet file to index
upsert_response(
"config-parquet-and-info",
dataset=dataset,
config=config,
http_status=HTTPStatus.OK,
content=config_parquet,
)
assert parquet_response
job_runner = get_job_runner(dataset, config, split, app_config)
job_runner.pre_compute()
if expected_error_code:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
job_runner.pre_compute()
response = job_runner.compute()
assert response
content = response.content
url = content["url"]
file_name = content["filename"]
features = content["features"]
assert isinstance(url, str)
if partial:
assert url.rsplit("/", 2)[1] == "partial-" + split
else:
assert url.rsplit("/", 2)[1] == split
assert file_name is not None
assert Features.from_dict(features) is not None
job_runner.post_compute()
        # download the duckdb index file locally
duckdb_file = requests.get(url, headers={"authorization": f"Bearer {app_config.common.hf_token}"})
with open(file_name, "wb") as f:
f.write(duckdb_file.content)
duckdb.execute("INSTALL 'fts';")
duckdb.execute("LOAD 'fts';")
con = duckdb.connect(file_name)
# validate number of inserted records
record_count = con.sql("SELECT COUNT(*) FROM data;").fetchall()
assert record_count is not None
assert isinstance(record_count, list)
assert record_count[0] == (5,)
# perform a search to validate fts feature
query = "Lord Vader"
result = con.execute(
"SELECT __hf_index_id, text FROM data WHERE fts_main_data.match_bm25(__hf_index_id, ?) IS NOT NULL;",
[query],
)
rows = result.df()
assert rows is not None
assert (rows["text"].eq("Vader turns round and round in circles as his ship spins into space.")).any()
assert (rows["text"].eq("The wingman spots the pirateship coming at him and warns the Dark Lord")).any()
assert (rows["text"].eq("We count thirty Rebel ships, Lord Vader.")).any()
assert (
rows["text"].eq(
"Grand Moff Tarkin and Lord Vader are interrupted in their discussion by the buzz of the comlink"
)
).any()
assert not (rows["text"].eq("There goes another one.")).any()
assert (rows["__hf_index_id"].isin([0, 2, 3, 4, 5, 7, 8, 9])).all()
con.close()
os.remove(file_name)
job_runner.post_compute()
@pytest.mark.parametrize(
"features, expected",
[
(Features({"col_1": Value("string"), "col_2": Value("int64")}), ["col_1"]),
(
Features(
{
"nested_1": [Value("string")],
"nested_2": Sequence(Value("string")),
"nested_3": Sequence({"foo": Value("string")}),
"nested_4": {"foo": Value("string"), "bar": Value("int64")},
"nested_int": [Value("int64")],
}
),
["nested_1", "nested_2", "nested_3", "nested_4"],
),
(Features({"col_1": Image()}), []),
],
)
def test_get_indexable_columns(features: Features, expected: list[str]) -> None:
indexable_columns = get_indexable_columns(features)
assert indexable_columns == expected
DATA = """Hello there !
General Kenobi.
You are a bold one.
Kill him !
...
Back away ! I will deal with this Jedi slime myself"""
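# fts_main_data.match_bm25 scores each row against the query; rows that do not match get a NULL
# score, so filtering on "IS NOT NULL" keeps only the matching rows.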
FTS_COMMAND = (
"SELECT * EXCLUDE (__hf_fts_score) FROM (SELECT *, fts_main_data.match_bm25(__hf_index_id, ?) AS __hf_fts_score"
" FROM data) A WHERE __hf_fts_score IS NOT NULL ORDER BY __hf_index_id;"
)
@pytest.mark.parametrize(
"df, query, expected_ids",
[
(pd.DataFrame([{"line": line} for line in DATA.split("\n")]), "bold", [2]),
(pd.DataFrame([{"nested": [line]} for line in DATA.split("\n")]), "bold", [2]),
(pd.DataFrame([{"nested": {"foo": line}} for line in DATA.split("\n")]), "bold", [2]),
(pd.DataFrame([{"nested": [{"foo": line}]} for line in DATA.split("\n")]), "bold", [2]),
(pd.DataFrame([{"nested": [{"foo": line, "bar": 0}]} for line in DATA.split("\n")]), "bold", [2]),
],
)
def test_index_command(df: pd.DataFrame, query: str, expected_ids: list[int]) -> None:
columns = ",".join('"' + str(column) + '"' for column in df.columns)
duckdb.sql(CREATE_SEQUENCE_COMMAND)
duckdb.sql(CREATE_TABLE_COMMAND.format(columns=columns) + " df;")
duckdb.sql(CREATE_INDEX_COMMAND.format(columns=columns))
result = duckdb.execute(FTS_COMMAND, parameters=[query]).df()
assert list(result.__hf_index_id) == expected_ids
| datasets-server-main | services/worker/tests/job_runners/split/test_duckdb_index.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from collections.abc import Callable, Mapping
from http import HTTPStatus
from typing import Optional
import numpy as np
import pandas as pd
import pytest
from datasets import Dataset
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.storage import StrPath
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.config.parquet_and_info import ConfigParquetAndInfoJobRunner
from worker.job_runners.split.descriptive_statistics import (
DECIMALS,
ColumnType,
SplitDescriptiveStatisticsJobRunner,
generate_bins,
)
from worker.resources import LibrariesResource
from ...fixtures.hub import HubDatasetTest
GetJobRunner = Callable[[str, str, str, AppConfig], SplitDescriptiveStatisticsJobRunner]
GetParquetAndInfoJobRunner = Callable[[str, str, AppConfig], ConfigParquetAndInfoJobRunner]
@pytest.fixture
def get_job_runner(
statistics_cache_directory: StrPath,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
split: str,
app_config: AppConfig,
) -> SplitDescriptiveStatisticsJobRunner:
processing_step_name = SplitDescriptiveStatisticsJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-config-names": {"input_type": "dataset"},
"config-split-names-from-info": {
"input_type": "config",
"triggered_by": "dataset-config-names",
},
processing_step_name: {
"input_type": "split",
"job_runner_version": SplitDescriptiveStatisticsJobRunner.get_job_runner_version(),
"triggered_by": ["config-split-names-from-info"],
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
http_status=HTTPStatus.OK,
)
return SplitDescriptiveStatisticsJobRunner(
job_info={
"type": SplitDescriptiveStatisticsJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 100,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
statistics_cache_directory=statistics_cache_directory,
)
return _get_job_runner
@pytest.fixture
def get_parquet_and_info_job_runner(
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetParquetAndInfoJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigParquetAndInfoJobRunner:
processing_step_name = ConfigParquetAndInfoJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-config-names": {"input_type": "dataset"},
processing_step_name: {
"input_type": "config",
"job_runner_version": ConfigParquetAndInfoJobRunner.get_job_runner_version(),
"triggered_by": "dataset-config-names",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigParquetAndInfoJobRunner(
job_info={
"type": ConfigParquetAndInfoJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 100,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
hf_datasets_cache=libraries_resource.hf_datasets_cache,
)
return _get_job_runner
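# The helpers below recompute the expected statistics independently with pandas/numpy, so the job
# runner output can be compared against them in test_compute.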
def count_expected_statistics_for_numerical_column(
column: pd.Series, column_name: str, dtype: ColumnType # type: ignore
) -> dict: # type: ignore
minimum, maximum, mean, median, std = (
column.min(),
column.max(),
column.mean(),
column.median(),
column.std(),
)
n_samples = column.shape[0]
nan_count = column.isna().sum()
if dtype is ColumnType.FLOAT:
hist, bin_edges = np.histogram(column[~column.isna()])
bin_edges = bin_edges.astype(float).round(DECIMALS).tolist()
else:
# TODO: n_bins is hardcoded here but should be fetched from the app_config.descriptive_statistics_config
bins = generate_bins(minimum, maximum, column_name=column_name, column_type=dtype, n_bins=10)
hist, bin_edges = np.histogram(column[~column.isna()], np.append(bins.bin_min, maximum))
bin_edges = bin_edges.astype(int).tolist()
hist = hist.astype(int).tolist()
if dtype is ColumnType.FLOAT:
minimum = minimum.astype(float).round(DECIMALS).item()
maximum = maximum.astype(float).round(DECIMALS).item()
mean = mean.astype(float).round(DECIMALS).item() # type: ignore
median = median.astype(float).round(DECIMALS).item() # type: ignore
std = std.astype(float).round(DECIMALS).item() # type: ignore
else:
mean, median, std = list(np.round([mean, median, std], DECIMALS))
return {
"nan_count": nan_count,
"nan_proportion": np.round(nan_count / n_samples, DECIMALS).item() if nan_count else 0.0,
"min": minimum,
"max": maximum,
"mean": mean,
"median": median,
"std": std,
"histogram": {
"hist": hist,
"bin_edges": bin_edges,
},
}
def count_expected_statistics_for_categorical_column(
column: pd.Series, class_labels: list[str] # type: ignore
) -> dict: # type: ignore
n_samples = column.shape[0]
nan_count = column.isna().sum()
value_counts = column.value_counts().to_dict()
n_unique = len(value_counts)
frequencies = {class_labels[int(class_id)]: class_count for class_id, class_count in value_counts.items()}
return {
"nan_count": nan_count,
"nan_proportion": np.round(nan_count / n_samples, DECIMALS).item() if nan_count else 0.0,
"n_unique": n_unique,
"frequencies": frequencies,
}
@pytest.fixture
def descriptive_statistics_expected(datasets: Mapping[str, Dataset]) -> dict: # type: ignore
ds = datasets["descriptive_statistics"]
df = ds.to_pandas()
expected_statistics = {}
for column_name in df.columns:
if column_name.startswith("int_"):
column_type = ColumnType.INT
elif column_name.startswith("float_"):
column_type = ColumnType.FLOAT
elif column_name.startswith("class_label"):
column_type = ColumnType.CLASS_LABEL
else:
continue
if column_type in [ColumnType.FLOAT, ColumnType.INT]:
column_stats = count_expected_statistics_for_numerical_column(
df[column_name], column_name=column_name, dtype=column_type
)
if sum(column_stats["histogram"]["hist"]) != df.shape[0] - column_stats["nan_count"]:
raise ValueError(column_name, column_stats)
expected_statistics[column_name] = {
"column_name": column_name,
"column_type": column_type,
"column_statistics": column_stats,
}
elif column_type is ColumnType.CLASS_LABEL:
class_labels = ds.features[column_name].names
column_stats = count_expected_statistics_for_categorical_column(df[column_name], class_labels=class_labels)
expected_statistics[column_name] = {
"column_name": column_name,
"column_type": column_type,
"column_statistics": column_stats,
}
return expected_statistics
@pytest.mark.parametrize(
"hub_dataset_name,expected_error_code",
[
("descriptive_statistics", None),
("gated", None),
("audio", "NoSupportedFeaturesError"),
("big", "SplitWithTooBigParquetError"),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
get_parquet_and_info_job_runner: GetParquetAndInfoJobRunner,
hub_responses_descriptive_statistics: HubDatasetTest,
hub_responses_gated_descriptive_statistics: HubDatasetTest,
hub_responses_audio: HubDatasetTest,
hub_responses_big: HubDatasetTest,
hub_dataset_name: str,
expected_error_code: Optional[str],
descriptive_statistics_expected: dict, # type: ignore
) -> None:
hub_datasets = {
"descriptive_statistics": hub_responses_descriptive_statistics,
"gated": hub_responses_gated_descriptive_statistics,
"audio": hub_responses_audio,
"big": hub_responses_big,
}
dataset = hub_datasets[hub_dataset_name]["name"]
splits_response = hub_datasets[hub_dataset_name]["splits_response"]
config, split = splits_response["splits"][0]["config"], splits_response["splits"][0]["split"]
# computing and pushing real parquet files because we need them for stats computation
parquet_job_runner = get_parquet_and_info_job_runner(dataset, config, app_config)
parquet_and_info_response = parquet_job_runner.compute()
upsert_response(
"config-parquet-and-info",
dataset=dataset,
config=config,
http_status=HTTPStatus.OK,
content=parquet_and_info_response.content,
)
assert parquet_and_info_response
job_runner = get_job_runner(dataset, config, split, app_config)
job_runner.pre_compute()
if expected_error_code:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
response = job_runner.compute()
assert sorted(response.content.keys()) == ["num_examples", "statistics"]
assert response.content["num_examples"] == 20
response_statistics = response.content["statistics"]
assert len(response_statistics) == len(descriptive_statistics_expected)
assert set([column_response["column_name"] for column_response in response_statistics]) == set(
descriptive_statistics_expected.keys()
) # assert returned features are as expected
for column_response_statistics in response_statistics:
assert_statistics_equal(
column_response_statistics, descriptive_statistics_expected[column_response_statistics["column_name"]]
)
def assert_statistics_equal(response: dict, expected: dict) -> None: # type: ignore
"""
    Check that all values are equal, or almost equal in the case of floats.
    We use np.isclose because of small possible mismatches
    between numpy (used to compute the expected values) and Python float rounding.
"""
assert response["column_name"] == expected["column_name"]
assert response["column_type"] == expected["column_type"]
response_stats, expected_stats = response["column_statistics"], expected["column_statistics"]
assert response_stats.keys() == expected_stats.keys()
if response["column_type"] is ColumnType.FLOAT:
assert np.isclose(
response_stats["histogram"]["bin_edges"], expected_stats["histogram"]["bin_edges"], 1e-3
).all()
assert np.isclose(response_stats["min"], expected_stats["min"], 1e-3)
assert np.isclose(response_stats["max"], expected_stats["max"], 1e-3)
assert np.isclose(response_stats["mean"], expected_stats["mean"], 1e-3)
assert np.isclose(response_stats["median"], expected_stats["median"], 1e-3)
assert np.isclose(response_stats["std"], expected_stats["std"], 1e-3)
assert np.isclose(response_stats["nan_proportion"], expected_stats["nan_proportion"], 1e-3)
assert response_stats["nan_count"] == expected_stats["nan_count"]
assert response_stats["histogram"]["hist"] == expected_stats["histogram"]["hist"]
elif response["column_type"] is ColumnType.INT:
assert np.isclose(response_stats["mean"], expected_stats["mean"], 1e-3)
assert np.isclose(response_stats["median"], expected_stats["median"], 1e-3)
assert np.isclose(response_stats["std"], expected_stats["std"], 1e-3)
assert np.isclose(response_stats["nan_proportion"], expected_stats["nan_proportion"], 1e-3)
assert response_stats["min"] == expected_stats["min"]
assert response_stats["max"] == expected_stats["max"]
assert response_stats["nan_count"] == expected_stats["nan_count"]
assert response_stats["histogram"] == expected_stats["histogram"]
elif response["column_type"] is ColumnType.CLASS_LABEL:
assert np.isclose(response_stats["nan_proportion"], expected_stats["nan_proportion"], 1e-3)
assert response_stats["nan_count"] == expected_stats["nan_count"]
assert response_stats["n_unique"] == expected_stats["n_unique"]
assert response_stats["frequencies"] == expected_stats["frequencies"]
else:
raise ValueError("Incorrect data type")
| datasets-server-main | services/worker/tests/job_runners/split/test_descriptive_statistics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from collections.abc import Callable, Mapping
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.constants import (
PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
)
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.dtos import ImageUrlColumnsResponse
from worker.job_runners.split.image_url_columns import SplitImageUrlColumnsJobRunner
from ...fixtures.hub import get_default_config_split
GetJobRunner = Callable[[str, str, str, AppConfig], SplitImageUrlColumnsJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
split: str,
app_config: AppConfig,
) -> SplitImageUrlColumnsJobRunner:
processing_step_name = SplitImageUrlColumnsJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
"config-level": {"input_type": "dataset", "triggered_by": "dataset-level"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": SplitImageUrlColumnsJobRunner.get_job_runner_version(),
"triggered_by": "config-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
http_status=HTTPStatus.OK,
)
return SplitImageUrlColumnsJobRunner(
job_info={
"type": SplitImageUrlColumnsJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
FIRST_ROWS_WITHOUT_STR_COLUMNS = {
"features": [
{
"feature_idx": 0,
"name": "col1",
"type": {
"dtype": "int64",
"_type": "Value",
},
},
{
"feature_idx": 1,
"name": "col2",
"type": {
"dtype": "float",
"_type": "Value",
},
},
],
"rows": [],
}
FIRST_ROWS_WITHOUT_IMAGE_URL_COLUMNS = {
"features": [
{
"feature_idx": 0,
"name": "col1",
"type": {
"dtype": "string",
"_type": "Value",
},
},
],
"rows": [
{"row_idx": 0, "row": {"col": "http://testurl.test/test_document.txt"}, "truncated_cells": []},
{"row_idx": 1, "row": {"col": "http://testurl.test/test"}, "truncated_cells": []},
],
}
FIRST_ROWS_WITH_IMAGE_URL_COLUMNS = {
"features": [
{
"feature_idx": 0,
"name": "col",
"type": {
"dtype": "string",
"_type": "Value",
},
},
{
"feature_idx": 1,
"name": "col1",
"type": {
"dtype": "string",
"_type": "Value",
},
},
],
"rows": [
{"row_idx": 0, "row": {"col": "http://testurl.test/test_image.jpg", "col1": ""}, "truncated_cells": []},
{"row_idx": 1, "row": {"col": "http://testurl.test/test_image2.jpg"}, "col1": "text", "truncated_cells": []},
{"row_idx": 2, "row": {"col": "other", "col1": "text"}, "truncated_cells": []},
{"row_idx": 1, "row": {"col": "http://testurl.test/test_image3.png", "col1": "text"}, "truncated_cells": []},
],
}
FIRST_ROWS_WITH_IMAGE_URL_COLUMNS_NO_ROWS = {
"features": [
{
"feature_idx": 0,
"name": "col",
"type": {
"dtype": "string",
"_type": "Value",
},
},
],
"rows": [],
}
DEFAULT_EMPTY_CONTENT: ImageUrlColumnsResponse = {"columns": []}
@pytest.mark.parametrize(
"dataset,upstream_content,expected_content",
[
(
"no_str_columns",
FIRST_ROWS_WITHOUT_STR_COLUMNS,
DEFAULT_EMPTY_CONTENT,
),
(
"no_image_url_columns",
FIRST_ROWS_WITHOUT_IMAGE_URL_COLUMNS,
DEFAULT_EMPTY_CONTENT,
),
(
"image_url_columns",
FIRST_ROWS_WITH_IMAGE_URL_COLUMNS,
{"columns": ["col"]},
),
(
"image_url_columns_no_rows",
FIRST_ROWS_WITH_IMAGE_URL_COLUMNS_NO_ROWS,
DEFAULT_EMPTY_CONTENT,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
upstream_content: Mapping[str, Any],
expected_content: Mapping[str, Any],
) -> None:
config, split = get_default_config_split()
job_runner = get_job_runner(
dataset,
config,
split,
app_config,
)
upsert_response(
kind="split-first-rows-from-streaming",
dataset=dataset,
config=config,
split=split,
content=upstream_content,
dataset_git_revision="dataset_git_revision",
job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
progress=1.0,
http_status=HTTPStatus.OK,
)
response = job_runner.compute()
assert response
assert response.content == expected_content
@pytest.mark.parametrize(
"dataset,upstream_content,upstream_status,exception_name",
[
("doesnotexist", {}, HTTPStatus.OK, "CachedArtifactNotFoundError"),
("wrong_format", {}, HTTPStatus.OK, "PreviousStepFormatError"),
(
"upstream_failed",
{},
HTTPStatus.INTERNAL_SERVER_ERROR,
"CachedArtifactError",
),
],
)
def test_compute_failed(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
upstream_content: Mapping[str, Any],
upstream_status: HTTPStatus,
exception_name: str,
) -> None:
config, split = get_default_config_split()
job_runner = get_job_runner(
dataset,
config,
split,
app_config,
)
if dataset != "doesnotexist":
upsert_response(
kind="split-first-rows-from-streaming",
dataset=dataset,
config=config,
split=split,
content=upstream_content,
dataset_git_revision="dataset_git_revision",
job_runner_version=PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
progress=1.0,
http_status=upstream_status,
)
with pytest.raises(Exception) as exc_info:
job_runner.compute()
assert exc_info.typename == exception_name
| datasets-server-main | services/worker/tests/job_runners/split/test_image_url_columns.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/tests/job_runners/split/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from http import HTTPStatus
from typing import Optional
import pytest
from libcommon.exceptions import CustomError
from libcommon.processing_graph import ProcessingStep
from libcommon.resources import CacheMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.dtos import CompleteJobResult
from worker.job_runners.split.split_job_runner import SplitJobRunner
@pytest.fixture(autouse=True)
def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
return cache_mongo_resource
class DummySplitJobRunner(SplitJobRunner):
@staticmethod
def get_job_runner_version() -> int:
return 1
@staticmethod
def get_job_type() -> str:
return "/dummy"
def compute(self) -> CompleteJobResult:
return CompleteJobResult({"key": "value"})
@pytest.mark.parametrize("config,split", [(None, None), (None, "split"), ("config", None)])
def test_failed_creation(
    test_processing_step: ProcessingStep, app_config: AppConfig, config: Optional[str], split: Optional[str]
) -> None:
upsert_response(
kind="dataset-config-names",
dataset="dataset",
content={"config_names": [{"dataset": "dataset", "config": config}]},
http_status=HTTPStatus.OK,
)
with pytest.raises(CustomError) as exc_info:
DummySplitJobRunner(
job_info={
"job_id": "job_id",
"type": test_processing_step.job_type,
"params": {
"dataset": "dataset",
"revision": "revision",
"config": config,
"split": split,
},
"priority": Priority.NORMAL,
"difficulty": 50,
},
processing_step=test_processing_step,
app_config=app_config,
).validate()
assert exc_info.value.code == "ParameterMissingError"
@pytest.mark.parametrize(
"upsert_config,upsert_split,exception_name",
[
("config", "split", None),
("config", "other_split", "SplitNotFoundError"),
("other_config", "split", "ConfigNotFoundError"),
],
)
def test_creation(
test_processing_step: ProcessingStep,
app_config: AppConfig,
upsert_config: str,
upsert_split: str,
exception_name: Optional[str],
) -> None:
dataset, config, split = "dataset", "config", "split"
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": upsert_config}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": upsert_config, "split": upsert_split}]},
http_status=HTTPStatus.OK,
)
if exception_name is None:
DummySplitJobRunner(
job_info={
"job_id": "job_id",
"type": test_processing_step.job_type,
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"priority": Priority.NORMAL,
"difficulty": 50,
},
processing_step=test_processing_step,
app_config=app_config,
).validate()
else:
with pytest.raises(CustomError) as exc_info:
DummySplitJobRunner(
job_info={
"job_id": "job_id",
"type": test_processing_step.job_type,
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"priority": Priority.NORMAL,
"difficulty": 50,
},
processing_step=test_processing_step,
app_config=app_config,
).validate()
assert exc_info.value.code == exception_name
| datasets-server-main | services/worker/tests/job_runners/split/test_split_job_runner.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CachedArtifactNotFoundError, upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.split.opt_in_out_urls_count import (
SplitOptInOutUrlsCountJobRunner,
)
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, str, str, AppConfig], SplitOptInOutUrlsCountJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
split: str,
app_config: AppConfig,
) -> SplitOptInOutUrlsCountJobRunner:
processing_step_name = SplitOptInOutUrlsCountJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
"config-level": {"input_type": "dataset", "triggered_by": "dataset-level"},
processing_step_name: {
"input_type": "split",
"job_runner_version": SplitOptInOutUrlsCountJobRunner.get_job_runner_version(),
"triggered_by": "config-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
http_status=HTTPStatus.OK,
)
return SplitOptInOutUrlsCountJobRunner(
job_info={
"type": SplitOptInOutUrlsCountJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
@pytest.mark.parametrize(
"dataset,config,split,upstream_status,upstream_content,expected_error_code,expected_content,should_raise",
[
(
"dataset_ok",
"config_ok",
"split_ok",
HTTPStatus.OK,
{
"has_urls_columns": True,
"num_scanned_rows": 4,
"opt_in_urls": [
{"url": "http://testurl.test/test_image3-optIn.jpg", "row_idx": 3, "column_name": "col"}
],
"opt_out_urls": [
{"url": "http://testurl.test/test_image-optOut.jpg", "row_idx": 0, "column_name": "col"}
],
"urls_columns": ["col"],
"num_opt_out_urls": 1,
"num_opt_in_urls": 1,
"num_urls": 4,
"full_scan": True,
},
None,
{
"has_urls_columns": True,
"num_scanned_rows": 4,
"urls_columns": ["col"],
"num_opt_out_urls": 1,
"num_opt_in_urls": 1,
"num_urls": 4,
"full_scan": True,
},
False,
),
(
"dataset_previous_step_error",
"config_previous_step_error",
"split_previous_step_error",
HTTPStatus.INTERNAL_SERVER_ERROR,
{},
"CachedArtifactError",
None,
True,
),
(
"dataset_format_error",
"config_format_error",
"split_format_error",
HTTPStatus.OK,
{"wrong_format": None},
"PreviousStepFormatError",
None,
True,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
config: str,
split: str,
upstream_status: HTTPStatus,
upstream_content: Any,
expected_error_code: str,
expected_content: Any,
should_raise: bool,
) -> None:
upsert_response(
kind="split-opt-in-out-urls-scan",
dataset=dataset,
config=config,
split=split,
content=upstream_content,
http_status=upstream_status,
)
job_runner = get_job_runner(dataset, config, split, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
assert job_runner.compute().content == expected_content
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = config = split = "doesnotexist"
job_runner = get_job_runner(dataset, config, split, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from asyncio import Semaphore
from collections.abc import Callable, Mapping
from dataclasses import replace
from http import HTTPStatus
from typing import Any
from unittest.mock import patch
import pytest
from aiohttp import ClientSession
from aiolimiter import AsyncLimiter
from libcommon.constants import (
PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION,
)
from libcommon.exceptions import ExternalServerError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.dtos import ImageUrlColumnsResponse
from worker.job_runners.split.opt_in_out_urls_scan_from_streaming import (
SplitOptInOutUrlsScanJobRunner,
check_spawning,
)
from worker.resources import LibrariesResource
from ...constants import CI_SPAWNING_TOKEN
from ...fixtures.hub import HubDatasetTest, get_default_config_split
GetJobRunner = Callable[[str, str, str, AppConfig], SplitOptInOutUrlsScanJobRunner]
async def mock_check_spawning(
image_urls: list[str], session: ClientSession, semaphore: Semaphore, limiter: AsyncLimiter, url: str
) -> Any:
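    # Minimal stand-in for check_spawning: it mimics the shape of the Spawning API response and
    # flags a URL as opted in/out whenever the URL itself contains "optIn"/"optOut".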
return {"urls": [{"url": url, "optIn": "optIn" in url, "optOut": "optOut" in url} for url in image_urls]}
@pytest.fixture
def get_job_runner(
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
split: str,
app_config: AppConfig,
) -> SplitOptInOutUrlsScanJobRunner:
processing_step_name = SplitOptInOutUrlsScanJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
"config-level": {"input_type": "dataset", "triggered_by": "dataset-level"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": SplitOptInOutUrlsScanJobRunner.get_job_runner_version(),
"triggered_by": "config-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
http_status=HTTPStatus.OK,
)
return SplitOptInOutUrlsScanJobRunner(
job_info={
"type": SplitOptInOutUrlsScanJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
hf_datasets_cache=libraries_resource.hf_datasets_cache,
)
return _get_job_runner
IMAGE_URL_COLUMNS_RESPONSE_EMPTY: ImageUrlColumnsResponse = {"columns": []}
IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA: ImageUrlColumnsResponse = {"columns": ["col"]}
DEFAULT_EMPTY_RESPONSE = {
"has_urls_columns": False,
"num_scanned_rows": 0,
"opt_in_urls": [],
"opt_out_urls": [],
"urls_columns": [],
"num_opt_out_urls": 0,
"num_opt_in_urls": 0,
"num_urls": 0,
"full_scan": None,
}
@pytest.mark.parametrize(
"name,rows_max_number,upstream_content,expected_content",
[
(
"public",
100_000,
IMAGE_URL_COLUMNS_RESPONSE_EMPTY,
DEFAULT_EMPTY_RESPONSE,
),
(
"spawning_opt_in_out",
            100_000,  # dataset has fewer rows
IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
{
"has_urls_columns": True,
"num_scanned_rows": 4,
"opt_in_urls": [
{"url": "http://testurl.test/test_image3-optIn.png", "row_idx": 3, "column_name": "col"}
],
"opt_out_urls": [
{"url": "http://testurl.test/test_image-optOut.jpg", "row_idx": 0, "column_name": "col"}
],
"urls_columns": ["col"],
"num_opt_out_urls": 1,
"num_opt_in_urls": 1,
"num_urls": 4,
"full_scan": True,
},
),
(
"spawning_opt_in_out",
3, # dataset has more rows
IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
{
"has_urls_columns": True,
"num_scanned_rows": 3,
"opt_in_urls": [],
"opt_out_urls": [
{"url": "http://testurl.test/test_image-optOut.jpg", "row_idx": 0, "column_name": "col"}
],
"urls_columns": ["col"],
"num_opt_out_urls": 1,
"num_opt_in_urls": 0,
"num_urls": 3,
"full_scan": False,
},
),
(
"spawning_opt_in_out",
            4,  # dataset has the same number of rows
IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
{
"has_urls_columns": True,
"num_scanned_rows": 4,
"opt_in_urls": [
{"url": "http://testurl.test/test_image3-optIn.png", "row_idx": 3, "column_name": "col"}
],
"opt_out_urls": [
{"url": "http://testurl.test/test_image-optOut.jpg", "row_idx": 0, "column_name": "col"}
],
"urls_columns": ["col"],
"num_opt_out_urls": 1,
"num_opt_in_urls": 1,
"num_urls": 4,
"full_scan": True,
},
),
],
)
def test_compute(
hub_responses_public: HubDatasetTest,
hub_responses_spawning_opt_in_out: HubDatasetTest,
app_config: AppConfig,
get_job_runner: GetJobRunner,
name: str,
rows_max_number: int,
upstream_content: Mapping[str, Any],
expected_content: Mapping[str, Any],
) -> None:
hub_datasets = {"public": hub_responses_public, "spawning_opt_in_out": hub_responses_spawning_opt_in_out}
dataset = hub_datasets[name]["name"]
config, split = get_default_config_split()
job_runner = get_job_runner(
dataset,
config,
split,
replace(app_config, urls_scan=replace(app_config.urls_scan, rows_max_number=rows_max_number)),
)
upsert_response(
kind="split-image-url-columns",
dataset=dataset,
config=config,
split=split,
content=upstream_content,
dataset_git_revision="dataset_git_revision",
job_runner_version=PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
progress=1.0,
http_status=HTTPStatus.OK,
)
with patch("worker.job_runners.split.opt_in_out_urls_scan_from_streaming.check_spawning", mock_check_spawning):
response = job_runner.compute()
assert response
assert response.content == expected_content
@pytest.mark.parametrize(
"dataset,columns_max_number,upstream_content,upstream_status,exception_name",
[
("doesnotexist", 10, {}, HTTPStatus.OK, "CachedArtifactNotFoundError"),
("wrong_format", 10, {}, HTTPStatus.OK, "PreviousStepFormatError"),
(
"upstream_failed",
10,
{},
HTTPStatus.INTERNAL_SERVER_ERROR,
"CachedArtifactError",
),
(
"info_error",
10,
IMAGE_URL_COLUMNS_RESPONSE_EMPTY,
HTTPStatus.OK,
"InfoError",
),
(
"too_many_columns",
0,
IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
HTTPStatus.OK,
"TooManyColumnsError",
),
],
)
def test_compute_failed(
app_config: AppConfig,
hub_responses_spawning_opt_in_out: HubDatasetTest,
get_job_runner: GetJobRunner,
dataset: str,
columns_max_number: int,
upstream_content: Mapping[str, Any],
upstream_status: HTTPStatus,
exception_name: str,
) -> None:
if dataset == "too_many_columns":
dataset = hub_responses_spawning_opt_in_out["name"]
config, split = get_default_config_split()
job_runner = get_job_runner(
dataset,
config,
split,
replace(app_config, urls_scan=replace(app_config.urls_scan, columns_max_number=columns_max_number)),
)
if dataset != "doesnotexist":
upsert_response(
kind="split-image-url-columns",
dataset=dataset,
config=config,
split=split,
content=upstream_content,
dataset_git_revision="dataset_git_revision",
job_runner_version=PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION,
progress=1.0,
http_status=upstream_status,
)
with pytest.raises(Exception) as exc_info:
job_runner.compute()
assert exc_info.typename == exception_name
def test_compute_error_from_spawning(
app_config: AppConfig,
get_job_runner: GetJobRunner,
hub_public_spawning_opt_in_out: str,
) -> None:
dataset = hub_public_spawning_opt_in_out
config, split = get_default_config_split()
job_runner = get_job_runner(
dataset,
config,
split,
replace(app_config, urls_scan=replace(app_config.urls_scan, spawning_url="wrong_url")),
)
upsert_response(
kind="split-image-url-columns",
dataset=dataset,
config=config,
split=split,
content=IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
dataset_git_revision="dataset_git_revision",
job_runner_version=PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION,
progress=1.0,
http_status=HTTPStatus.OK,
)
with pytest.raises(ExternalServerError):
job_runner.compute()
@pytest.mark.skip(
reason=(
"Temporarily disabled, we can't use secrets on fork repos. See"
" https://github.com/huggingface/datasets-server/issues/1085"
)
)
@pytest.mark.asyncio
async def test_real_check_spawning_response(app_config: AppConfig) -> None:
semaphore = Semaphore(value=10)
limiter = AsyncLimiter(10, time_period=1)
headers = {"Authorization": f"API {CI_SPAWNING_TOKEN}"}
async with ClientSession(headers=headers) as session:
image_url = "http://testurl.test/test_image.jpg"
image_urls = [image_url]
spawning_url = app_config.urls_scan.spawning_url
spawning_response = await check_spawning(image_urls, session, semaphore, limiter, spawning_url)
assert spawning_response and isinstance(spawning_response, dict)
assert spawning_response["urls"] and isinstance(spawning_response["urls"], list)
assert len(spawning_response["urls"]) == 2 # the API requires >1 urls
first_url = spawning_response["urls"][0]
assert first_url and isinstance(first_url, dict)
assert first_url["url"] and isinstance(first_url["url"], str)
assert first_url["url"] == image_url
| datasets-server-main | services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import os
from collections.abc import Callable, Generator
from dataclasses import replace
from http import HTTPStatus
from unittest.mock import patch
import pyarrow.parquet as pq
import pytest
from datasets import Dataset
from fsspec import AbstractFileSystem
from libcommon.exceptions import CustomError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.storage import StrPath
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.split.first_rows_from_parquet import (
SplitFirstRowsFromParquetJobRunner,
)
from worker.utils import get_json_size
GetJobRunner = Callable[[str, str, str, AppConfig], SplitFirstRowsFromParquetJobRunner]
@pytest.fixture
def get_job_runner(
assets_directory: StrPath,
parquet_metadata_directory: StrPath,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
split: str,
app_config: AppConfig,
) -> SplitFirstRowsFromParquetJobRunner:
processing_step_name = SplitFirstRowsFromParquetJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
"config-level": {
"input_type": "config",
"triggered_by": "dataset-level",
"provides_config_parquet_metadata": True,
},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": SplitFirstRowsFromParquetJobRunner.get_job_runner_version(),
"triggered_by": "config-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
http_status=HTTPStatus.OK,
)
return SplitFirstRowsFromParquetJobRunner(
job_info={
"type": SplitFirstRowsFromParquetJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": split,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
processing_graph=processing_graph,
assets_directory=assets_directory,
parquet_metadata_directory=parquet_metadata_directory,
)
return _get_job_runner
@pytest.fixture
def ds() -> Dataset:
return Dataset.from_dict({"col1": [1, 2, 3], "col2": ["a", "b", "c"]})
@pytest.fixture
def ds_fs(ds: Dataset, tmpfs: AbstractFileSystem) -> Generator[AbstractFileSystem, None, None]:
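    # Write the 3-row dataset to an in-memory filesystem so its parquet shard can stand in for the
    # Hub-hosted file once HTTPFile and the pyarrow reads are patched in the tests below.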
with tmpfs.open("config/train/0000.parquet", "wb") as f:
ds.to_parquet(f)
yield tmpfs
@pytest.mark.parametrize(
"rows_max_bytes,columns_max_number,error_code",
[
(0, 10, "TooBigContentError"), # too small limit, even with truncation
(1_000, 1, "TooManyColumnsError"), # too small columns limit
(1_000, 10, None),
],
)
def test_compute(
ds: Dataset,
ds_fs: AbstractFileSystem,
parquet_metadata_directory: StrPath,
get_job_runner: GetJobRunner,
app_config: AppConfig,
rows_max_bytes: int,
columns_max_number: int,
error_code: str,
) -> None:
dataset, config, split = "dataset", "config", "split"
parquet_file = ds_fs.open("config/train/0000.parquet")
fake_url = (
"https://fake.huggingface.co/datasets/dataset/resolve/refs%2Fconvert%2Fparquet/config/train/0000.parquet"
)
fake_metadata_subpath = "fake-parquet-metadata/dataset/config/train/0000.parquet"
config_parquet_metadata_content = {
"parquet_files_metadata": [
{
"dataset": dataset,
"config": config,
"split": split,
"url": fake_url, # noqa: E501
"filename": "0000.parquet",
"size": parquet_file.size,
"num_rows": len(ds),
"parquet_metadata_subpath": fake_metadata_subpath,
}
]
}
upsert_response(
kind="config-level",
dataset=dataset,
config=config,
content=config_parquet_metadata_content,
http_status=HTTPStatus.OK,
)
parquet_metadata = pq.read_metadata(ds_fs.open("config/train/0000.parquet"))
with patch("libcommon.parquet_utils.HTTPFile", return_value=parquet_file) as mock_http_file, patch(
"pyarrow.parquet.read_metadata", return_value=parquet_metadata
) as mock_read_metadata, patch("pyarrow.parquet.read_schema", return_value=ds.data.schema) as mock_read_schema:
job_runner = get_job_runner(
dataset,
config,
split,
replace(
app_config,
common=replace(app_config.common, hf_token=None),
first_rows=replace(
app_config.first_rows,
max_number=1_000_000,
min_number=10,
max_bytes=rows_max_bytes,
min_cell_bytes=10,
columns_max_number=columns_max_number,
),
),
)
if error_code:
with pytest.raises(CustomError) as error_info:
job_runner.compute()
assert error_info.value.code == error_code
else:
response = job_runner.compute().content
assert get_json_size(response) <= rows_max_bytes
assert response
assert response["rows"]
assert response["features"]
assert len(response["rows"]) == 3 # testing file has 3 rows see config/train/0000.parquet file
assert len(response["features"]) == 2 # testing file has 2 columns see config/train/0000.parquet file
assert response["features"][0]["feature_idx"] == 0
assert response["features"][0]["name"] == "col1"
assert response["features"][0]["type"]["_type"] == "Value"
assert response["features"][0]["type"]["dtype"] == "int64"
assert response["features"][1]["feature_idx"] == 1
assert response["features"][1]["name"] == "col2"
assert response["features"][1]["type"]["_type"] == "Value"
assert response["features"][1]["type"]["dtype"] == "string"
assert response["rows"][0]["row_idx"] == 0
assert response["rows"][0]["truncated_cells"] == []
assert response["rows"][0]["row"] == {"col1": 1, "col2": "a"}
assert response["rows"][1]["row_idx"] == 1
assert response["rows"][1]["truncated_cells"] == []
assert response["rows"][1]["row"] == {"col1": 2, "col2": "b"}
assert response["rows"][2]["row_idx"] == 2
assert response["rows"][2]["truncated_cells"] == []
assert response["rows"][2]["row"] == {"col1": 3, "col2": "c"}
assert len(mock_http_file.call_args_list) == 1
assert mock_http_file.call_args_list[0][0][1] == fake_url
assert len(mock_read_metadata.call_args_list) == 1
assert mock_read_metadata.call_args_list[0][0][0] == os.path.join(
parquet_metadata_directory, fake_metadata_subpath
)
assert len(mock_read_schema.call_args_list) == 1
assert mock_read_schema.call_args_list[0][0][0] == os.path.join(
parquet_metadata_directory, fake_metadata_subpath
)
@pytest.mark.parametrize(
"rows_max_bytes,rows_min_number,rows_max_number,truncated",
[
(1_000, 10, 100, False), # no truncation
(1_000, 1, 2, True), # returns 2 rows at max, while the split has 3 rows
(250, 1, 100, True), # does not return the 3 rows, because it would be more than the max bytes
],
)
def test_from_parquet_truncation(
ds: Dataset,
ds_fs: AbstractFileSystem,
get_job_runner: GetJobRunner,
app_config: AppConfig,
rows_max_bytes: int,
rows_min_number: int,
rows_max_number: int,
truncated: bool,
) -> None:
dataset, config, split = "dataset", "config", "split"
parquet_file = ds_fs.open("config/train/0000.parquet")
fake_url = (
"https://fake.huggingface.co/datasets/dataset/resolve/refs%2Fconvert%2Fparquet/config/train/0000.parquet"
)
fake_metadata_subpath = "fake-parquet-metadata/dataset/config/train/0000.parquet"
config_parquet_metadata_content = {
"parquet_files_metadata": [
{
"dataset": dataset,
"config": config,
"split": split,
"url": fake_url, # noqa: E501
"filename": "0000.parquet",
"size": parquet_file.size,
"num_rows": len(ds),
"parquet_metadata_subpath": fake_metadata_subpath,
}
]
}
upsert_response(
kind="config-level",
dataset=dataset,
config=config,
content=config_parquet_metadata_content,
http_status=HTTPStatus.OK,
)
parquet_metadata = pq.read_metadata(ds_fs.open("config/train/0000.parquet"))
with patch("libcommon.parquet_utils.HTTPFile", return_value=parquet_file), patch(
"pyarrow.parquet.read_metadata", return_value=parquet_metadata
), patch("pyarrow.parquet.read_schema", return_value=ds.data.schema):
job_runner = get_job_runner(
dataset,
config,
split,
replace(
app_config,
common=replace(app_config.common, hf_token=None),
first_rows=replace(
app_config.first_rows,
max_number=rows_max_number,
min_number=rows_min_number,
max_bytes=rows_max_bytes,
min_cell_bytes=10,
columns_max_number=1_000,
),
),
)
response = job_runner.compute().content
assert response
assert response["truncated"] == truncated
assert response["rows"]
        # the test file has 3 rows, see config/train/0000.parquet
if truncated:
assert len(response["rows"]) < 3
else:
assert len(response["rows"]) == 3
| datasets-server-main | services/worker/tests/job_runners/split/test_first_rows_from_parquet.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.split.is_valid import SplitIsValidJobRunner
from ..utils import UpstreamResponse
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, str, str, AppConfig], SplitIsValidJobRunner]
DATASET = "dataset"
CONFIG = "config"
SPLIT = "split"
UPSTREAM_RESPONSE_CONFIG_SIZE: UpstreamResponse = UpstreamResponse(
kind="config-size", dataset=DATASET, config=CONFIG, http_status=HTTPStatus.OK, content={}
)
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_PARQUET: UpstreamResponse = UpstreamResponse(
kind="split-first-rows-from-parquet",
dataset=DATASET,
config=CONFIG,
split=SPLIT,
http_status=HTTPStatus.OK,
content={},
)
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING: UpstreamResponse = UpstreamResponse(
kind="split-first-rows-from-streaming",
dataset=DATASET,
config=CONFIG,
split=SPLIT,
http_status=HTTPStatus.OK,
content={},
)
UPSTREAM_RESPONSE_SPLIT_DUCKDB_INDEX: UpstreamResponse = UpstreamResponse(
kind="split-duckdb-index",
dataset=DATASET,
config=CONFIG,
split=SPLIT,
http_status=HTTPStatus.OK,
content={},
)
UPSTREAM_RESPONSE_CONFIG_SIZE_ERROR: UpstreamResponse = UpstreamResponse(
kind="config-size", dataset=DATASET, config=CONFIG, http_status=HTTPStatus.INTERNAL_SERVER_ERROR, content={}
)
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_PARQUET_ERROR: UpstreamResponse = UpstreamResponse(
kind="split-first-rows-from-parquet",
dataset=DATASET,
config=CONFIG,
split=SPLIT,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
content={},
)
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING_ERROR: UpstreamResponse = UpstreamResponse(
kind="split-first-rows-from-streaming",
dataset=DATASET,
config=CONFIG,
split=SPLIT,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
content={},
)
UPSTREAM_RESPONSE_SPLIT_DUCKDB_INDEX_ERROR: UpstreamResponse = UpstreamResponse(
kind="split-duckdb-index",
dataset=DATASET,
config=CONFIG,
split=SPLIT,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
content={},
)
EXPECTED_ERROR = (
{"viewer": False, "preview": False, "search": False},
1.0,
)
EXPECTED_VIEWER_OK = (
{"viewer": True, "preview": False, "search": False},
1.0,
)
EXPECTED_PREVIEW_OK = (
{"viewer": False, "preview": True, "search": False},
1.0,
)
EXPECTED_SEARCH_OK = (
{"viewer": False, "preview": False, "search": True},
1.0,
)
EXPECTED_ALL_OK = (
{"viewer": True, "preview": True, "search": True},
1.0,
)
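# In the parametrized cases below, "viewer" follows config-size, "preview" follows the
# split-first-rows-* responses, and "search" follows split-duckdb-index: a flag is True when at
# least one of its upstream responses is a success.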
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
split: str,
app_config: AppConfig,
) -> SplitIsValidJobRunner:
processing_step_name = SplitIsValidJobRunner.get_job_type()
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
http_status=HTTPStatus.OK,
)
return SplitIsValidJobRunner(
job_info={
"type": SplitIsValidJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"config": config,
"split": split,
"revision": "revision",
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 20,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
processing_graph=processing_graph,
)
return _get_job_runner
@pytest.mark.parametrize(
"upstream_responses,expected",
[
(
[
UPSTREAM_RESPONSE_CONFIG_SIZE,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_PARQUET,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING,
UPSTREAM_RESPONSE_SPLIT_DUCKDB_INDEX,
],
EXPECTED_ALL_OK,
),
(
[
UPSTREAM_RESPONSE_CONFIG_SIZE_ERROR,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_PARQUET,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING,
],
EXPECTED_PREVIEW_OK,
),
(
[
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_PARQUET,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING,
],
EXPECTED_PREVIEW_OK,
),
(
[
UPSTREAM_RESPONSE_CONFIG_SIZE,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_PARQUET_ERROR,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING,
UPSTREAM_RESPONSE_SPLIT_DUCKDB_INDEX,
],
EXPECTED_ALL_OK,
),
(
[
UPSTREAM_RESPONSE_CONFIG_SIZE,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_PARQUET_ERROR,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING_ERROR,
],
EXPECTED_VIEWER_OK,
),
(
[
UPSTREAM_RESPONSE_CONFIG_SIZE,
],
EXPECTED_VIEWER_OK,
),
(
[
UPSTREAM_RESPONSE_SPLIT_DUCKDB_INDEX,
],
EXPECTED_SEARCH_OK,
),
(
[
UPSTREAM_RESPONSE_CONFIG_SIZE_ERROR,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_PARQUET_ERROR,
UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING_ERROR,
UPSTREAM_RESPONSE_SPLIT_DUCKDB_INDEX_ERROR,
],
EXPECTED_ERROR,
),
(
[],
EXPECTED_ERROR,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
upstream_responses: list[UpstreamResponse],
expected: Any,
) -> None:
dataset, config, split = DATASET, CONFIG, SPLIT
for upstream_response in upstream_responses:
upsert_response(**upstream_response)
job_runner = get_job_runner(dataset, config, split, app_config)
compute_result = job_runner.compute()
assert compute_result.content == expected[0]
assert compute_result.progress == expected[1]
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset, config, split = "doesnotexist", "doesnotexist", "doesnotexist"
job_runner = get_job_runner(dataset, config, split, app_config)
compute_result = job_runner.compute()
assert compute_result.content == {"viewer": False, "preview": False, "search": False}
| datasets-server-main | services/worker/tests/job_runners/split/test_is_valid.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import io
from collections.abc import Callable, Mapping
from http import HTTPStatus
from pathlib import Path
from typing import Any, Optional
from unittest.mock import patch
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets import Dataset, Features, Value
from fsspec.implementations.http import HTTPFile, HTTPFileSystem
from huggingface_hub import hf_hub_url
from libcommon.exceptions import PreviousStepFormatError
from libcommon.parquet_utils import ParquetIndexWithMetadata
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CachedArtifactError, upsert_response
from libcommon.storage import StrPath
from libcommon.utils import Priority, SplitHubFile
from worker.config import AppConfig
from worker.dtos import (
ConfigParquetMetadataResponse,
ConfigParquetResponse,
ParquetFileMetadataItem,
)
from worker.job_runners.config.parquet_metadata import ConfigParquetMetadataJobRunner
from ...constants import CI_USER_TOKEN
from ...fixtures.hub import hf_api
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, str, AppConfig], ConfigParquetMetadataJobRunner]
dummy_parquet_buffer = io.BytesIO()
pq.write_table(pa.table({"a": [0, 1, 2]}), dummy_parquet_buffer)
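# The buffer holds a 3-row table; it is returned by the mocked get_parquet_file below, which is
# why every expected ParquetFileMetadataItem reports num_rows=3.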
@pytest.fixture
def get_job_runner(
parquet_metadata_directory: StrPath,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigParquetMetadataJobRunner:
processing_step_name = ConfigParquetMetadataJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": ConfigParquetMetadataJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigParquetMetadataJobRunner(
job_info={
"type": ConfigParquetMetadataJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
parquet_metadata_directory=parquet_metadata_directory,
)
return _get_job_runner
@pytest.mark.parametrize(
"dataset,config,upstream_status,upstream_content,expected_error_code,expected_content,should_raise",
[
(
"ok",
"config_1",
HTTPStatus.OK,
ConfigParquetResponse(
parquet_files=[
SplitHubFile(
dataset="ok", config="config_1", split="train", url="url1", filename="filename1", size=0
),
SplitHubFile(
dataset="ok", config="config_1", split="train", url="url2", filename="filename2", size=0
),
],
partial=False,
features=None,
),
None,
ConfigParquetMetadataResponse(
parquet_files_metadata=[
ParquetFileMetadataItem(
dataset="ok",
config="config_1",
split="train",
url="url1",
filename="filename1",
size=0,
num_rows=3,
parquet_metadata_subpath="ok/--/config_1/train/filename1",
),
ParquetFileMetadataItem(
dataset="ok",
config="config_1",
split="train",
url="url2",
filename="filename2",
size=0,
num_rows=3,
parquet_metadata_subpath="ok/--/config_1/train/filename2",
),
],
partial=False,
features=None,
),
False,
),
(
"status_error",
"config_1",
HTTPStatus.NOT_FOUND,
{"error": "error"},
CachedArtifactError.__name__,
None,
True,
),
(
"format_error",
"config_1",
HTTPStatus.OK,
{"not_parquet_files": "wrong_format"},
PreviousStepFormatError.__name__,
None,
True,
),
(
"with_features",
"config_1",
HTTPStatus.OK,
ConfigParquetResponse(
parquet_files=[
SplitHubFile(
dataset="with_features",
config="config_1",
split="train",
url="url1",
filename="filename1",
size=0,
),
SplitHubFile(
dataset="with_features",
config="config_1",
split="train",
url="url2",
filename="filename2",
size=0,
),
],
partial=False,
features=Features({"a": Value("string")}).to_dict(),
),
None,
ConfigParquetMetadataResponse(
parquet_files_metadata=[
ParquetFileMetadataItem(
dataset="with_features",
config="config_1",
split="train",
url="url1",
filename="filename1",
size=0,
num_rows=3,
parquet_metadata_subpath="with_features/--/config_1/train/filename1",
),
ParquetFileMetadataItem(
dataset="with_features",
config="config_1",
split="train",
url="url2",
filename="filename2",
size=0,
num_rows=3,
parquet_metadata_subpath="with_features/--/config_1/train/filename2",
),
],
partial=False,
features=Features({"a": Value("string")}).to_dict(),
),
False,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
config: str,
upstream_status: HTTPStatus,
upstream_content: Any,
expected_error_code: str,
expected_content: Any,
should_raise: bool,
) -> None:
upsert_response(
kind="config-parquet",
dataset=dataset,
config=config,
content=upstream_content,
http_status=upstream_status,
)
job_runner = get_job_runner(dataset, config, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.type.__name__ == expected_error_code
else:
with patch("worker.job_runners.config.parquet_metadata.get_parquet_file") as mock_ParquetFile:
mock_ParquetFile.return_value = pq.ParquetFile(dummy_parquet_buffer)
assert job_runner.compute().content == expected_content
assert mock_ParquetFile.call_count == len(upstream_content["parquet_files"])
for parquet_file_item in upstream_content["parquet_files"]:
mock_ParquetFile.assert_any_call(
url=parquet_file_item["url"], fs=HTTPFileSystem(), hf_token=app_config.common.hf_token
)
assert expected_content["parquet_files_metadata"]
for parquet_file_metadata_item in expected_content["parquet_files_metadata"]:
assert (
pq.read_metadata(
Path(job_runner.parquet_metadata_directory)
/ parquet_file_metadata_item["parquet_metadata_subpath"]
)
== pq.ParquetFile(dummy_parquet_buffer).metadata
)
class AuthenticatedHTTPFile(HTTPFile): # type: ignore
last_url: Optional[str] = None
def __init__( # type: ignore
self,
fs,
url,
session=None,
block_size=None,
mode="rb",
cache_type="bytes",
cache_options=None,
size=None,
loop=None,
asynchronous=False,
**kwargs,
) -> None:
super().__init__(
fs,
url,
session=session,
block_size=block_size,
mode=mode,
cache_type=cache_type,
cache_options=cache_options,
size=size,
loop=loop,
asynchronous=asynchronous,
**kwargs,
)
assert self.kwargs == {"headers": {"authorization": f"Bearer {CI_USER_TOKEN}"}}
AuthenticatedHTTPFile.last_url = url
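# AuthenticatedHTTPFile asserts that the bearer token is forwarded in the request headers and
# records the last URL opened, letting the test below verify that ParquetIndexWithMetadata passes
# the token when it streams the parquet file.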
def test_ParquetIndexWithMetadata_query(
datasets: Mapping[str, Dataset], hub_public_big: str, tmp_path_factory: pytest.TempPathFactory
) -> None:
ds = datasets["big"]
httpfs = HTTPFileSystem(headers={"authorization": f"Bearer {CI_USER_TOKEN}"})
filename = next(
iter(
repo_file
for repo_file in hf_api.list_repo_files(repo_id=hub_public_big, repo_type="dataset")
if repo_file.endswith(".parquet")
)
)
url = hf_hub_url(repo_id=hub_public_big, filename=filename, repo_type="dataset")
metadata_path = str(tmp_path_factory.mktemp("test_ParquetIndexWithMetadata_query") / "metadata.parquet")
with httpfs.open(url) as f:
num_bytes = f.size
pf = pq.ParquetFile(url, filesystem=httpfs)
num_rows = pf.metadata.num_rows
features = Features.from_arrow_schema(pf.schema_arrow)
pf.metadata.write_metadata_file(metadata_path)
index = ParquetIndexWithMetadata(
features=features,
supported_columns=list(features),
unsupported_columns=[],
parquet_files_urls=[url],
metadata_paths=[metadata_path],
num_rows=[num_rows],
num_bytes=[num_bytes],
httpfs=httpfs,
hf_token=CI_USER_TOKEN,
max_arrow_data_in_memory=999999999,
)
with patch("libcommon.parquet_utils.HTTPFile", AuthenticatedHTTPFile):
out = index.query(offset=0, length=2).to_pydict()
assert out == ds[:2]
assert AuthenticatedHTTPFile.last_url == url
| datasets-server-main | services/worker/tests/job_runners/config/test_parquet_metadata.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from datasets import Features, Value
from libcommon.exceptions import PreviousStepFormatError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import (
CachedArtifactError,
CachedArtifactNotFoundError,
upsert_response,
)
from libcommon.utils import Priority, SplitHubFile
from worker.config import AppConfig
from worker.dtos import ConfigParquetAndInfoResponse, ConfigParquetResponse
from worker.job_runners.config.parquet import ConfigParquetJobRunner
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, str, AppConfig], ConfigParquetJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigParquetJobRunner:
processing_step_name = ConfigParquetJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": ConfigParquetJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigParquetJobRunner(
job_info={
"type": ConfigParquetJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
@pytest.mark.parametrize(
"dataset,config,upstream_status,upstream_content,expected_error_code,expected_content,should_raise",
[
(
"ok",
"config_1",
HTTPStatus.OK,
ConfigParquetAndInfoResponse(
parquet_files=[
SplitHubFile(
dataset="ok", config="config_1", split="train", url="url1", filename="filename1", size=0
),
SplitHubFile(
dataset="ok", config="config_1", split="train", url="url2", filename="filename2", size=0
),
],
dataset_info={"description": "value", "dataset_size": 10},
partial=False,
),
None,
ConfigParquetResponse(
parquet_files=[
SplitHubFile(
dataset="ok", config="config_1", split="train", url="url1", filename="filename1", size=0
),
SplitHubFile(
dataset="ok", config="config_1", split="train", url="url2", filename="filename2", size=0
),
],
partial=False,
features=None,
),
False,
),
(
"status_error",
"config_1",
HTTPStatus.NOT_FOUND,
{"error": "error"},
CachedArtifactError.__name__,
None,
True,
),
(
"format_error",
"config_1",
HTTPStatus.OK,
{"not_parquet_files": "wrong_format"},
PreviousStepFormatError.__name__,
None,
True,
),
(
"shards_order",
"config_1",
HTTPStatus.OK,
ConfigParquetAndInfoResponse(
parquet_files=[
SplitHubFile(
dataset="ok",
config="config_1",
split="train",
url="url1",
filename="0000.parquet",
size=0,
),
SplitHubFile(
dataset="ok",
config="config_1",
split="train",
url="url2",
filename="0001.parquet",
size=0,
),
SplitHubFile(
dataset="ok",
config="config_1",
split="test",
url="url2",
filename="0000.parquet",
size=0,
),
],
dataset_info={"description": "value", "dataset_size": 10},
partial=False,
),
None,
ConfigParquetResponse(
parquet_files=[
SplitHubFile(
dataset="ok",
config="config_1",
split="test",
url="url2",
filename="0000.parquet",
size=0,
),
SplitHubFile(
dataset="ok",
config="config_1",
split="train",
url="url1",
filename="0000.parquet",
size=0,
),
SplitHubFile(
dataset="ok",
config="config_1",
split="train",
url="url2",
filename="0001.parquet",
size=0,
),
],
partial=False,
features=None,
),
False,
),
(
"with_features",
"config_1",
HTTPStatus.OK,
ConfigParquetAndInfoResponse(
parquet_files=[
SplitHubFile(
dataset="with_features",
config="config_1",
split="train",
url="url1",
filename="filename1",
size=0,
),
SplitHubFile(
dataset="with_features",
config="config_1",
split="train",
url="url2",
filename="filename2",
size=0,
),
],
dataset_info={
"description": "value",
"dataset_size": 10,
"features": Features({"a": Value("string")}).to_dict(),
},
partial=False,
),
None,
ConfigParquetResponse(
parquet_files=[
SplitHubFile(
dataset="with_features",
config="config_1",
split="train",
url="url1",
filename="filename1",
size=0,
),
SplitHubFile(
dataset="with_features",
config="config_1",
split="train",
url="url2",
filename="filename2",
size=0,
),
],
partial=False,
features=Features({"a": Value("string")}).to_dict(),
),
False,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
config: str,
upstream_status: HTTPStatus,
upstream_content: Any,
expected_error_code: str,
expected_content: Any,
should_raise: bool,
) -> None:
upsert_response(
kind="config-parquet-and-info",
dataset=dataset,
config=config,
content=upstream_content,
http_status=upstream_status,
)
job_runner = get_job_runner(dataset, config, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
assert job_runner.compute().content == expected_content
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = config = "doesnotexist"
job_runner = get_job_runner(dataset, config, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/config/test_parquet.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.exceptions import PreviousStepFormatError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import (
CachedArtifactError,
CachedArtifactNotFoundError,
upsert_response,
)
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.config.split_names_from_info import (
ConfigSplitNamesFromInfoJobRunner,
)
GetJobRunner = Callable[[str, str, AppConfig], ConfigSplitNamesFromInfoJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigSplitNamesFromInfoJobRunner:
processing_step_name = ConfigSplitNamesFromInfoJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": ConfigSplitNamesFromInfoJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigSplitNamesFromInfoJobRunner(
job_info={
"type": ConfigSplitNamesFromInfoJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
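# Covered cases: splits successfully extracted from the upstream "config-info" response, a failed
# upstream response, and three malformed upstream payloads.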
@pytest.mark.parametrize(
"dataset,upstream_status,upstream_content,error_code,content",
[
(
"ok",
HTTPStatus.OK,
{
"dataset_info": {
"splits": {
"train": {"name": "train", "dataset_name": "ok"},
"validation": {"name": "validation", "dataset_name": "ok"},
"test": {"name": "test", "dataset_name": "ok"},
},
}
},
None,
{
"splits": [
{"dataset": "ok", "config": "config_name", "split": "train"},
{"dataset": "ok", "config": "config_name", "split": "validation"},
{"dataset": "ok", "config": "config_name", "split": "test"},
]
},
),
(
"upstream_fail",
HTTPStatus.INTERNAL_SERVER_ERROR,
{"error": "error"},
CachedArtifactError.__name__,
None,
),
(
"without_dataset_info",
HTTPStatus.OK,
{"some_column": "wrong_format"},
PreviousStepFormatError.__name__,
None,
),
(
"without_config_name",
HTTPStatus.OK,
{"dataset_info": "wrong_format"},
PreviousStepFormatError.__name__,
None,
),
(
"without_splits",
HTTPStatus.OK,
{"dataset_info": {"config_name": "wrong_format"}},
PreviousStepFormatError.__name__,
None,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
upstream_status: HTTPStatus,
upstream_content: Any,
error_code: str,
content: Any,
) -> None:
config = "config_name"
upsert_response(
kind="config-info", dataset=dataset, config=config, content=upstream_content, http_status=upstream_status
)
job_runner = get_job_runner(dataset, config, app_config)
if error_code:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == error_code
else:
assert job_runner.compute().content == content
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "non_existent"
config = "non_existent"
worker = get_job_runner(dataset, config, app_config)
with pytest.raises(CachedArtifactNotFoundError):
worker.compute()
| datasets-server-main | services/worker/tests/job_runners/config/test_split_names_from_info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.exceptions import PreviousStepFormatError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import (
CachedArtifactError,
CachedArtifactNotFoundError,
upsert_response,
)
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.config.size import ConfigSizeJobRunner
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, str, AppConfig], ConfigSizeJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigSizeJobRunner:
processing_step_name = ConfigSizeJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": ConfigSizeJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigSizeJobRunner(
job_info={
"type": ConfigSizeJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
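# The "dataset_ok" case checks that the parquet file sizes and the num_bytes/num_examples from
# "config-parquet-and-info" are aggregated into config-level and split-level size entries.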
@pytest.mark.parametrize(
"dataset,config,upstream_status,upstream_content,expected_error_code,expected_content,should_raise",
[
(
"dataset_ok",
"config_1",
HTTPStatus.OK,
{
"parquet_files": [
{"dataset": "dataset_ok", "config": "config_1", "split": "train", "size": 14281188},
{"dataset": "dataset_ok", "config": "config_1", "split": "test", "size": 2383903},
],
"dataset_info": {
"features": {
"image": {"_type": "Image"},
"label": {
"names": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
"_type": "ClassLabel",
},
},
"splits": {
"train": {
"name": "train",
"num_bytes": 17470800,
"num_examples": 60000,
"dataset_name": "dataset_ok",
},
"test": {
"name": "test",
"num_bytes": 2916432,
"num_examples": 10000,
"dataset_name": "dataset_ok",
},
},
"download_checksums": {
"https://storage.googleapis.com/cvdf-datasets/mnist/train-images-idx3-ubyte.gz": {
"num_bytes": 9912422,
"checksum": "440fcabf73cc546fa21475e81ea370265605f56be210a4024d2ca8f203523609",
},
"https://storage.googleapis.com/cvdf-datasets/mnist/train-labels-idx1-ubyte.gz": {
"num_bytes": 28881,
"checksum": "3552534a0a558bbed6aed32b30c495cca23d567ec52cac8be1a0730e8010255c",
},
"https://storage.googleapis.com/cvdf-datasets/mnist/t10k-images-idx3-ubyte.gz": {
"num_bytes": 1648877,
"checksum": "8d422c7b0a1c1c79245a5bcf07fe86e33eeafee792b84584aec276f5a2dbc4e6",
},
"https://storage.googleapis.com/cvdf-datasets/mnist/t10k-labels-idx1-ubyte.gz": {
"num_bytes": 4542,
"checksum": "f7ae60f92e00ec6debd23a6088c31dbd2371eca3ffa0defaefb259924204aec6",
},
},
"download_size": 11594722,
"dataset_size": 20387232,
"size_in_bytes": 31981954,
},
"partial": False,
},
None,
{
"size": {
"config": {
"dataset": "dataset_ok",
"config": "config_1",
"num_bytes_original_files": 11594722,
"num_bytes_parquet_files": 16665091,
"num_bytes_memory": 20387232,
"num_rows": 70000,
"num_columns": 2,
},
"splits": [
{
"dataset": "dataset_ok",
"config": "config_1",
"split": "train",
"num_bytes_parquet_files": 14281188,
"num_bytes_memory": 17470800,
"num_rows": 60000,
"num_columns": 2,
},
{
"dataset": "dataset_ok",
"config": "config_1",
"split": "test",
"num_bytes_parquet_files": 2383903,
"num_bytes_memory": 2916432,
"num_rows": 10000,
"num_columns": 2,
},
],
},
"partial": False,
},
False,
),
(
"status_error",
"config_1",
HTTPStatus.NOT_FOUND,
{"error": "error"},
CachedArtifactError.__name__,
None,
True,
),
(
"format_error",
"config_1",
HTTPStatus.OK,
{"not_dataset_info": "wrong_format"},
PreviousStepFormatError.__name__,
None,
True,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
config: str,
upstream_status: HTTPStatus,
upstream_content: Any,
expected_error_code: str,
expected_content: Any,
should_raise: bool,
) -> None:
upsert_response(
kind="config-parquet-and-info",
dataset=dataset,
config=config,
content=upstream_content,
http_status=upstream_status,
)
job_runner = get_job_runner(dataset, config, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
assert job_runner.compute().content == expected_content
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = config = "doesnotexist"
job_runner = get_job_runner(dataset, config, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/config/test_size.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/tests/job_runners/config/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CachedArtifactNotFoundError, upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.config.opt_in_out_urls_count import (
ConfigOptInOutUrlsCountJobRunner,
)
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, str, AppConfig], ConfigOptInOutUrlsCountJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigOptInOutUrlsCountJobRunner:
processing_step_name = ConfigOptInOutUrlsCountJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "config",
"job_runner_version": ConfigOptInOutUrlsCountJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigOptInOutUrlsCountJobRunner(
job_info={
"type": ConfigOptInOutUrlsCountJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
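# The config-level response is expected to sum the per-split URL counts and scanned rows, and to
# report full_scan=True only when every split was fully scanned.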
@pytest.mark.parametrize(
"dataset,config,split_names_status,split_names_content,spawning_status"
+ ",spawning_content,expected_error_code,expected_content,should_raise",
[
(
"dataset_ok_full_scan",
"config",
HTTPStatus.OK,
{
"splits": [
{"dataset": "dataset_ok_full_scan", "config": "config", "split": "split"},
{"dataset": "dataset_ok_full_scan", "config": "config", "split": "split2"},
{"dataset": "dataset_ok_full_scan", "config": "config", "split": "split3"},
]
},
[HTTPStatus.OK, HTTPStatus.OK, HTTPStatus.OK],
[
{
"urls_columns": ["url"],
"num_opt_in_urls": 1,
"num_opt_out_urls": 2,
"num_urls": 10,
"num_scanned_rows": 100,
"has_urls_columns": True,
"full_scan": True,
},
{
"urls_columns": [],
"num_opt_in_urls": 0,
"num_opt_out_urls": 0,
"num_urls": 0,
"num_scanned_rows": 30,
"has_urls_columns": False,
"full_scan": True,
},
{
"urls_columns": [],
"num_opt_in_urls": 0,
"num_opt_out_urls": 0,
"num_urls": 0,
"num_scanned_rows": 30,
"has_urls_columns": False,
"full_scan": True,
},
],
None,
{
"urls_columns": ["url"],
"num_opt_in_urls": 1,
"num_opt_out_urls": 2,
"num_urls": 10,
"num_scanned_rows": 160,
"has_urls_columns": True,
"full_scan": True,
},
False,
),
(
"dataset_ok_not_full_scan",
"config",
HTTPStatus.OK,
{
"splits": [
{"dataset": "dataset_ok_not_full_scan", "config": "config", "split": "split"},
{"dataset": "dataset_ok_not_full_scan", "config": "config", "split": "split2"},
{"dataset": "dataset_ok_not_full_scan", "config": "config", "split": "split3"},
]
},
[HTTPStatus.OK, HTTPStatus.OK, HTTPStatus.OK],
[
{
"urls_columns": ["url"],
"num_opt_in_urls": 1,
"num_opt_out_urls": 2,
"num_urls": 10,
"num_scanned_rows": 100,
"has_urls_columns": True,
"full_scan": False,
},
{
"urls_columns": [],
"num_opt_in_urls": 0,
"num_opt_out_urls": 0,
"num_urls": 0,
"num_scanned_rows": 30,
"has_urls_columns": False,
"full_scan": True,
},
{
"urls_columns": [],
"num_opt_in_urls": 0,
"num_opt_out_urls": 0,
"num_urls": 0,
"num_scanned_rows": 30,
"has_urls_columns": False,
"full_scan": True,
},
],
None,
{
"urls_columns": ["url"],
"num_opt_in_urls": 1,
"num_opt_out_urls": 2,
"num_urls": 10,
"num_scanned_rows": 160,
"has_urls_columns": True,
"full_scan": False,
},
False,
),
(
"previous_step_error",
"config",
HTTPStatus.INTERNAL_SERVER_ERROR,
{},
[],
[],
"CachedArtifactError",
None,
True,
),
(
"previous_step_format_error",
"config",
HTTPStatus.OK,
{
"splits": [
{"dataset": "dataset_ok", "config": "config", "split": "split"},
{"dataset": "dataset_ok", "config": "config", "split": "split2"},
]
},
[HTTPStatus.OK],
[{"wrong_format": None}],
"PreviousStepFormatError",
None,
True,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
config: str,
split_names_status: HTTPStatus,
split_names_content: Any,
spawning_status: list[HTTPStatus],
spawning_content: list[Any],
expected_error_code: str,
expected_content: Any,
should_raise: bool,
) -> None:
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content=split_names_content,
http_status=split_names_status,
)
if split_names_status == HTTPStatus.OK:
for split_item, status, content in zip(split_names_content["splits"], spawning_status, spawning_content):
upsert_response(
kind="split-opt-in-out-urls-count",
dataset=dataset,
config=split_item["config"],
split=split_item["split"],
content=content,
http_status=status,
)
job_runner = get_job_runner(dataset, config, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
assert job_runner.compute().content == expected_content
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = config = "doesnotexist"
job_runner = get_job_runner(dataset, config, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from http import HTTPStatus
from typing import Optional
import pytest
from libcommon.exceptions import CustomError
from libcommon.processing_graph import ProcessingStep
from libcommon.resources import CacheMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.dtos import CompleteJobResult
from worker.job_runners.config.config_job_runner import ConfigJobRunner
@pytest.fixture(autouse=True)
def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
return cache_mongo_resource
class DummyConfigJobRunner(ConfigJobRunner):
@staticmethod
def get_job_runner_version() -> int:
return 1
@staticmethod
def get_job_type() -> str:
return "/dummy"
def compute(self) -> CompleteJobResult:
return CompleteJobResult({"key": "value"})
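# Creating a config-level job runner without a config should fail with ParameterMissingError, and
# creating it for a config that is not listed in the cached "dataset-config-names" response should
# fail with ConfigNotFoundError.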
def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppConfig) -> None:
with pytest.raises(CustomError) as exc_info:
DummyConfigJobRunner(
job_info={
"job_id": "job_id",
"type": test_processing_step.job_type,
"params": {
"dataset": "dataset",
"revision": "revision",
"config": None,
"split": None,
},
"priority": Priority.NORMAL,
"difficulty": 50,
},
processing_step=test_processing_step,
app_config=app_config,
).validate()
assert exc_info.value.code == "ParameterMissingError"
@pytest.mark.parametrize(
"upsert_config,exception_name",
[
("config", None),
("other_config", "ConfigNotFoundError"),
],
)
def test_creation(
test_processing_step: ProcessingStep,
app_config: AppConfig,
upsert_config: str,
exception_name: Optional[str],
) -> None:
dataset, config = "dataset", "config"
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": upsert_config}]},
http_status=HTTPStatus.OK,
)
if exception_name is None:
DummyConfigJobRunner(
job_info={
"job_id": "job_id",
"type": test_processing_step.job_type,
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"priority": Priority.NORMAL,
"difficulty": 50,
},
processing_step=test_processing_step,
app_config=app_config,
).validate()
else:
with pytest.raises(CustomError) as exc_info:
DummyConfigJobRunner(
job_info={
"job_id": "job_id",
"type": test_processing_step.job_type,
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"priority": Priority.NORMAL,
"difficulty": 50,
},
processing_step=test_processing_step,
app_config=app_config,
).validate()
assert exc_info.value.code == exception_name
| datasets-server-main | services/worker/tests/job_runners/config/test_config_job_runner.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import io
import os
from collections.abc import Callable, Iterator
from contextlib import contextmanager
from dataclasses import replace
from fnmatch import fnmatch
from http import HTTPStatus
from multiprocessing import Pool
from pathlib import Path
from typing import Any, Optional, TypedDict
from unittest.mock import patch
import datasets.builder
import datasets.config
import datasets.info
import pandas as pd
import pyarrow.parquet as pq
import pytest
import requests
from datasets import Audio, Features, Image, Value, load_dataset_builder
from datasets.packaged_modules.generator.generator import (
Generator as ParametrizedGeneratorBasedBuilder,
)
from datasets.utils.py_utils import asdict
from huggingface_hub.hf_api import CommitOperationAdd, HfApi
from libcommon.dataset import get_dataset_info_for_supported_datasets
from libcommon.exceptions import (
CustomError,
DatasetInBlockListError,
DatasetManualDownloadError,
)
from libcommon.processing_graph import ProcessingGraph, ProcessingStep
from libcommon.queue import Queue
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.utils import JobInfo, JobParams, Priority
from worker.config import AppConfig
from worker.dtos import CompleteJobResult
from worker.job_manager import JobManager
from worker.job_runners.config.parquet_and_info import (
ConfigParquetAndInfoJobRunner,
ParquetFileValidator,
TooBigRowGroupsError,
_is_too_big_from_datasets,
_is_too_big_from_external_data_files,
_is_too_big_from_hub,
create_commits,
fill_builder_info,
get_delete_operations,
get_writer_batch_size_from_info,
get_writer_batch_size_from_row_group_size,
limit_parquet_writes,
list_generated_parquet_files,
parse_repo_filename,
raise_if_blocked,
raise_if_requires_manual_download,
stream_convert_to_parquet,
)
from worker.job_runners.dataset.config_names import DatasetConfigNamesJobRunner
from worker.resources import LibrariesResource
from ...constants import CI_HUB_ENDPOINT, CI_USER_TOKEN
from ...fixtures.hub import HubDatasetTest
@contextmanager
def blocked(app_config: AppConfig, repo_id: str) -> Iterator[None]:
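    """Temporarily add `repo_id` to the list of blocked datasets for the duration of the with block."""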
app_config.parquet_and_info.blocked_datasets.append(repo_id)
yield
app_config.parquet_and_info.blocked_datasets.remove(repo_id)
GetJobRunner = Callable[[str, str, AppConfig], ConfigParquetAndInfoJobRunner]
@pytest.fixture
def get_job_runner(
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigParquetAndInfoJobRunner:
processing_step_name = ConfigParquetAndInfoJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": ConfigParquetAndInfoJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigParquetAndInfoJobRunner(
job_info={
"type": ConfigParquetAndInfoJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
hf_datasets_cache=libraries_resource.hf_datasets_cache,
)
return _get_job_runner
def assert_content_is_equal(content: Any, expected: Any) -> None:
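    """Compare a parquet-and-info response with the expected one, ignoring the "download_checksums" values."""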
print(content)
assert set(content) == {"parquet_files", "dataset_info", "partial"}, content
assert content["parquet_files"] == expected["parquet_files"], content
assert len(content["dataset_info"]) == len(expected["dataset_info"]), content
content_value = content["dataset_info"]
expected_value = expected["dataset_info"]
assert set(content_value.keys()) == set(expected_value.keys()), content
for key in content_value.keys():
if key != "download_checksums":
assert content_value[key] == expected_value[key], content
assert content["partial"] == expected["partial"], content
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
hub_responses_public: HubDatasetTest,
) -> None:
dataset = hub_responses_public["name"]
config = hub_responses_public["config_names_response"]["config_names"][0]["config"]
job_runner = get_job_runner(dataset, config, app_config)
response = job_runner.compute()
assert response
content = response.content
assert content
assert len(content["parquet_files"]) == 1
assert_content_is_equal(content, hub_responses_public["parquet_and_info_response"])
def test_compute_legacy_configs(
app_config: AppConfig,
get_job_runner: GetJobRunner,
hub_public_legacy_configs: str,
) -> None:
app_config = replace(app_config, parquet_and_info=replace(app_config.parquet_and_info, max_dataset_size=20_000))
dataset_name = hub_public_legacy_configs
original_configs = {"first", "second"}
# first compute and push parquet files for each config for dataset with script with two configs
for config in original_configs:
job_runner = get_job_runner(dataset_name, config, app_config)
        # needed to overwrite the default cache record written when the job runner was created above
upsert_response(
kind="dataset-config-names",
dataset=hub_public_legacy_configs,
http_status=HTTPStatus.OK,
content={
"config_names": [
{"dataset": hub_public_legacy_configs, "config": "first"},
{"dataset": hub_public_legacy_configs, "config": "second"},
],
},
)
assert job_runner.compute()
hf_api = HfApi(endpoint=CI_HUB_ENDPOINT, token=CI_USER_TOKEN)
dataset_info = hf_api.dataset_info(
repo_id=hub_public_legacy_configs, revision=app_config.parquet_and_info.target_revision, files_metadata=False
)
repo_files = {f.rfilename for f in dataset_info.siblings}
    # assert that the repo contains only the parquet files for the dataset's configs and ".gitattributes"
# (no files from 'main')
assert ".gitattributes" in repo_files
assert all(
fnmatch(file, "first/*/*.parquet") or fnmatch(file, "second/*/*.parquet")
for file in repo_files.difference({".gitattributes"})
)
orig_repo_configs = {f.rfilename.split("/")[0] for f in dataset_info.siblings if f.rfilename.endswith(".parquet")}
# assert that both configs are pushed (push of second config didn't delete first config's files)
assert len(orig_repo_configs) == 2
assert orig_repo_configs == original_configs
# then change the set of dataset configs (remove "second")
job_runner = get_job_runner(dataset_name, "first", app_config)
assert job_runner.compute()
dataset_info = hf_api.dataset_info(
repo_id=hub_public_legacy_configs, revision=app_config.parquet_and_info.target_revision, files_metadata=False
)
updated_repo_files = {f.rfilename for f in dataset_info.siblings}
    # assert that the legacy config is removed from the repo
    # and that only the files for the config that was just pushed and ".gitattributes" remain
assert ".gitattributes" in updated_repo_files
assert all(fnmatch(file, "first/*/*.parquet") for file in updated_repo_files.difference({".gitattributes"}))
updated_repo_configs = {
f.rfilename.split("/")[0] for f in dataset_info.siblings if f.rfilename.endswith(".parquet")
}
assert len(updated_repo_configs) == 1
assert updated_repo_configs == {"first"}
@pytest.mark.parametrize(
"dataset,blocked,raises",
[
("public", ["public"], True),
("public", ["public", "audio"], True),
("public", ["audio"], False),
("public", [], False),
],
)
def test_raise_if_blocked(dataset: str, blocked: list[str], raises: bool) -> None:
if raises:
with pytest.raises(DatasetInBlockListError):
raise_if_blocked(dataset=dataset, blocked_datasets=blocked)
else:
raise_if_blocked(dataset=dataset, blocked_datasets=blocked)
def test_raise_if_requires_manual_download(hub_public_manual_download: str, app_config: AppConfig) -> None:
builder = load_dataset_builder(hub_public_manual_download)
with pytest.raises(DatasetManualDownloadError):
raise_if_requires_manual_download(
builder=builder,
hf_endpoint=app_config.common.hf_endpoint,
hf_token=app_config.common.hf_token,
)
@pytest.mark.parametrize(
"name,expected",
[("public", False), ("big", True)],
)
def test__is_too_big_from_hub(
hub_public_csv: str,
hub_public_big: str,
name: str,
expected: bool,
app_config: AppConfig,
) -> None:
dataset = hub_public_csv if name == "public" else hub_public_big
dataset_info = get_dataset_info_for_supported_datasets(
dataset=dataset,
hf_endpoint=app_config.common.hf_endpoint,
hf_token=app_config.common.hf_token,
revision="main",
files_metadata=True,
)
assert (
_is_too_big_from_hub(dataset_info=dataset_info, max_dataset_size=app_config.parquet_and_info.max_dataset_size)
== expected
)
@pytest.mark.parametrize(
"name,expected",
[("public", False), ("big", True)],
)
def test__is_too_big_from_datasets(
hub_public_csv: str,
hub_public_big: str,
name: str,
expected: bool,
app_config: AppConfig,
) -> None:
dataset = hub_public_csv if name == "public" else hub_public_big
builder = load_dataset_builder(dataset)
assert (
_is_too_big_from_datasets(
info=builder.info,
max_dataset_size=app_config.parquet_and_info.max_dataset_size,
)
== expected
)
@pytest.mark.parametrize(
"max_dataset_size,max_external_data_files,expected",
[
(None, None, False),
(10, None, True),
],
)
def test__is_too_big_external_files(
external_files_dataset_builder: "datasets.builder.DatasetBuilder",
expected: bool,
max_dataset_size: Optional[int],
max_external_data_files: Optional[int],
app_config: AppConfig,
) -> None:
max_dataset_size = max_dataset_size or app_config.parquet_and_info.max_dataset_size
max_external_data_files = max_external_data_files or app_config.parquet_and_info.max_external_data_files
assert (
_is_too_big_from_external_data_files(
builder=external_files_dataset_builder,
hf_token=app_config.common.hf_token,
max_dataset_size=max_dataset_size,
max_external_data_files=max_external_data_files,
)
== expected
)
@pytest.mark.parametrize(
"max_dataset_size,max_external_data_files,expected",
[
(None, None, False),
(None, 1, True),
],
)
def test_raise_if_too_many_external_files(
external_files_dataset_builder: "datasets.builder.DatasetBuilder",
expected: bool,
max_dataset_size: Optional[int],
max_external_data_files: Optional[int],
app_config: AppConfig,
) -> None:
max_dataset_size = max_dataset_size or app_config.parquet_and_info.max_dataset_size
max_external_data_files = max_external_data_files or app_config.parquet_and_info.max_external_data_files
assert (
_is_too_big_from_external_data_files(
builder=external_files_dataset_builder,
hf_token=app_config.common.hf_token,
max_dataset_size=max_dataset_size,
max_external_data_files=max_external_data_files,
)
== expected
)
def test_supported_if_big_parquet(
app_config: AppConfig,
get_job_runner: GetJobRunner,
hub_responses_big: HubDatasetTest,
) -> None:
# Not in the list of supported datasets and bigger than the maximum size
# but still supported since it's made of parquet files
# dataset = hub_public_big
dataset = hub_responses_big["name"]
config = hub_responses_big["config_names_response"]["config_names"][0]["config"]
job_runner = get_job_runner(dataset, config, app_config)
response = job_runner.compute()
assert response
content = response.content
assert content
assert len(content["parquet_files"]) == 1
assert_content_is_equal(content, hub_responses_big["parquet_and_info_response"])
def test_partially_converted_if_big_non_parquet(
app_config: AppConfig,
get_job_runner: GetJobRunner,
hub_responses_big_csv: HubDatasetTest,
) -> None:
# Not in the list of supported datasets and bigger than the maximum size
# dataset = hub_public_big_csv
dataset = hub_responses_big_csv["name"]
config = hub_responses_big_csv["config_names_response"]["config_names"][0]["config"]
job_runner = get_job_runner(dataset, config, app_config)
from datasets.packaged_modules.csv.csv import CsvConfig
# Set a small chunk size to yield more than one Arrow Table in _generate_tables
# to be able to stop the generation mid-way.
with patch.object(CsvConfig, "pd_read_csv_kwargs", {"chunksize": 10}):
response = job_runner.compute()
assert response
content = response.content
assert content
assert len(content["parquet_files"]) == 1
assert_content_is_equal(content, hub_responses_big_csv["parquet_and_info_response"])
# dataset is partially generated
assert content["parquet_files"][0]["size"] < app_config.parquet_and_info.max_dataset_size
assert content["parquet_files"][0]["url"].endswith("/partial-train/0000.parquet")
def test_supported_if_gated(
app_config: AppConfig,
get_job_runner: GetJobRunner,
hub_responses_gated: HubDatasetTest,
) -> None:
# Access must be granted
dataset = hub_responses_gated["name"]
config = hub_responses_gated["config_names_response"]["config_names"][0]["config"]
job_runner = get_job_runner(dataset, config, app_config)
response = job_runner.compute()
assert response
assert response.content
def test_blocked(
app_config: AppConfig,
get_job_runner: GetJobRunner,
hub_reponses_jsonl: HubDatasetTest,
) -> None:
# In the list of blocked datasets
with blocked(app_config, repo_id=hub_reponses_jsonl["name"]):
dataset = hub_reponses_jsonl["name"]
config = hub_reponses_jsonl["config_names_response"]["config_names"][0]["config"]
job_runner = get_job_runner(dataset, config, app_config)
with pytest.raises(CustomError) as e:
job_runner.compute()
assert e.typename == "DatasetInBlockListError"
@pytest.mark.parametrize(
"name",
["public", "audio", "gated"],
)
def test_compute_splits_response_simple_csv_ok(
hub_responses_public: HubDatasetTest,
hub_responses_audio: HubDatasetTest,
hub_responses_gated: HubDatasetTest,
get_job_runner: GetJobRunner,
name: str,
app_config: AppConfig,
data_df: pd.DataFrame,
) -> None:
hub_datasets = {"public": hub_responses_public, "audio": hub_responses_audio, "gated": hub_responses_gated}
dataset = hub_datasets[name]["name"]
config = hub_datasets[name]["config_names_response"]["config_names"][0]["config"]
expected_parquet_and_info_response = hub_datasets[name]["parquet_and_info_response"]
job_runner = get_job_runner(dataset, config, app_config)
result = job_runner.compute().content
assert_content_is_equal(result, expected_parquet_and_info_response)
# download the parquet file and check that it is valid
if name == "audio":
return
if name == "public":
df = pd.read_parquet(result["parquet_files"][0]["url"], engine="auto")
else:
        # for the gated dataset, the parquet files are not accessible without a token
with pytest.raises(Exception):
pd.read_parquet(result["parquet_files"][0]["url"], engine="auto")
r = requests.get(
result["parquet_files"][0]["url"], headers={"Authorization": f"Bearer {app_config.common.hf_token}"}
)
assert r.status_code == HTTPStatus.OK, r.text
df = pd.read_parquet(io.BytesIO(r.content), engine="auto")
assert df.equals(data_df), df
@pytest.mark.parametrize(
"name,error_code,cause",
[
("private", "DatasetNotFoundError", None),
],
)
def test_compute_splits_response_simple_csv_error(
hub_responses_private: HubDatasetTest,
get_job_runner: GetJobRunner,
name: str,
error_code: str,
cause: str,
app_config: AppConfig,
) -> None:
dataset = hub_responses_private["name"]
config_names_response = hub_responses_private["config_names_response"]
config = config_names_response["config_names"][0]["config"] if config_names_response else None
job_runner = get_job_runner(dataset, config, app_config)
with pytest.raises(CustomError) as exc_info:
job_runner.compute()
assert exc_info.value.code == error_code
assert exc_info.value.cause_exception == cause
if exc_info.value.disclose_cause:
response = exc_info.value.as_response()
assert set(response.keys()) == {"error", "cause_exception", "cause_message", "cause_traceback"}
response_dict = dict(response)
# ^ to remove mypy warnings
assert response_dict["cause_exception"] == cause
assert isinstance(response_dict["cause_traceback"], list)
assert response_dict["cause_traceback"][0] == "Traceback (most recent call last):\n"
@pytest.mark.parametrize(
"upstream_status,upstream_content,exception_name",
[
(HTTPStatus.NOT_FOUND, {"error": "error"}, "CachedArtifactError"),
(HTTPStatus.OK, {"not_config_names": "wrong_format"}, "PreviousStepFormatError"),
(HTTPStatus.OK, {"config_names": "not a list"}, "PreviousStepFormatError"),
],
)
def test_previous_step_error(
get_job_runner: GetJobRunner,
upstream_status: HTTPStatus,
upstream_content: Any,
exception_name: str,
hub_responses_public: HubDatasetTest,
app_config: AppConfig,
) -> None:
dataset = hub_responses_public["name"]
config = hub_responses_public["config_names_response"]["config_names"][0]["config"]
job_runner = get_job_runner(dataset, config, app_config)
upsert_response(
"dataset-config-names",
dataset=dataset,
http_status=upstream_status,
content=upstream_content,
)
with pytest.raises(Exception) as exc_info:
job_runner.compute()
assert exc_info.typename == exception_name
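# parse_repo_filename should map "config/split/NNNN.parquet" (including "partial-" split prefixes
# and dots in split names) back to (config, split), and reject unexpected layouts.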
@pytest.mark.parametrize(
"filename,split,config,raises",
[
("config/split/0000.parquet", "split", "config", False),
("config/split.with.dots/0000.parquet", "split.with.dots", "config", False),
("config/partial-split/0000.parquet", "split", "config", False),
("config/partial-split.with.dots/0000.parquet", "split.with.dots", "config", False),
("config/partial-toomanyzeros/00000.parquet", "toomanyzeros", "config", True),
("config/builder-split.parquet", "split", "config", True),
("plain_text/train/0000.parquet", "train", "plain_text", False),
("plain_text/train/0001.parquet", "train", "plain_text", False),
],
)
def test_parse_repo_filename(filename: str, split: str, config: str, raises: bool) -> None:
if raises:
with pytest.raises(Exception):
parse_repo_filename(filename)
else:
assert parse_repo_filename(filename) == (config, split)
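# Features that may hold large cells (images, audio, binary blobs, nested images) should get a
# reduced writer batch size of 100, while plain text features keep the default (None).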
@pytest.mark.parametrize(
"ds_info, has_big_chunks",
[
(datasets.info.DatasetInfo(), False),
(datasets.info.DatasetInfo(features=Features({"text": Value("string")})), False),
(datasets.info.DatasetInfo(features=Features({"image": Image()})), True),
(datasets.info.DatasetInfo(features=Features({"audio": Audio()})), True),
(datasets.info.DatasetInfo(features=Features({"nested": [{"image": Image()}]})), True),
(datasets.info.DatasetInfo(features=Features({"blob": Value("binary")})), True),
],
)
def test_get_writer_batch_size_from_info(ds_info: datasets.info.DatasetInfo, has_big_chunks: bool) -> None:
assert get_writer_batch_size_from_info(ds_info) == (100 if has_big_chunks else None)
@pytest.mark.parametrize(
"max_operations_per_commit,use_parent_commit,expected_num_commits",
[(2, False, 1), (1, False, 2), (2, True, 1), (1, True, 2)],
)
def test_create_commits(
hub_public_legacy_configs: str, max_operations_per_commit: int, use_parent_commit: bool, expected_num_commits: int
) -> None:
NUM_FILES = 2
repo_id = hub_public_legacy_configs
hf_api = HfApi(endpoint=CI_HUB_ENDPOINT, token=CI_USER_TOKEN)
if use_parent_commit:
target_dataset_info = hf_api.dataset_info(repo_id=repo_id, files_metadata=False)
parent_commit = target_dataset_info.sha
else:
parent_commit = None
directory = f".test_create_commits_{max_operations_per_commit}_{use_parent_commit}"
operations: list[CommitOperationAdd] = [
CommitOperationAdd(path_in_repo=f"{directory}/file{i}.txt", path_or_fileobj=f"content{i}".encode("UTF-8"))
for i in range(NUM_FILES)
]
commit_infos = create_commits(
hf_api=hf_api,
repo_id=repo_id,
operations=operations,
commit_message="test",
max_operations_per_commit=max_operations_per_commit,
parent_commit=parent_commit,
)
assert len(commit_infos) == expected_num_commits
# check that the files were created
filenames = hf_api.list_repo_files(repo_id=repo_id, repo_type="dataset")
for i in range(NUM_FILES):
assert f"{directory}/file{i}.txt" in filenames
GetDatasetConfigNamesJobRunner = Callable[[str, AppConfig], DatasetConfigNamesJobRunner]
@pytest.fixture
def get_dataset_config_names_job_runner(
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetDatasetConfigNamesJobRunner:
def _get_job_runner(
dataset: str,
app_config: AppConfig,
) -> DatasetConfigNamesJobRunner:
processing_step_name = DatasetConfigNamesJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DatasetConfigNamesJobRunner.get_job_runner_version(),
}
}
)
return DatasetConfigNamesJobRunner(
job_info={
"type": DatasetConfigNamesJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": None,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
hf_datasets_cache=libraries_resource.hf_datasets_cache,
)
return _get_job_runner
class JobRunnerArgs(TypedDict):
dataset: str
revision: str
config: str
app_config: AppConfig
tmp_path: Path
def launch_job_runner(job_runner_args: JobRunnerArgs) -> CompleteJobResult:
config = job_runner_args["config"]
dataset = job_runner_args["dataset"]
revision = job_runner_args["revision"]
app_config = job_runner_args["app_config"]
tmp_path = job_runner_args["tmp_path"]
job_runner = ConfigParquetAndInfoJobRunner(
job_info=JobInfo(
job_id=f"job_{config}",
type="config-parquet-and-info",
params=JobParams(dataset=dataset, revision=revision, config=config, split=None),
priority=Priority.NORMAL,
difficulty=50,
),
app_config=app_config,
processing_step=ProcessingStep(
name="config-parquet-and-info",
input_type="config",
job_runner_version=ConfigParquetAndInfoJobRunner.get_job_runner_version(),
difficulty=50,
),
hf_datasets_cache=tmp_path,
)
return job_runner.compute()
def test_concurrency(
hub_public_n_configs: str,
app_config: AppConfig,
tmp_path: Path,
get_dataset_config_names_job_runner: GetDatasetConfigNamesJobRunner,
queue_mongo_resource: QueueMongoResource,
cache_mongo_resource: CacheMongoResource,
) -> None:
"""
Test that multiple job runners (to compute config-parquet-and-info) can run in parallel,
without having conflicts when sending commits to the Hub.
For this test, we need a lot of configs for the same dataset (say 20) and one job runner for each.
Ideally we would try for both quick and slow jobs.
"""
repo_id = hub_public_n_configs
hf_api = HfApi(endpoint=CI_HUB_ENDPOINT, token=CI_USER_TOKEN)
revision = hf_api.dataset_info(repo_id=repo_id, files_metadata=False).sha
if revision is None:
raise ValueError(f"Could not find revision for dataset {repo_id}")
# fill the cache for the step dataset-config-names, required by the job_runner
# it's a lot of code 😅
job_info = JobInfo(
job_id="not_used",
type="dataset-config-names",
params=JobParams(dataset=repo_id, revision=revision, config=None, split=None),
priority=Priority.NORMAL,
difficulty=50,
)
queue = Queue()
queue.create_jobs([job_info])
job_info = queue.start_job(job_types_only=["dataset-config-names"])
job_manager = JobManager(
job_info=job_info,
app_config=app_config,
processing_graph=ProcessingGraph(
{
"dataset-config-names": {
"input_type": "dataset",
"provides_dataset_config_names": True,
"job_runner_version": DatasetConfigNamesJobRunner.get_job_runner_version(),
}
}
),
job_runner=get_dataset_config_names_job_runner(repo_id, app_config),
)
job_result = job_manager.run_job()
job_manager.finish(job_result=job_result)
if not job_result["output"]:
raise ValueError("Could not get config names")
configs = [str(config_name["config"]) for config_name in job_result["output"]["content"]["config_names"]]
# launch the job runners
NUM_JOB_RUNNERS = 10
with Pool(NUM_JOB_RUNNERS) as pool:
pool.map(
launch_job_runner,
[
JobRunnerArgs(
dataset=repo_id, revision=revision, config=config, app_config=app_config, tmp_path=tmp_path
)
for config in configs
],
)
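# Stale files of the target config and stray top-level files should be deleted, while files from
# other configs and duckdb index files are kept.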
@pytest.mark.parametrize(
"parquet_files,all_repo_files,config_names,config,deleted_files",
[
(
set(),
{"dummy", "c1/dummy", "c1/0.parquet", "c2/0.parquet", "c1/index.duckdb"},
{"c1", "c2"},
"c1",
{"dummy", "c1/dummy", "c1/0.parquet"},
),
(
{"c1/0.parquet"},
{"dummy", "c1/dummy", "c1/0.parquet", "c2/0.parquet", "c1/index.duckdb"},
{"c1", "c2"},
"c1",
{"dummy", "c1/dummy"},
),
],
)
def test_get_delete_operations(
parquet_files: set[str], all_repo_files: set[str], config_names: set[str], config: str, deleted_files: set[str]
) -> None:
parquet_operations = [
CommitOperationAdd(path_in_repo=path_in_repo, path_or_fileobj=b"") for path_in_repo in parquet_files
]
delete_operations = get_delete_operations(
parquet_operations=parquet_operations, all_repo_files=all_repo_files, config_names=config_names, config=config
)
assert set(delete_operation.path_in_repo for delete_operation in delete_operations) == deleted_files
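# With the writer batch size and MAX_SHARD_SIZE patched to 1, the CSV (arrow-based) builder below
# writes roughly one shard per input file, so the shard count reflects how early the conversion
# stopped once max_dataset_size was reached.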
@pytest.mark.parametrize(
"max_dataset_size,expected_num_shards",
[
(1, 1),
(150, 2),
(300, 4),
(9999999, 10),
(None, 10),
],
)
def test_stream_convert_to_parquet_arrowbasedbuilder(
csv_path: str, max_dataset_size: int, expected_num_shards: int, tmp_path: Path
) -> None:
num_data_files = 10
builder = load_dataset_builder(
"csv",
data_files={"train": [csv_path] * num_data_files},
cache_dir=str(tmp_path / f"test_stream_convert_to_parquet-{max_dataset_size=}"),
)
with patch("worker.job_runners.config.parquet_and_info.get_writer_batch_size_from_info", lambda ds_config_info: 1):
with patch.object(datasets.config, "MAX_SHARD_SIZE", 1):
parquet_operations, partial = stream_convert_to_parquet(builder, max_dataset_size=max_dataset_size)
num_shards = len(parquet_operations)
assert num_shards == expected_num_shards
assert partial == (expected_num_shards < num_data_files)
assert all(isinstance(op.path_or_fileobj, str) for op in parquet_operations)
parquet_files = list_generated_parquet_files(builder, partial=partial)
assert len(parquet_files) == expected_num_shards
assert all(os.path.isfile(parquet_file.local_file) for parquet_file in parquet_files)
if max_dataset_size is not None:
one_sample_max_size = 100
expected_max_dataset_size = max_dataset_size + one_sample_max_size
assert (
sum(pq.ParquetFile(parquet_file.local_file).read().nbytes for parquet_file in parquet_files)
< expected_max_dataset_size
)
@pytest.mark.parametrize(
"max_dataset_size,expected_num_shards",
[
(1, 1),
(150, 19),
(300, 38),
(9999999, 1000),
(None, 1000),
],
)
def test_stream_convert_to_parquet_generatorbasedbuilder(
max_dataset_size: int, expected_num_shards: int, tmp_path: Path
) -> None:
num_rows = 1000
def long_generator() -> Iterator[dict[str, int]]:
for i in range(num_rows):
yield {"foo": i}
cache_dir = str(tmp_path / "test_limit_parquet_writes_cache_dir")
builder = ParametrizedGeneratorBasedBuilder(generator=long_generator, cache_dir=cache_dir)
with patch("worker.job_runners.config.parquet_and_info.get_writer_batch_size_from_info", lambda ds_config_info: 1):
with patch.object(datasets.config, "MAX_SHARD_SIZE", 1):
parquet_operations, partial = stream_convert_to_parquet(builder, max_dataset_size=max_dataset_size)
num_shards = len(parquet_operations)
assert num_shards == expected_num_shards
assert partial == (expected_num_shards < num_rows)
assert all(isinstance(op.path_or_fileobj, str) for op in parquet_operations)
parquet_files = list_generated_parquet_files(builder, partial=partial)
assert len(parquet_files) == expected_num_shards
assert all(os.path.isfile(parquet_file.local_file) for parquet_file in parquet_files)
if max_dataset_size is not None:
one_sample_max_size = 100
expected_max_dataset_size = max_dataset_size + one_sample_max_size
assert (
sum(pq.ParquetFile(parquet_file.local_file).read().nbytes for parquet_file in parquet_files)
< expected_max_dataset_size
)
def test_limit_parquet_writes(tmp_path: Path) -> None:
num_examples = 0
def long_generator() -> Iterator[dict[str, int]]:
nonlocal num_examples
for i in range(10_000_000):
yield {"foo": i}
num_examples += 1
one_sample_size = 8
max_dataset_size = 50_000
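    # the bounds below allow one extra default-sized write batch of slack, since the limiter
    # presumably only stops between write batches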
expected_max_dataset_size = max_dataset_size + datasets.config.DEFAULT_MAX_BATCH_SIZE * one_sample_size
expected_max_num_examples = 1 + max_dataset_size // one_sample_size + datasets.config.DEFAULT_MAX_BATCH_SIZE
cache_dir = str(tmp_path / "test_limit_parquet_writes_cache_dir")
builder = ParametrizedGeneratorBasedBuilder(generator=long_generator, cache_dir=cache_dir)
with limit_parquet_writes(builder, max_dataset_size=max_dataset_size) as limiter:
builder.download_and_prepare(file_format="parquet")
assert builder.info.dataset_size == limiter.total_bytes <= expected_max_dataset_size
assert builder.info.splits["train"].num_examples == num_examples < expected_max_num_examples
@pytest.mark.parametrize(
"validate,too_big_row_groups",
[
(None, False),
(ParquetFileValidator(max_row_group_byte_size=1).validate, True),
(ParquetFileValidator(max_row_group_byte_size=100_000).validate, False),
],
)
def test_fill_builder_info(
hub_responses_big: HubDatasetTest,
app_config: AppConfig,
tmp_path: Path,
validate: Optional[Callable[[pq.ParquetFile], None]],
too_big_row_groups: bool,
) -> None:
cache_dir = str(tmp_path / "test_fill_builder_info")
name = hub_responses_big["name"]
builder = load_dataset_builder(name, cache_dir=cache_dir)
builder.info = datasets.info.DatasetInfo()
if too_big_row_groups:
with pytest.raises(TooBigRowGroupsError) as exc_info:
fill_builder_info(builder, hf_endpoint=app_config.common.hf_endpoint, hf_token=None, validate=validate)
assert isinstance(exc_info.value, TooBigRowGroupsError)
assert isinstance(exc_info.value.num_rows, int)
assert isinstance(exc_info.value.row_group_byte_size, int)
else:
fill_builder_info(builder, hf_endpoint=app_config.common.hf_endpoint, hf_token=None, validate=validate)
expected_info = hub_responses_big["parquet_and_info_response"]["dataset_info"]
assert expected_info == asdict(builder.info)
@pytest.mark.parametrize(
"num_rows, row_group_byte_size, max_row_group_byte_size, expected",
[
(1000, 1000, 500, 100),
        (1000, 1_000_000, 500_000, 100),
(123456789, 123456789, 1000, 100),
(987654321, 987654321, 1000, 900),
(1000, 10, 1000, 1000),
(10, 1000, 1000, 100),
],
)
def test_get_writer_batch_size_from_row_group_size(
num_rows: int, row_group_byte_size: int, max_row_group_byte_size: int, expected: int
) -> None:
writer_batch_size = get_writer_batch_size_from_row_group_size(
num_rows=num_rows, row_group_byte_size=row_group_byte_size, max_row_group_byte_size=max_row_group_byte_size
)
assert writer_batch_size == expected
| datasets-server-main | services/worker/tests/job_runners/config/test_parquet_and_info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.config.is_valid import ConfigIsValidJobRunner
from ..utils import UpstreamResponse
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, str, AppConfig], ConfigIsValidJobRunner]
DATASET = "dataset"
CONFIG = "config"
SPLIT_1 = "split1"
SPLIT_2 = "split2"
UPSTREAM_RESPONSE_SPLIT_NAMES: UpstreamResponse = UpstreamResponse(
kind="config-split-names-from-streaming",
dataset=DATASET,
config=CONFIG,
http_status=HTTPStatus.OK,
content={
"splits": [
{"dataset": DATASET, "config": CONFIG, "split": SPLIT_1},
{"dataset": DATASET, "config": CONFIG, "split": SPLIT_2},
]
},
)
UPSTREAM_RESPONSE_SPLIT_1_OK: UpstreamResponse = UpstreamResponse(
kind="split-is-valid",
dataset=DATASET,
config=CONFIG,
split=SPLIT_1,
http_status=HTTPStatus.OK,
content={"viewer": True, "preview": True, "search": True},
)
UPSTREAM_RESPONSE_SPLIT_1_OK_VIEWER: UpstreamResponse = UpstreamResponse(
kind="split-is-valid",
dataset=DATASET,
config=CONFIG,
split=SPLIT_1,
http_status=HTTPStatus.OK,
content={"viewer": True, "preview": False, "search": False},
)
UPSTREAM_RESPONSE_SPLIT_2_OK_SEARCH: UpstreamResponse = UpstreamResponse(
kind="split-is-valid",
dataset=DATASET,
config=CONFIG,
split=SPLIT_2,
http_status=HTTPStatus.OK,
content={"viewer": False, "preview": False, "search": True},
)
UPSTREAM_RESPONSE_SPLIT_2_OK: UpstreamResponse = UpstreamResponse(
kind="split-is-valid",
dataset=DATASET,
config=CONFIG,
split=SPLIT_2,
http_status=HTTPStatus.OK,
content={"viewer": True, "preview": True, "search": True},
)
UPSTREAM_RESPONSE_SPLIT_1_ERROR: UpstreamResponse = UpstreamResponse(
kind="split-is-valid",
dataset=DATASET,
config=CONFIG,
split=SPLIT_1,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
content={},
)
UPSTREAM_RESPONSE_SPLIT_2_ERROR: UpstreamResponse = UpstreamResponse(
kind="split-is-valid",
dataset=DATASET,
config=CONFIG,
split=SPLIT_2,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
content={},
)
EXPECTED_COMPLETED_ALL_FALSE = (
{"viewer": False, "preview": False, "search": False},
1.0,
)
EXPECTED_ALL_MIXED = (
{"viewer": True, "preview": False, "search": True},
1.0,
)
EXPECTED_COMPLETED_ALL_TRUE = (
{"viewer": True, "preview": True, "search": True},
1.0,
)
EXPECTED_PENDING_ALL_TRUE = (
{"viewer": True, "preview": True, "search": True},
0.5,
)
EXPECTED_PENDING_ALL_FALSE = (
{"viewer": False, "preview": False, "search": False},
0.0,
)
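# Each expected value is a (content, progress) pair: the config-level flags are the OR of the
# split flags, and progress is the fraction of the two splits that already have a cached
# "split-is-valid" entry (whether success or error).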
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigIsValidJobRunner:
processing_step_name = ConfigIsValidJobRunner.get_job_type()
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigIsValidJobRunner(
job_info={
"type": ConfigIsValidJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"config": config,
"split": None,
"revision": "revision",
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 20,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
@pytest.mark.parametrize(
"upstream_responses,expected",
[
(
[
UPSTREAM_RESPONSE_SPLIT_1_OK,
UPSTREAM_RESPONSE_SPLIT_2_OK,
],
EXPECTED_COMPLETED_ALL_TRUE,
),
(
[
UPSTREAM_RESPONSE_SPLIT_1_OK,
],
EXPECTED_PENDING_ALL_TRUE,
),
(
[
UPSTREAM_RESPONSE_SPLIT_1_ERROR,
UPSTREAM_RESPONSE_SPLIT_2_ERROR,
],
EXPECTED_COMPLETED_ALL_FALSE,
),
([UPSTREAM_RESPONSE_SPLIT_1_OK_VIEWER, UPSTREAM_RESPONSE_SPLIT_2_OK_SEARCH], EXPECTED_ALL_MIXED),
(
[],
EXPECTED_PENDING_ALL_FALSE,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
upstream_responses: list[UpstreamResponse],
expected: Any,
) -> None:
dataset, config = DATASET, CONFIG
upsert_response(**UPSTREAM_RESPONSE_SPLIT_NAMES)
for upstream_response in upstream_responses:
upsert_response(**upstream_response)
job_runner = get_job_runner(dataset, config, app_config)
compute_result = job_runner.compute()
assert compute_result.content == expected[0]
assert compute_result.progress == expected[1]
| datasets-server-main | services/worker/tests/job_runners/config/test_is_valid.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from dataclasses import replace
from http import HTTPStatus
import pytest
from libcommon.exceptions import CustomError, DatasetManualDownloadError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.config.split_names_from_streaming import (
ConfigSplitNamesFromStreamingJobRunner,
compute_split_names_from_streaming_response,
)
from worker.resources import LibrariesResource
from ...fixtures.hub import HubDatasetTest, get_default_config_split
GetJobRunner = Callable[[str, str, AppConfig], ConfigSplitNamesFromStreamingJobRunner]
@pytest.fixture
def get_job_runner(
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigSplitNamesFromStreamingJobRunner:
processing_step_name = ConfigSplitNamesFromStreamingJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": ConfigSplitNamesFromStreamingJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigSplitNamesFromStreamingJobRunner(
job_info={
"type": ConfigSplitNamesFromStreamingJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
hf_datasets_cache=libraries_resource.hf_datasets_cache,
)
return _get_job_runner
def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public_csv: str) -> None:
dataset = hub_public_csv
config, _ = get_default_config_split()
job_runner = get_job_runner(dataset, config, app_config)
response = job_runner.compute()
content = response.content
assert len(content["splits"]) == 1
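# Covered cases: public, audio, gated and private datasets resolve their splits by streaming; an
# empty dataset raises EmptyDatasetError, and gated/private datasets without a token (or a missing
# dataset) raise SplitNamesFromStreamingError caused by a FileNotFoundError.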
@pytest.mark.parametrize(
"name,use_token,error_code,cause",
[
("public", False, None, None),
("audio", False, None, None),
("gated", True, None, None),
("private", True, None, None),
("empty", False, "EmptyDatasetError", "EmptyDatasetError"),
# should we really test the following cases?
# The assumption is that the dataset exists and is accessible with the token
("does_not_exist", False, "SplitNamesFromStreamingError", "FileNotFoundError"),
("gated", False, "SplitNamesFromStreamingError", "FileNotFoundError"),
("private", False, "SplitNamesFromStreamingError", "FileNotFoundError"),
],
)
def test_compute_split_names_from_streaming_response(
hub_responses_public: HubDatasetTest,
hub_responses_audio: HubDatasetTest,
hub_responses_gated: HubDatasetTest,
hub_responses_private: HubDatasetTest,
hub_responses_empty: HubDatasetTest,
hub_responses_does_not_exist: HubDatasetTest,
get_job_runner: GetJobRunner,
name: str,
use_token: bool,
error_code: str,
cause: str,
app_config: AppConfig,
) -> None:
hub_datasets = {
"public": hub_responses_public,
"audio": hub_responses_audio,
"gated": hub_responses_gated,
"private": hub_responses_private,
"empty": hub_responses_empty,
"does_not_exist": hub_responses_does_not_exist,
}
dataset = hub_datasets[name]["name"]
config, _ = get_default_config_split()
expected_configs_response = hub_datasets[name]["splits_response"]
job_runner = get_job_runner(
dataset,
config,
app_config if use_token else replace(app_config, common=replace(app_config.common, hf_token=None)),
)
if error_code is None:
result = job_runner.compute().content
assert result == expected_configs_response
return
with pytest.raises(CustomError) as exc_info:
job_runner.compute()
assert exc_info.value.code == error_code
assert exc_info.value.cause_exception == cause
if exc_info.value.disclose_cause:
response = exc_info.value.as_response()
assert set(response.keys()) == {"error", "cause_exception", "cause_message", "cause_traceback"}
response_dict = dict(response)
# ^ to remove mypy warnings
assert response_dict["cause_exception"] == cause
assert isinstance(response_dict["cause_traceback"], list)
assert response_dict["cause_traceback"][0] == "Traceback (most recent call last):\n"
def test_compute_split_names_from_streaming_response_raises(
hub_public_manual_download: str, app_config: AppConfig
) -> None:
with pytest.raises(DatasetManualDownloadError):
compute_split_names_from_streaming_response(
hub_public_manual_download, "default", hf_token=app_config.common.hf_token
)
| datasets-server-main | services/worker/tests/job_runners/config/test_split_names_from_streaming.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.exceptions import PreviousStepFormatError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import (
CachedArtifactError,
CachedArtifactNotFoundError,
upsert_response,
)
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.config.info import ConfigInfoJobRunner
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, str, AppConfig], ConfigInfoJobRunner]
CONFIG_INFO_1 = {
"description": "_DESCRIPTION",
"citation": "_CITATION",
"homepage": "_HOMEPAGE",
"license": "_LICENSE",
"features": {
"image": {"_type": "Image"},
"label": {
"names": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
"_type": "ClassLabel",
},
},
"splits": {
"train": {
"name": "train",
"num_bytes": 17470800,
"num_examples": 60000,
"dataset_name": "dataset_ok",
},
"test": {
"name": "test",
"num_bytes": 2916432,
"num_examples": 10000,
"dataset_name": "dataset_ok",
},
},
"builder_name": "dataset_ok",
"config_name": "config_1",
"version": {"version_str": "0.0.0", "major": 0, "minor": 0, "patch": 0},
"download_checksums": {
"https://storage.googleapis.com/cvdf-datasets/mnist/train-images-idx3-ubyte.gz": {
"num_bytes": 9912422,
"checksum": "440fcabf73cc546fa21475e81ea370265605f56be210a4024d2ca8f203523609",
},
"https://storage.googleapis.com/cvdf-datasets/mnist/train-labels-idx1-ubyte.gz": {
"num_bytes": 28881,
"checksum": "3552534a0a558bbed6aed32b30c495cca23d567ec52cac8be1a0730e8010255c",
},
"https://storage.googleapis.com/cvdf-datasets/mnist/t10k-images-idx3-ubyte.gz": {
"num_bytes": 1648877,
"checksum": "8d422c7b0a1c1c79245a5bcf07fe86e33eeafee792b84584aec276f5a2dbc4e6",
},
"https://storage.googleapis.com/cvdf-datasets/mnist/t10k-labels-idx1-ubyte.gz": {
"num_bytes": 4542,
"checksum": "f7ae60f92e00ec6debd23a6088c31dbd2371eca3ffa0defaefb259924204aec6",
},
},
"download_size": 11594722,
"dataset_size": 20387232,
"size_in_bytes": 31981954,
}
CONFIG_INFO_2 = {
"description": "_DESCRIPTION",
"citation": "_CITATION",
"homepage": "_HOMEPAGE",
"license": "_LICENSE",
"features": {
"image": {"_type": "Image"},
"image2": {"_type": "Image"},
"label": {
"names": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
"_type": "ClassLabel",
},
},
"splits": {
"train": {
"name": "train",
"num_bytes": 5678,
"num_examples": 3000,
"dataset_name": "dataset_ok",
},
"test": {
"name": "test",
"num_bytes": 1234,
"num_examples": 1000,
"dataset_name": "dataset_ok",
},
},
"builder_name": "dataset_ok",
"config_name": "config_2",
"version": {"version_str": "0.0.0", "major": 0, "minor": 0, "patch": 0},
"download_checksums": {
"https://storage.googleapis.com/cvdf-datasets/mnist/train-images-idx3-ubyte.gz": {
"num_bytes": 9912422,
"checksum": "440fcabf73cc546fa21475e81ea370265605f56be210a4024d2ca8f203523609",
},
},
"download_size": 9912422,
"dataset_size": 6912,
"size_in_bytes": 9919334,
}
DATASET_INFO_OK = {
"config_1": CONFIG_INFO_1,
"config_2": CONFIG_INFO_2,
}
PARQUET_FILES = [
{"dataset": "dataset_ok", "config": "config_1", "split": "train", "size": 14281188},
{"dataset": "dataset_ok", "config": "config_1", "split": "test", "size": 2383903},
{"dataset": "dataset_ok", "config": "config_2", "split": "train", "size": 1234},
{"dataset": "dataset_ok", "config": "config_2", "split": "train", "size": 6789},
{"dataset": "dataset_ok", "config": "config_2", "split": "test", "size": 2383903},
]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
config: str,
app_config: AppConfig,
) -> ConfigInfoJobRunner:
processing_step_name = ConfigInfoJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
"dataset-level": {"input_type": "dataset"},
processing_step_name: {
"input_type": "dataset",
"job_runner_version": ConfigInfoJobRunner.get_job_runner_version(),
"triggered_by": "dataset-level",
},
}
)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={"config_names": [{"dataset": dataset, "config": config}]},
http_status=HTTPStatus.OK,
)
return ConfigInfoJobRunner(
job_info={
"type": ConfigInfoJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": config,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
@pytest.mark.parametrize(
"dataset,config,upstream_status,upstream_content,expected_error_code,expected_content,should_raise",
[
(
"dataset_ok",
"config_1",
HTTPStatus.OK,
{"parquet_files": PARQUET_FILES, "dataset_info": CONFIG_INFO_1, "partial": False},
None,
{"dataset_info": CONFIG_INFO_1, "partial": False},
False,
),
(
"status_error",
"config_1",
HTTPStatus.NOT_FOUND,
{"error": "error"},
CachedArtifactError.__name__,
None,
True,
),
(
"format_error",
"config_1",
HTTPStatus.OK,
{"not_dataset_info": "wrong_format"},
PreviousStepFormatError.__name__,
None,
True,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
config: str,
upstream_status: HTTPStatus,
upstream_content: Any,
expected_error_code: str,
expected_content: Any,
should_raise: bool,
) -> None:
upsert_response(
kind="config-parquet-and-info",
dataset=dataset,
config=config,
content=upstream_content,
http_status=upstream_status,
)
job_runner = get_job_runner(dataset, config, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
assert job_runner.compute().content == expected_content
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = config = "doesnotexist"
job_runner = get_job_runner(dataset, config, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/config/test_info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.exceptions import PreviousStepFormatError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import (
CachedArtifactError,
CachedArtifactNotFoundError,
upsert_response,
)
from libcommon.utils import Priority, SplitHubFile
from worker.config import AppConfig
from worker.dtos import ConfigParquetResponse, DatasetParquetResponse
from worker.job_runners.dataset.parquet import DatasetParquetJobRunner
from ..utils import UpstreamResponse
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, AppConfig], DatasetParquetJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
app_config: AppConfig,
) -> DatasetParquetJobRunner:
processing_step_name = DatasetParquetJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DatasetParquetJobRunner.get_job_runner_version(),
}
}
)
return DatasetParquetJobRunner(
job_info={
"type": DatasetParquetJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": None,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
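# The dataset-level runner aggregates the per-config "config-parquet" cache entries into a single list of
# parquet files, as the expected responses in the cases below illustrate.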
@pytest.mark.parametrize(
"dataset,upstream_responses,expected_error_code,expected_content,should_raise",
[
(
"ok",
[
UpstreamResponse(
kind="dataset-config-names",
dataset="ok",
config=None,
http_status=HTTPStatus.OK,
content={
"config_names": [
{"dataset": "dataset_ok", "config": "config_1"},
{"dataset": "dataset_ok", "config": "config_2"},
],
},
),
UpstreamResponse(
kind="config-parquet",
dataset="ok",
config="config_1",
http_status=HTTPStatus.OK,
content=ConfigParquetResponse(
parquet_files=[
SplitHubFile(
dataset="ok",
config="config_1",
split="train",
url="url1",
filename="filename1",
size=0,
),
],
partial=False,
features=None,
),
),
UpstreamResponse(
kind="config-parquet",
dataset="ok",
config="config_2",
http_status=HTTPStatus.OK,
content=ConfigParquetResponse(
parquet_files=[
SplitHubFile(
dataset="ok",
config="config_2",
split="train",
url="url2",
filename="filename2",
size=0,
),
],
partial=False,
features=None,
),
),
],
None,
DatasetParquetResponse(
parquet_files=[
SplitHubFile(
dataset="ok", config="config_1", split="train", url="url1", filename="filename1", size=0
),
SplitHubFile(
dataset="ok", config="config_2", split="train", url="url2", filename="filename2", size=0
),
],
pending=[],
failed=[],
partial=False,
),
False,
),
(
"status_error",
[
UpstreamResponse(
kind="dataset-config-names",
dataset="status_error",
config=None,
http_status=HTTPStatus.NOT_FOUND,
content={"error": "error"},
)
],
CachedArtifactError.__name__,
None,
True,
),
(
"format_error",
[
UpstreamResponse(
kind="dataset-config-names",
dataset="format_error",
config=None,
http_status=HTTPStatus.OK,
content={"not_parquet_files": "wrong_format"},
)
],
PreviousStepFormatError.__name__,
None,
True,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
upstream_responses: list[UpstreamResponse],
expected_error_code: str,
expected_content: Any,
should_raise: bool,
) -> None:
for upstream_response in upstream_responses:
upsert_response(**upstream_response)
job_runner = get_job_runner(dataset, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
assert job_runner.compute().content == expected_content
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "doesnotexist"
job_runner = get_job_runner(dataset, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/dataset/test_parquet.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from dataclasses import replace
from unittest.mock import patch
import pytest
from libcommon.exceptions import CustomError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.dataset.config_names import DatasetConfigNamesJobRunner
from worker.resources import LibrariesResource
from ...fixtures.hub import HubDatasetTest
GetJobRunner = Callable[[str, AppConfig], DatasetConfigNamesJobRunner]
@pytest.fixture
def get_job_runner(
libraries_resource: LibrariesResource,
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
app_config: AppConfig,
) -> DatasetConfigNamesJobRunner:
processing_step_name = DatasetConfigNamesJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DatasetConfigNamesJobRunner.get_job_runner_version(),
}
}
)
return DatasetConfigNamesJobRunner(
job_info={
"type": DatasetConfigNamesJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": None,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
hf_datasets_cache=libraries_resource.hf_datasets_cache,
)
return _get_job_runner
def test_compute(app_config: AppConfig, hub_public_csv: str, get_job_runner: GetJobRunner) -> None:
dataset = hub_public_csv
job_runner = get_job_runner(dataset, app_config)
response = job_runner.compute()
content = response.content
assert len(content["config_names"]) == 1
@pytest.mark.parametrize(
"max_number_of_configs,error_code",
[
(1, "DatasetWithTooManyConfigsError"),
(2, None),
(3, None),
],
)
def test_compute_too_many_configs(
app_config: AppConfig, get_job_runner: GetJobRunner, max_number_of_configs: int, error_code: str
) -> None:
dataset = "dataset"
configs = ["config_1", "config_2"]
job_runner = get_job_runner(
dataset,
replace(app_config, config_names=replace(app_config.config_names, max_number=max_number_of_configs)),
)
with patch("worker.job_runners.dataset.config_names.get_dataset_config_names", return_value=configs):
if error_code:
with pytest.raises(CustomError) as exc_info:
job_runner.compute()
assert exc_info.value.code == error_code
else:
assert job_runner.compute() is not None
@pytest.mark.parametrize(
"name,use_token,error_code,cause",
[
("public", False, None, None),
("audio", False, None, None),
("gated", True, None, None),
("private", True, None, None),
("empty", False, "EmptyDatasetError", "EmptyDatasetError"),
# should we really test the following cases?
# The assumption is that the dataset exists and is accessible with the token
("does_not_exist", False, "ConfigNamesError", "FileNotFoundError"),
("gated", False, "ConfigNamesError", "FileNotFoundError"),
("private", False, "ConfigNamesError", "FileNotFoundError"),
],
)
def test_compute_splits_response_simple_csv(
hub_responses_public: HubDatasetTest,
hub_responses_audio: HubDatasetTest,
hub_responses_gated: HubDatasetTest,
hub_responses_private: HubDatasetTest,
hub_responses_empty: HubDatasetTest,
hub_responses_does_not_exist: HubDatasetTest,
get_job_runner: GetJobRunner,
name: str,
use_token: bool,
error_code: str,
cause: str,
app_config: AppConfig,
) -> None:
hub_datasets = {
"public": hub_responses_public,
"audio": hub_responses_audio,
"gated": hub_responses_gated,
"private": hub_responses_private,
"empty": hub_responses_empty,
"does_not_exist": hub_responses_does_not_exist,
}
dataset = hub_datasets[name]["name"]
expected_configs_response = hub_datasets[name]["config_names_response"]
job_runner = get_job_runner(
dataset,
app_config if use_token else replace(app_config, common=replace(app_config.common, hf_token=None)),
)
if error_code is None:
result = job_runner.compute().content
assert result == expected_configs_response
return
with pytest.raises(CustomError) as exc_info:
job_runner.compute()
assert exc_info.value.code == error_code
assert exc_info.value.cause_exception == cause
if exc_info.value.disclose_cause:
response = exc_info.value.as_response()
assert set(response.keys()) == {"error", "cause_exception", "cause_message", "cause_traceback"}
response_dict = dict(response)
# ^ to remove mypy warnings
assert response_dict["cause_exception"] == cause
assert isinstance(response_dict["cause_traceback"], list)
assert response_dict["cause_traceback"][0] == "Traceback (most recent call last):\n"
| datasets-server-main | services/worker/tests/job_runners/dataset/test_config_names.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.exceptions import PreviousStepFormatError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CachedArtifactNotFoundError, upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.dataset.split_names import DatasetSplitNamesJobRunner
GetJobRunner = Callable[[str, AppConfig], DatasetSplitNamesJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
app_config: AppConfig,
) -> DatasetSplitNamesJobRunner:
processing_step_name = DatasetSplitNamesJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DatasetSplitNamesJobRunner.get_job_runner_version(),
}
}
)
return DatasetSplitNamesJobRunner(
job_info={
"type": DatasetSplitNamesJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": None,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
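# In the cases below, `progress` is the fraction of configs for which a split-names response is already
# cached: 0.5 when one of the two configs is still pending, 1.0 when every config has either succeeded
# or failed.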
@pytest.mark.parametrize(
"dataset,split_names,expected_content,progress",
[
(
"pending_response",
[
{
"config": "config_a",
"response": {
"splits": [
{
"dataset": "pending_response",
"config": "config_a",
"split": "split_a",
}
]
},
}
],
{
"splits": [
{
"dataset": "pending_response",
"config": "config_a",
"split": "split_a",
},
],
"pending": [{"dataset": "pending_response", "config": "config_b"}],
"failed": [],
},
0.5,
),
(
"complete",
[
{
"config": "config_a",
"response": {
"splits": [
{
"dataset": "complete",
"config": "config_a",
"split": "split_a",
}
]
},
},
{
"config": "config_b",
"response": {
"splits": [
{
"dataset": "complete",
"config": "config_b",
"split": "split_b",
}
]
},
},
],
{
"splits": [
{
"dataset": "complete",
"config": "config_a",
"split": "split_a",
},
{
"dataset": "complete",
"config": "config_b",
"split": "split_b",
},
],
"pending": [],
"failed": [],
},
1,
),
],
)
def test_compute_progress(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
split_names: Any,
expected_content: Any,
progress: float,
) -> None:
# we could also have tested if dataset-info has a response (it's one case among many, see
# libcommon.simple_cache.get_best_response)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={
"config_names": [
{
"dataset": dataset,
"config": "config_a",
},
{"dataset": dataset, "config": "config_b"},
]
},
http_status=HTTPStatus.OK,
)
for config in split_names:
# we don't really need both parent responses here, but why not (it's one case among many, see
# libcommon.simple_cache.get_best_response)
upsert_response(
kind="config-split-names-from-info",
dataset=dataset,
config=config["config"],
content=config["response"],
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config["config"],
content=config["response"],
http_status=HTTPStatus.OK,
)
job_runner = get_job_runner(dataset, app_config)
response = job_runner.compute()
assert response.content == expected_content
assert response.progress == progress
def test_compute_error(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "error"
config = "error"
# we could also have tested if dataset-info has a response (it's one case among many, see
# libcommon.simple_cache.get_best_response)
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={
"config_names": [
{
"dataset": dataset,
"config": config,
}
]
},
http_status=HTTPStatus.OK,
)
# we don't really need both parent responses here, but why not (it's one case among many, see
# libcommon.simple_cache.get_best_response)
upsert_response(
kind="config-split-names-from-info",
dataset=dataset,
config=config,
content={},
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={},
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
job_runner = get_job_runner(dataset, app_config)
response = job_runner.compute()
assert response.content == {
"splits": [],
"failed": [{"dataset": dataset, "config": config, "error": {}}],
"pending": [],
}
assert response.progress == 1.0
def test_compute_format_error(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "error"
config = "error"
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content={
"config_names": [
{
"dataset": dataset,
"config": config,
}
]
},
http_status=HTTPStatus.OK,
)
# here, 'config-split-names-from-info' will be picked because it's the first success response
# with progress==1.0 (see libcommon.simple_cache.get_best_response), but its format is wrong
# while the other one ('config-split-names-from-streaming') is correct
upsert_response(
kind="config-split-names-from-info",
dataset=dataset,
config=config,
content={"wrong_format": []},
http_status=HTTPStatus.OK,
)
upsert_response(
kind="config-split-names-from-streaming",
dataset=dataset,
config=config,
content={"splits": [{"dataset": "dataset", "config": "config", "split": "split"}]},
http_status=HTTPStatus.OK,
)
job_runner = get_job_runner(dataset, app_config)
with pytest.raises(PreviousStepFormatError):
job_runner.compute()
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "doesnotexist"
job_runner = get_job_runner(dataset, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/dataset/test_split_names.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.exceptions import PreviousStepFormatError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import (
CachedArtifactError,
CachedArtifactNotFoundError,
upsert_response,
)
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.dataset.size import DatasetSizeJobRunner
from ..utils import UpstreamResponse
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, AppConfig], DatasetSizeJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
app_config: AppConfig,
) -> DatasetSizeJobRunner:
processing_step_name = DatasetSizeJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DatasetSizeJobRunner.get_job_runner_version(),
}
}
)
return DatasetSizeJobRunner(
job_info={
"type": DatasetSizeJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": None,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
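# The dataset-level size is an aggregation of the per-config "config-size" responses: byte and row counts
# are summed across configs, and the config/split breakdowns are concatenated, as the expected content in
# the cases below shows.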
@pytest.mark.parametrize(
"dataset,upstream_responses,expected_error_code,expected_content,should_raise",
[
(
"dataset_ok",
[
UpstreamResponse(
kind="dataset-config-names",
dataset="dataset_ok",
config=None,
http_status=HTTPStatus.OK,
content={
"config_names": [
{"dataset": "dataset_ok", "config": "config_1"},
{"dataset": "dataset_ok", "config": "config_2"},
],
},
),
UpstreamResponse(
kind="config-size",
dataset="dataset_ok",
config="config_1",
http_status=HTTPStatus.OK,
content={
"size": {
"config": {
"dataset": "dataset_ok",
"config": "config_1",
"num_bytes_original_files": 11594722,
"num_bytes_parquet_files": 16665091,
"num_bytes_memory": 20387232,
"num_rows": 70000,
"num_columns": 2,
},
"splits": [
{
"dataset": "dataset_ok",
"config": "config_1",
"split": "train",
"num_bytes_parquet_files": 14281188,
"num_bytes_memory": 17470800,
"num_rows": 60000,
"num_columns": 2,
},
{
"dataset": "dataset_ok",
"config": "config_1",
"split": "test",
"num_bytes_parquet_files": 2383903,
"num_bytes_memory": 2916432,
"num_rows": 10000,
"num_columns": 2,
},
],
},
"partial": False,
},
),
UpstreamResponse(
kind="config-size",
dataset="dataset_ok",
config="config_2",
http_status=HTTPStatus.OK,
content={
"size": {
"config": {
"dataset": "dataset_ok",
"config": "config_2",
"num_bytes_original_files": 9912422,
"num_bytes_parquet_files": 2391926,
"num_bytes_memory": 6912,
"num_rows": 4000,
"num_columns": 3,
},
"splits": [
{
"dataset": "dataset_ok",
"config": "config_2",
"split": "train",
"num_bytes_parquet_files": 8023,
"num_bytes_memory": 5678,
"num_rows": 3000,
"num_columns": 3,
},
{
"dataset": "dataset_ok",
"config": "config_2",
"split": "test",
"num_bytes_parquet_files": 2383903,
"num_bytes_memory": 1234,
"num_rows": 1000,
"num_columns": 3,
},
],
},
"partial": False,
},
),
],
None,
{
"size": {
"dataset": {
"dataset": "dataset_ok",
"num_bytes_original_files": 21507144,
"num_bytes_parquet_files": 19057017,
"num_bytes_memory": 20394144,
"num_rows": 74000,
},
"configs": [
{
"dataset": "dataset_ok",
"config": "config_1",
"num_bytes_original_files": 11594722,
"num_bytes_parquet_files": 16665091,
"num_bytes_memory": 20387232,
"num_rows": 70000,
"num_columns": 2,
},
{
"dataset": "dataset_ok",
"config": "config_2",
"num_bytes_original_files": 9912422,
"num_bytes_parquet_files": 2391926,
"num_bytes_memory": 6912,
"num_rows": 4000,
"num_columns": 3,
},
],
"splits": [
{
"dataset": "dataset_ok",
"config": "config_1",
"split": "train",
"num_bytes_parquet_files": 14281188,
"num_bytes_memory": 17470800,
"num_rows": 60000,
"num_columns": 2,
},
{
"dataset": "dataset_ok",
"config": "config_1",
"split": "test",
"num_bytes_parquet_files": 2383903,
"num_bytes_memory": 2916432,
"num_rows": 10000,
"num_columns": 2,
},
{
"dataset": "dataset_ok",
"config": "config_2",
"split": "train",
"num_bytes_parquet_files": 8023,
"num_bytes_memory": 5678,
"num_rows": 3000,
"num_columns": 3,
},
{
"dataset": "dataset_ok",
"config": "config_2",
"split": "test",
"num_bytes_parquet_files": 2383903,
"num_bytes_memory": 1234,
"num_rows": 1000,
"num_columns": 3,
},
],
},
"failed": [],
"pending": [],
"partial": False,
},
False,
),
(
"status_error",
[
UpstreamResponse(
kind="dataset-config-names",
dataset="status_error",
config=None,
http_status=HTTPStatus.NOT_FOUND,
content={"error": "error"},
)
],
CachedArtifactError.__name__,
None,
True,
),
(
"format_error",
[
UpstreamResponse(
kind="dataset-config-names",
dataset="format_error",
config=None,
http_status=HTTPStatus.OK,
content={"not_dataset_info": "wrong_format"},
)
],
PreviousStepFormatError.__name__,
None,
True,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
upstream_responses: list[UpstreamResponse],
expected_error_code: str,
expected_content: Any,
should_raise: bool,
) -> None:
for upstream_response in upstream_responses:
upsert_response(**upstream_response)
job_runner = get_job_runner(dataset, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
assert job_runner.compute().content == expected_content
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "doesnotexist"
job_runner = get_job_runner(dataset, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/dataset/test_size.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import pytest
from libcommon.exceptions import CustomError
from libcommon.processing_graph import ProcessingStep
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.dtos import CompleteJobResult
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
class DummyDatasetJobRunner(DatasetJobRunner):
@staticmethod
def get_job_runner_version() -> int:
return 1
@staticmethod
def get_job_type() -> str:
return "/dummy"
def compute(self) -> CompleteJobResult:
return CompleteJobResult({"key": "value"})
def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppConfig) -> None:
with pytest.raises(CustomError) as exc_info:
DummyDatasetJobRunner(
job_info={
"job_id": "job_id",
"type": test_processing_step.job_type,
"params": {
"dataset": None, # type: ignore
# ^ Needed to raise error
"revision": "revision",
"config": None,
"split": None,
},
"priority": Priority.NORMAL,
"difficulty": 50,
},
processing_step=test_processing_step,
app_config=app_config,
)
assert exc_info.value.code == "ParameterMissingError"
def test_success_creation(test_processing_step: ProcessingStep, app_config: AppConfig) -> None:
assert (
DummyDatasetJobRunner(
job_info={
"job_id": "job_id",
"type": test_processing_step.job_type,
"params": {
"dataset": "dataset",
"revision": "revision",
"config": None,
"split": None,
},
"priority": Priority.NORMAL,
"difficulty": 50,
},
processing_step=test_processing_step,
app_config=app_config,
)
is not None
)
| datasets-server-main | services/worker/tests/job_runners/dataset/test_dataset_job_runner.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/tests/job_runners/dataset/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CachedArtifactNotFoundError, upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.dataset.opt_in_out_urls_count import (
DatasetOptInOutUrlsCountJobRunner,
)
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, AppConfig], DatasetOptInOutUrlsCountJobRunner]
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
app_config: AppConfig,
) -> DatasetOptInOutUrlsCountJobRunner:
processing_step_name = DatasetOptInOutUrlsCountJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DatasetOptInOutUrlsCountJobRunner.get_job_runner_version(),
}
}
)
return DatasetOptInOutUrlsCountJobRunner(
job_info={
"type": DatasetOptInOutUrlsCountJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": None,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
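# The dataset-level counts below are aggregated from the per-config responses: numeric fields are summed,
# `urls_columns` is the union of the configs' columns, and `full_scan` is only True when every config was
# fully scanned.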
@pytest.mark.parametrize(
"dataset,config_names_status,config_names_content,config_upstream_status"
+ ",config_upstream_content,expected_error_code,expected_content,should_raise",
[
(
"dataset_ok_full_scan",
HTTPStatus.OK,
{
"config_names": [
{"dataset": "dataset_ok_full_scan", "config": "config1"},
{"dataset": "dataset_ok_full_scan", "config": "config2"},
]
},
[HTTPStatus.OK, HTTPStatus.OK],
[
{
"urls_columns": ["image_url", "url"],
"num_opt_in_urls": 10,
"num_opt_out_urls": 20,
"num_urls": 100,
"num_scanned_rows": 100,
"has_urls_columns": True,
"full_scan": True,
},
{
"urls_columns": ["image_url", "label", "url"],
"num_opt_in_urls": 10,
"num_opt_out_urls": 0,
"num_urls": 50,
"num_scanned_rows": 300,
"has_urls_columns": True,
"full_scan": True,
},
],
None,
{
"urls_columns": ["image_url", "label", "url"],
"num_opt_in_urls": 20,
"num_opt_out_urls": 20,
"num_urls": 150,
"num_scanned_rows": 400,
"has_urls_columns": True,
"full_scan": True,
},
False,
),
(
"dataset_ok_not_full_scan",
HTTPStatus.OK,
{
"config_names": [
{"dataset": "dataset_ok_not_full_scan", "config": "config1"},
{"dataset": "dataset_ok_not_full_scan", "config": "config2"},
]
},
[HTTPStatus.OK, HTTPStatus.OK],
[
{
"urls_columns": ["image_url", "url"],
"num_opt_in_urls": 10,
"num_opt_out_urls": 20,
"num_urls": 100,
"num_scanned_rows": 100,
"has_urls_columns": True,
"full_scan": False,
},
{
"urls_columns": ["image_url", "label", "url"],
"num_opt_in_urls": 10,
"num_opt_out_urls": 0,
"num_urls": 50,
"num_scanned_rows": 300,
"has_urls_columns": True,
"full_scan": True,
},
],
None,
{
"urls_columns": ["image_url", "label", "url"],
"num_opt_in_urls": 20,
"num_opt_out_urls": 20,
"num_urls": 150,
"num_scanned_rows": 400,
"has_urls_columns": True,
"full_scan": False,
},
False,
),
(
"previos_step_error",
HTTPStatus.INTERNAL_SERVER_ERROR,
{},
[],
[],
"CachedArtifactError",
None,
True,
),
(
"previous_step_format_error",
HTTPStatus.OK,
{
"config_names": [
{"dataset": "dataset_ok", "config": "config1"},
{"dataset": "dataset_ok", "config": "config2"},
]
},
[HTTPStatus.OK],
[{"wrong_format": None}],
"PreviousStepFormatError",
None,
True,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
config_names_status: HTTPStatus,
config_names_content: Any,
config_upstream_status: list[HTTPStatus],
config_upstream_content: list[Any],
expected_error_code: str,
expected_content: Any,
should_raise: bool,
) -> None:
upsert_response(
kind="dataset-config-names",
dataset=dataset,
content=config_names_content,
http_status=config_names_status,
)
if config_names_status == HTTPStatus.OK:
        for config_item, status, content in zip(
            config_names_content["config_names"], config_upstream_status, config_upstream_content
        ):
            upsert_response(
                kind="config-opt-in-out-urls-count",
                dataset=dataset,
                config=config_item["config"],
content=content,
http_status=status,
)
job_runner = get_job_runner(dataset, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
assert job_runner.compute().content == expected_content
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "doesnotexist"
job_runner = get_job_runner(dataset, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.dataset.is_valid import DatasetIsValidJobRunner
from ..utils import UpstreamResponse
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, AppConfig], DatasetIsValidJobRunner]
DATASET = "dataset"
CONFIG_1 = "config1"
CONFIG_2 = "config2"
UPSTREAM_RESPONSE_CONFIG_NAMES: UpstreamResponse = UpstreamResponse(
kind="dataset-config-names",
dataset=DATASET,
http_status=HTTPStatus.OK,
content={
"config_names": [
{"dataset": DATASET, "config": CONFIG_1},
{"dataset": DATASET, "config": CONFIG_2},
]
},
)
UPSTREAM_RESPONSE_CONFIG_1_OK: UpstreamResponse = UpstreamResponse(
kind="config-is-valid",
dataset=DATASET,
config=CONFIG_1,
http_status=HTTPStatus.OK,
content={"viewer": True, "preview": True, "search": True},
)
UPSTREAM_RESPONSE_CONFIG_1_OK_VIEWER: UpstreamResponse = UpstreamResponse(
kind="config-is-valid",
dataset=DATASET,
config=CONFIG_1,
http_status=HTTPStatus.OK,
content={"viewer": True, "preview": False, "search": False},
)
UPSTREAM_RESPONSE_CONFIG_2_OK_SEARCH: UpstreamResponse = UpstreamResponse(
kind="config-is-valid",
dataset=DATASET,
config=CONFIG_2,
http_status=HTTPStatus.OK,
content={"viewer": False, "preview": False, "search": True},
)
UPSTREAM_RESPONSE_CONFIG_2_OK: UpstreamResponse = UpstreamResponse(
kind="config-is-valid",
dataset=DATASET,
config=CONFIG_2,
http_status=HTTPStatus.OK,
content={"viewer": True, "preview": True, "search": True},
)
UPSTREAM_RESPONSE_CONFIG_1_ERROR: UpstreamResponse = UpstreamResponse(
kind="config-is-valid",
dataset=DATASET,
config=CONFIG_1,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
content={},
)
UPSTREAM_RESPONSE_CONFIG_2_ERROR: UpstreamResponse = UpstreamResponse(
kind="config-is-valid",
dataset=DATASET,
config=CONFIG_2,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
content={},
)
EXPECTED_COMPLETED_ALL_FALSE = (
{"viewer": False, "preview": False, "search": False},
1.0,
)
EXPECTED_ALL_MIXED = (
{"viewer": True, "preview": False, "search": True},
1.0,
)
EXPECTED_COMPLETED_ALL_TRUE = (
{"viewer": True, "preview": True, "search": True},
1.0,
)
EXPECTED_PENDING_ALL_TRUE = (
{"viewer": True, "preview": True, "search": True},
0.5,
)
EXPECTED_PENDING_ALL_FALSE = (
{"viewer": False, "preview": False, "search": False},
0.0,
)
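# In the expected tuples above, `progress` is the fraction of configs with a cached "config-is-valid"
# entry (error responses count as processed), which is why a single response out of two gives 0.5 and no
# response at all gives 0.0.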
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
app_config: AppConfig,
) -> DatasetIsValidJobRunner:
processing_step_name = DatasetIsValidJobRunner.get_job_type()
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
return DatasetIsValidJobRunner(
job_info={
"type": DatasetIsValidJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"config": None,
"split": None,
"revision": "revision",
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 20,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
@pytest.mark.parametrize(
"upstream_responses,expected",
[
(
[
UPSTREAM_RESPONSE_CONFIG_1_OK,
UPSTREAM_RESPONSE_CONFIG_2_OK,
],
EXPECTED_COMPLETED_ALL_TRUE,
),
(
[
UPSTREAM_RESPONSE_CONFIG_1_OK,
],
EXPECTED_PENDING_ALL_TRUE,
),
(
[
UPSTREAM_RESPONSE_CONFIG_1_ERROR,
UPSTREAM_RESPONSE_CONFIG_2_ERROR,
],
EXPECTED_COMPLETED_ALL_FALSE,
),
([UPSTREAM_RESPONSE_CONFIG_1_OK_VIEWER, UPSTREAM_RESPONSE_CONFIG_2_OK_SEARCH], EXPECTED_ALL_MIXED),
(
[],
EXPECTED_PENDING_ALL_FALSE,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
upstream_responses: list[UpstreamResponse],
expected: Any,
) -> None:
dataset = DATASET
upsert_response(**UPSTREAM_RESPONSE_CONFIG_NAMES)
for upstream_response in upstream_responses:
upsert_response(**upstream_response)
job_runner = get_job_runner(dataset, app_config)
compute_result = job_runner.compute()
assert compute_result.content == expected[0]
assert compute_result.progress == expected[1]
| datasets-server-main | services/worker/tests/job_runners/dataset/test_is_valid.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CachedArtifactError, upsert_response
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.job_runners.dataset.hub_cache import DatasetHubCacheJobRunner
from ..utils import UpstreamResponse
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, AppConfig], DatasetHubCacheJobRunner]
DATASET = "dataset"
UPSTREAM_RESPONSE_IS_VALID_OK: UpstreamResponse = UpstreamResponse(
kind="dataset-is-valid",
dataset=DATASET,
http_status=HTTPStatus.OK,
content={"preview": True, "viewer": False, "search": True},
progress=0.5,
)
UPSTREAM_RESPONSE_IS_VALID_ERROR: UpstreamResponse = UpstreamResponse(
kind="dataset-is-valid",
dataset=DATASET,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
content={},
progress=0.0,
)
UPSTREAM_RESPONSE_SIZE_OK: UpstreamResponse = UpstreamResponse(
kind="dataset-size",
dataset=DATASET,
http_status=HTTPStatus.OK,
content={"size": {"dataset": {"num_rows": 1000}}, "partial": False},
progress=0.2,
)
UPSTREAM_RESPONSE_SIZE_NO_PROGRESS: UpstreamResponse = UpstreamResponse(
kind="dataset-size",
dataset=DATASET,
http_status=HTTPStatus.OK,
content={"size": {"dataset": {"num_rows": 1000}}, "partial": True},
progress=None,
)
EXPECTED_OK = (
{"viewer": False, "preview": True, "partial": False, "num_rows": 1000},
0.2,
)
EXPECTED_NO_PROGRESS = (
{"viewer": False, "preview": True, "partial": True, "num_rows": 1000},
0.5,
)
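# The expected progress appears to be the minimum of the upstream progresses (0.2 from "dataset-size" in
# the first case); when an upstream progress is None it is apparently ignored, leaving the 0.5 progress of
# "dataset-is-valid" in the second case.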
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
app_config: AppConfig,
) -> DatasetHubCacheJobRunner:
processing_step_name = DatasetHubCacheJobRunner.get_job_type()
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
return DatasetHubCacheJobRunner(
job_info={
"type": DatasetHubCacheJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"config": None,
"split": None,
"revision": "revision",
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 20,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
@pytest.mark.parametrize(
"upstream_responses,expected",
[
(
[
UPSTREAM_RESPONSE_IS_VALID_OK,
UPSTREAM_RESPONSE_SIZE_OK,
],
EXPECTED_OK,
),
(
[
UPSTREAM_RESPONSE_IS_VALID_OK,
UPSTREAM_RESPONSE_SIZE_NO_PROGRESS,
],
EXPECTED_NO_PROGRESS,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
upstream_responses: list[UpstreamResponse],
expected: Any,
) -> None:
dataset = DATASET
for upstream_response in upstream_responses:
upsert_response(**upstream_response)
job_runner = get_job_runner(dataset, app_config)
compute_result = job_runner.compute()
assert compute_result.content == expected[0]
assert compute_result.progress == expected[1]
@pytest.mark.parametrize(
"upstream_responses,expectation",
[
(
[
UPSTREAM_RESPONSE_IS_VALID_ERROR,
UPSTREAM_RESPONSE_SIZE_OK,
],
pytest.raises(CachedArtifactError),
)
],
)
def test_compute_error(
app_config: AppConfig,
get_job_runner: GetJobRunner,
upstream_responses: list[UpstreamResponse],
expectation: Any,
) -> None:
dataset = DATASET
for upstream_response in upstream_responses:
upsert_response(**upstream_response)
job_runner = get_job_runner(dataset, app_config)
with expectation:
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/dataset/test_hub_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Callable
from http import HTTPStatus
from typing import Any
import pytest
from libcommon.exceptions import PreviousStepFormatError
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import (
CachedArtifactError,
CachedArtifactNotFoundError,
upsert_response,
)
from libcommon.utils import Priority
from worker.config import AppConfig
from worker.dtos import PreviousJob
from worker.job_runners.dataset.info import DatasetInfoJobRunner
from ..config.test_info import CONFIG_INFO_1, CONFIG_INFO_2, DATASET_INFO_OK
from ..utils import UpstreamResponse
@pytest.fixture(autouse=True)
def prepare_and_clean_mongo(app_config: AppConfig) -> None:
# prepare the database before each test, and clean it afterwards
pass
GetJobRunner = Callable[[str, AppConfig], DatasetInfoJobRunner]
UPSTREAM_RESPONSE_CONFIG_NAMES: UpstreamResponse = UpstreamResponse(
kind="dataset-config-names",
dataset="dataset_ok",
config=None,
http_status=HTTPStatus.OK,
content={
"config_names": [
{"dataset": "dataset_ok", "config": "config_1"},
{"dataset": "dataset_ok", "config": "config_2"},
],
},
)
UPSTREAM_RESPONSE_CONFIG_INFO_1: UpstreamResponse = UpstreamResponse(
kind="config-info",
dataset="dataset_ok",
config="config_1",
http_status=HTTPStatus.OK,
content={"dataset_info": CONFIG_INFO_1, "partial": False},
)
UPSTREAM_RESPONSE_CONFIG_INFO_2: UpstreamResponse = UpstreamResponse(
kind="config-info",
dataset="dataset_ok",
config="config_2",
http_status=HTTPStatus.OK,
content={"dataset_info": CONFIG_INFO_2, "partial": False},
)
EXPECTED_OK = (
{
"dataset_info": DATASET_INFO_OK,
"pending": [],
"failed": [],
"partial": False,
},
1.0,
)
EXPECTED_PARTIAL_PENDING = (
{
"dataset_info": {
"config_1": CONFIG_INFO_1,
},
"pending": [
PreviousJob(
kind="config-info",
dataset="dataset_ok",
config="config_2",
split=None,
)
],
"failed": [],
"partial": False,
},
0.5,
)
EXPECTED_PARTIAL_FAILED = (
{
"dataset_info": {
"config_1": CONFIG_INFO_1,
},
"pending": [],
"failed": [
PreviousJob(
kind="config-info",
dataset="dataset_ok",
config="config_2",
split=None,
)
],
"partial": False,
},
1.0,
)
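# Pending configs lower the progress (0.5 when one of the two "config-info" entries is missing), while
# failed configs still count as processed, which is why EXPECTED_PARTIAL_FAILED reports a progress of 1.0.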
@pytest.fixture
def get_job_runner(
cache_mongo_resource: CacheMongoResource,
queue_mongo_resource: QueueMongoResource,
) -> GetJobRunner:
def _get_job_runner(
dataset: str,
app_config: AppConfig,
) -> DatasetInfoJobRunner:
processing_step_name = DatasetInfoJobRunner.get_job_type()
processing_graph = ProcessingGraph(
{
processing_step_name: {
"input_type": "dataset",
"job_runner_version": DatasetInfoJobRunner.get_job_runner_version(),
}
}
)
return DatasetInfoJobRunner(
job_info={
"type": DatasetInfoJobRunner.get_job_type(),
"params": {
"dataset": dataset,
"revision": "revision",
"config": None,
"split": None,
},
"job_id": "job_id",
"priority": Priority.NORMAL,
"difficulty": 50,
},
app_config=app_config,
processing_step=processing_graph.get_processing_step(processing_step_name),
)
return _get_job_runner
@pytest.mark.parametrize(
"dataset,upstream_responses,expected_error_code,expected,should_raise",
[
(
"dataset_ok",
[
UPSTREAM_RESPONSE_CONFIG_NAMES,
UPSTREAM_RESPONSE_CONFIG_INFO_1,
UPSTREAM_RESPONSE_CONFIG_INFO_2,
],
None,
EXPECTED_OK,
False,
),
(
"dataset_ok",
[UPSTREAM_RESPONSE_CONFIG_NAMES, UPSTREAM_RESPONSE_CONFIG_INFO_1],
None,
EXPECTED_PARTIAL_PENDING,
False,
),
(
"dataset_ok",
[
UPSTREAM_RESPONSE_CONFIG_NAMES,
UPSTREAM_RESPONSE_CONFIG_INFO_1,
UpstreamResponse(
kind="config-info",
dataset="dataset_ok",
config="config_2",
http_status=HTTPStatus.NOT_FOUND,
content={"error": "error"},
),
],
None,
EXPECTED_PARTIAL_FAILED,
False,
),
(
"status_error",
[
UpstreamResponse(
kind="dataset-config-names",
dataset="status_error",
config=None,
http_status=HTTPStatus.NOT_FOUND,
content={"error": "error"},
)
],
CachedArtifactError.__name__,
None,
True,
),
(
"format_error",
[
UpstreamResponse(
kind="dataset-config-names",
dataset="format_error",
config=None,
http_status=HTTPStatus.OK,
content={"not_dataset_info": "wrong_format"},
)
],
PreviousStepFormatError.__name__,
None,
True,
),
],
)
def test_compute(
app_config: AppConfig,
get_job_runner: GetJobRunner,
dataset: str,
upstream_responses: list[UpstreamResponse],
expected_error_code: str,
expected: Any,
should_raise: bool,
) -> None:
for upstream_response in upstream_responses:
upsert_response(**upstream_response)
job_runner = get_job_runner(dataset, app_config)
if should_raise:
with pytest.raises(Exception) as e:
job_runner.compute()
assert e.typename == expected_error_code
else:
compute_result = job_runner.compute()
assert compute_result.content == expected[0]
assert compute_result.progress == expected[1]
def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
dataset = "doesnotexist"
job_runner = get_job_runner(dataset, app_config)
with pytest.raises(CachedArtifactNotFoundError):
job_runner.compute()
| datasets-server-main | services/worker/tests/job_runners/dataset/test_info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import csv
import json
import pandas as pd
import pytest
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory: pytest.TempPathFactory) -> str:
path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
@pytest.fixture(scope="session")
def data_df(csv_path: str) -> pd.DataFrame:
    # read from the CSV file rather than the DATA variable: parsing the CSV does not preserve the first
    # column's string type (the values are read back as integers), and we have to follow the same behavior
return pd.read_csv(csv_path)
JSONL = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": None, "col_2": 1, "col_3": 1.0},
{"col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory: pytest.TempPathFactory) -> str:
path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
with open(path, "w", newline="") as f:
        f.writelines(json.dumps(o) + "\n" for o in JSONL)
return path
@pytest.fixture(scope="session")
def extra_fields_readme(tmp_path_factory: pytest.TempPathFactory) -> str:
path = str(tmp_path_factory.mktemp("data") / "README.md")
lines = [
"---",
'extra_gated_prompt: "You agree not to attempt to determine the identity of individuals in this dataset"',
"extra_gated_fields:",
" Company: text",
" Country: text",
" I agree to use this model for non-commercial use ONLY: checkbox",
"---",
]
with open(path, "w", newline="") as f:
f.writelines(f"{line}\n" for line in lines)
return path
DATASET_SCRIPT_WITH_EXTERNAL_FILES_CONTENT = """
import datasets
_URLS = {
"train": [
"https://huggingface.co/datasets/lhoestq/test/resolve/main/some_text.txt",
"https://huggingface.co/datasets/lhoestq/test/resolve/main/another_text.txt",
]
}
class Test(datasets.GeneratorBasedBuilder):
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features(
{
"text": datasets.Value("string"),
}
),
homepage="https://huggingface.co/datasets/lhoestq/test",
)
def _split_generators(self, dl_manager):
downloaded_files = dl_manager.download_and_extract(_URLS)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
]
def _generate_examples(self, filepaths):
_id = 0
for filepath in filepaths:
with open(filepath, encoding="utf-8") as f:
for line in f:
yield _id, {"text": line.rstrip()}
_id += 1
"""
@pytest.fixture(scope="session")
def dataset_script_with_external_files_path(tmp_path_factory: pytest.TempPathFactory) -> str:
path = str(tmp_path_factory.mktemp("data") / "{dataset_name}.py")
with open(path, "w", newline="") as f:
f.write(DATASET_SCRIPT_WITH_EXTERNAL_FILES_CONTENT)
return path
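# Note: "{dataset_name}" is kept literal in the file name here and in the script fixtures below; it is
# presumably substituted by the hub fixtures when the script is uploaded to a dataset repository, since a
# loading script must be named after the dataset.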
DATASET_SCRIPT_WITH_TWO_CONFIGS = """
import os
import datasets
from datasets import DatasetInfo, BuilderConfig, Features, Split, SplitGenerator, Value
class DummyDataset(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [BuilderConfig(name="first"), BuilderConfig(name="second")]
def _info(self) -> DatasetInfo:
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(Split.TRAIN, gen_kwargs={"text": self.config.name}),
SplitGenerator(Split.TEST, gen_kwargs={"text": self.config.name}),
]
def _generate_examples(self, text, **kwargs):
for i in range(1000):
yield i, {"text": text}
"""
@pytest.fixture(scope="session")
def dataset_script_with_two_configs_path(tmp_path_factory: pytest.TempPathFactory) -> str:
path = str(tmp_path_factory.mktemp("data") / "{dataset_name}.py")
with open(path, "w", newline="") as f:
f.write(DATASET_SCRIPT_WITH_TWO_CONFIGS)
return path
# N = 15
DATASET_SCRIPT_WITH_N_CONFIGS = """
import os
import datasets
from datasets import DatasetInfo, BuilderConfig, Features, Split, SplitGenerator, Value
class DummyDataset(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [BuilderConfig(name="config"+str(i)) for i in range(15)]
def _info(self) -> DatasetInfo:
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(Split.TRAIN, gen_kwargs={"text": self.config.name}),
]
def _generate_examples(self, text, **kwargs):
for i in range(1000):
yield i, {"text": text}
"""
@pytest.fixture(scope="session")
def dataset_script_with_n_configs_path(tmp_path_factory: pytest.TempPathFactory) -> str:
path = str(tmp_path_factory.mktemp("data") / "{dataset_name}.py")
with open(path, "w", newline="") as f:
f.write(DATASET_SCRIPT_WITH_N_CONFIGS)
return path
DATASET_SCRIPT_WITH_MANUAL_DOWNLOAD = """
import os
import datasets
from datasets import DatasetInfo, BuilderConfig, Features, Split, SplitGenerator, Value
class DummyDatasetManualDownload(datasets.GeneratorBasedBuilder):
@property
def manual_download_instructions(self):
return "To use DummyDatasetManualDownload you have to download it manually."
def _info(self) -> DatasetInfo:
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(Split.TRAIN, gen_kwargs={"text": self.config.name}),
]
def _generate_examples(self, text, **kwargs):
for i in range(1000):
yield i, {"text": text}
"""
@pytest.fixture(scope="session")
def dataset_script_with_manual_download_path(tmp_path_factory: pytest.TempPathFactory) -> str:
path = str(tmp_path_factory.mktemp("data") / "{dataset_name}.py")
with open(path, "w", newline="") as f:
f.write(DATASET_SCRIPT_WITH_MANUAL_DOWNLOAD)
return path
| datasets-server-main | services/worker/tests/fixtures/files.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import datetime
from collections.abc import Mapping
from pathlib import Path
from typing import Any, Optional
import numpy as np
import pandas as pd
import pytest
from datasets import (
Array2D,
Array3D,
Array4D,
Array5D,
Audio,
ClassLabel,
Dataset,
Features,
Image,
Sequence,
Translation,
TranslationVariableLanguages,
Value,
)
from datasets.features.features import FeatureType
def value(content: Any, dtype: Any) -> Dataset:
return Dataset.from_pandas(pd.DataFrame({"col": [content]}, dtype=dtype))
def other(content: Any, feature_type: Optional[FeatureType] = None) -> Dataset:
if feature_type:
features = Features({"col": feature_type})
return Dataset.from_dict({"col": [content]}, features=features)
else:
return Dataset.from_dict({"col": [content]})
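# Helpers: `value()` builds a one-row dataset from a pandas dtype (exercising Value features), while
# `other()` builds one from an explicit `datasets` feature type, or lets the library infer the feature
# when none is given.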
@pytest.fixture(scope="session")
def datasets() -> Mapping[str, Dataset]:
sampling_rate = 16_000
return {
# Value feature
"null": value(None, None),
"bool": value(False, pd.BooleanDtype()),
"int8": value(-7, pd.Int8Dtype()),
"int16": value(-7, pd.Int16Dtype()),
"int32": value(-7, pd.Int32Dtype()),
"int64": value(-7, pd.Int64Dtype()),
"uint8": value(7, pd.UInt8Dtype()),
"uint16": value(7, pd.UInt16Dtype()),
"uint32": value(7, pd.UInt32Dtype()),
"uint64": value(7, pd.UInt64Dtype()),
"float16": value(-3.14, np.float16),
"float32": value(-3.14, np.float32),
"float64": value(-3.14, np.float64),
"time": value(datetime.time(1, 1, 1), None),
"timestamp_1": value(pd.Timestamp(2020, 1, 1), None),
"timestamp_2": value(pd.Timestamp(1513393355.5, unit="s"), None),
"timestamp_3": value(pd.Timestamp(1513393355500, unit="ms"), None),
"timestamp_tz": value(pd.Timestamp(year=2020, month=1, day=1, tz="US/Pacific"), None),
"string": value("a string", pd.StringDtype(storage="python")),
# other types of features
"class_label": other("positive", ClassLabel(names=["negative", "positive"])),
"dict": other({"a": 0}, None),
"list": other([{"a": 0}], None),
"sequence_simple": other([0], None),
"sequence": other([{"a": 0}], Sequence(feature={"a": Value(dtype="int64")})),
"array2d": other(np.zeros((2, 2), dtype="float32"), Array2D(shape=(2, 2), dtype="float32")),
"array3d": other(np.zeros((2, 2, 2), dtype="float32"), Array3D(shape=(2, 2, 2), dtype="float32")),
"array4d": other(np.zeros((2, 2, 2, 2), dtype="float32"), Array4D(shape=(2, 2, 2, 2), dtype="float32")),
"array5d": other(np.zeros((2, 2, 2, 2, 2), dtype="float32"), Array5D(shape=(2, 2, 2, 2, 2), dtype="float32")),
"audio": other({"array": [0.1, 0.2, 0.3], "sampling_rate": sampling_rate}, Audio(sampling_rate=sampling_rate)),
"image": other(str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"), Image()),
"translation": other({"en": "the cat", "fr": "le chat"}, Translation(languages=["en", "fr"])),
"translation_variable_languages": other(
{"en": "the cat", "fr": ["le chat", "la chatte"]},
TranslationVariableLanguages(languages=["en", "fr"]),
),
"images_list": other(
[
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
],
[Image()],
),
"audios_list": other(
[
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
[Audio()],
),
"images_sequence": other(
[
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
],
Sequence(feature=Image()),
),
"audios_sequence": other(
[
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
Sequence(feature=Audio()),
),
"dict_of_audios_and_images": other(
{
"a": 0,
"b": [
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
],
"c": {
"ca": [
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
]
},
},
{"a": Value(dtype="int64"), "b": [Image()], "c": {"ca": [Audio()]}},
),
"sequence_of_dicts": other(
[{"a": {"b": 0}}, {"a": {"b": 1}}], Sequence(feature={"a": {"b": Value(dtype="int64")}})
),
"none_value": other({"a": None}, {"a": Value(dtype="int64")}),
"big": Dataset.from_pandas(
pd.DataFrame({"col": ["a" * 1_234 for _ in range(4_567)]}, dtype=pd.StringDtype(storage="python"))
),
"spawning_opt_in_out": Dataset.from_pandas(
pd.DataFrame(
{
"col": [
"http://testurl.test/test_image-optOut.jpg",
"http://testurl.test/test_image2.jpg",
"other",
"http://testurl.test/test_image3-optIn.png",
]
},
dtype=pd.StringDtype(storage="python"),
)
),
"duckdb_index": Dataset.from_pandas(
pd.DataFrame(
{
"text": [
(
"Grand Moff Tarkin and Lord Vader are interrupted in their discussion by the buzz of the"
" comlink"
),
"There goes another one.",
"Vader turns round and round in circles as his ship spins into space.",
"We count thirty Rebel ships, Lord Vader.",
"The wingman spots the pirateship coming at him and warns the Dark Lord",
],
"column with spaces": [
"a",
"b",
"c",
"d",
"e",
],
},
dtype=pd.StringDtype(storage="python"),
)
),
"descriptive_statistics": Dataset.from_dict(
{
"int_column": [0, 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 5, 5, 5, 5, 6, 7, 8, 8, 8],
"int_nan_column": [0, None, 1, None, 2, None, 2, None, 4, None, 5, None, 5, 5, 5, 6, 7, 8, 8, 8],
"float_column": [
0.1,
0.2,
0.3,
0.4,
0.5,
1.1,
2.2,
2.3,
2.6,
4.7,
5.1,
6.2,
6.7,
6.8,
7.0,
8.3,
8.4,
9.2,
9.7,
9.9,
],
"float_nan_column": [
None,
0.2,
0.3,
None,
0.5,
None,
2.2,
None,
2.6,
4.7,
5.1,
None,
None,
None,
None,
8.3,
8.4,
9.2,
9.7,
9.9,
],
"class_label_column": [
"cat",
"dog",
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"dog",
"cat",
"dog",
"cat",
],
"class_label_nan_column": [
"cat",
None,
"cat",
"cat",
"cat",
None,
"cat",
"cat",
"cat",
None,
"cat",
"cat",
"cat",
"cat",
"cat",
"cat",
"dog",
"cat",
None,
"cat",
],
"float_negative_column": [
-7.221,
-5.333,
-15.154,
-15.392,
-15.054,
-10.176,
-10.072,
-10.59,
-6.0,
-14.951,
-14.054,
-9.706,
-7.053,
-10.072,
-15.054,
-12.777,
-12.233,
-13.54,
-14.376,
-15.754,
],
"float_cross_zero_column": [
-7.221,
-5.333,
-15.154,
-15.392,
-15.054,
-10.176,
-10.072,
-10.59,
6.0,
14.951,
14.054,
-9.706,
7.053,
0.0,
-15.054,
-12.777,
12.233,
13.54,
-14.376,
15.754,
],
"float_large_values_column": [
1101.34567,
1178.923,
197.2134,
1150.8483,
169.907655,
156.4580,
134.4368456,
189.456,
145.0912,
148.354465,
190.8943,
1134.34,
155.22366,
153.0,
163.0,
143.5468,
177.231,
132.568,
191.99,
1114.0,
],
"int_negative_column": [
-10,
-9,
-8,
-1,
-5,
-1,
-2,
-3,
-5,
-4,
-7,
-8,
-11,
-15,
-20 - 11,
-1,
-14,
-11,
-0,
-10,
],
"int_cross_zero_column": [
-10,
-9,
-8,
0,
0,
1,
2,
-3,
-5,
4,
7,
8,
11,
15,
20 - 11,
-1,
14,
11,
0,
-10,
],
"int_large_values_column": [
1101,
1178,
197,
1150,
169,
156,
134,
189,
145,
148,
190,
1134,
155,
153,
163,
143,
177,
132,
191,
1114,
],
},
features=Features(
{
"int_column": Value("int32"),
"int_nan_column": Value("int32"),
"int_negative_column": Value("int32"),
"int_cross_zero_column": Value("int32"),
"int_large_values_column": Value("int32"),
"float_column": Value("float32"),
"float_nan_column": Value("float32"),
"float_negative_column": Value("float64"),
"float_cross_zero_column": Value("float32"),
"float_large_values_column": Value("float32"),
"class_label_column": ClassLabel(names=["cat", "dog"]),
"class_label_nan_column": ClassLabel(names=["cat", "dog"]),
}
),
),
}
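# Example test usage (illustrative): each entry is a tiny single-column Dataset keyed by the feature
# type it exercises, e.g.:
#   def test_image_feature(datasets):
#       ds = datasets["image"]
#       assert isinstance(ds.features["col"], Image)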
| datasets-server-main | services/worker/tests/fixtures/datasets.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/tests/fixtures/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
# Adapted from https://github.com/huggingface/datasets/blob/main/tests/fixtures/hub.py
import csv
import time
from collections.abc import Callable, Iterator, Mapping
from contextlib import suppress
from pathlib import Path
from typing import Any, Literal, Optional, TypedDict, Union
import pytest
import requests
from datasets import Dataset, DatasetBuilder, load_dataset_builder
from huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES
from huggingface_hub.hf_api import HfApi
from huggingface_hub.utils._errors import hf_raise_for_status
from ..constants import CI_HUB_ENDPOINT, CI_URL_TEMPLATE, CI_USER, CI_USER_TOKEN
DATASET = "dataset"
hf_api = HfApi(endpoint=CI_HUB_ENDPOINT)
def get_default_config_split() -> tuple[str, str]:
config = "default"
split = "train"
return config, split
def update_repo_settings(
*,
repo_id: str,
private: Optional[bool] = None,
gated: Optional[str] = None,
token: Optional[str] = None,
organization: Optional[str] = None,
repo_type: Optional[str] = None,
name: Optional[str] = None,
) -> Any:
"""Update the settings of a repository.
Args:
        repo_id (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
<Tip>
Version added: 0.5
</Tip>
private (`bool`, *optional*, defaults to `None`):
Whether the repo should be private.
gated (`str`, *optional*, defaults to `None`):
Whether the repo should request user access.
Possible values are 'auto' and 'manual'
token (`str`, *optional*):
An authentication token (See https://huggingface.co/settings/token)
repo_type (`str`, *optional*):
Set to `"dataset"` or `"space"` if uploading to a dataset or
space, `None` or `"model"` if uploading to a model. Default is
`None`.
Returns:
The HTTP response in json.
<Tip>
Raises the following errors:
- [`~huggingface_hub.utils.RepositoryNotFoundError`]
If the repository to download from cannot be found. This may be because it doesn't exist,
or because it is set to `private` and you do not have access.
</Tip>
"""
if repo_type not in REPO_TYPES:
raise ValueError("Invalid repo type")
organization, name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
if organization is None:
namespace = hf_api.whoami(token)["name"]
else:
namespace = organization
path_prefix = f"{hf_api.endpoint}/api/"
if repo_type in REPO_TYPES_URL_PREFIXES:
path_prefix += REPO_TYPES_URL_PREFIXES[repo_type]
path = f"{path_prefix}{namespace}/{name}/settings"
json: dict[str, Union[bool, str]] = {}
if private is not None:
json["private"] = private
if gated is not None:
json["gated"] = gated
r = requests.put(
path,
headers={"authorization": f"Bearer {token}"},
json=json,
)
hf_raise_for_status(r)
return r.json()
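# Example call (illustrative, with a hypothetical repo id): gate an existing dataset repo so that
# users must request access before downloading it, as create_hub_dataset_repo does below:
#   update_repo_settings(repo_id=f"{CI_USER}/some-dataset", gated="auto", token=CI_USER_TOKEN, repo_type="dataset")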
def create_hub_dataset_repo(
*,
prefix: str,
file_paths: Optional[list[str]] = None,
dataset: Optional[Dataset] = None,
private: bool = False,
gated: Optional[str] = None,
) -> str:
dataset_name = f"{prefix}-{int(time.time() * 10e3)}"
repo_id = f"{CI_USER}/{dataset_name}"
if dataset is not None:
dataset.push_to_hub(repo_id=repo_id, private=private, token=CI_USER_TOKEN, embed_external_files=True)
else:
hf_api.create_repo(repo_id=repo_id, token=CI_USER_TOKEN, repo_type=DATASET, private=private)
if gated:
update_repo_settings(repo_id=repo_id, token=CI_USER_TOKEN, gated=gated, repo_type=DATASET)
if file_paths is not None:
for file_path in file_paths:
hf_api.upload_file(
token=CI_USER_TOKEN,
path_or_fileobj=file_path,
path_in_repo=Path(file_path).name.replace("{dataset_name}", dataset_name),
repo_id=repo_id,
repo_type=DATASET,
)
return repo_id
def delete_hub_dataset_repo(repo_id: str) -> None:
with suppress(requests.exceptions.HTTPError, ValueError):
hf_api.delete_repo(repo_id=repo_id, token=CI_USER_TOKEN, repo_type=DATASET)
# TODO: factor all the datasets fixture with one function that manages the yield and deletion
@pytest.fixture
def tmp_dataset_repo_factory() -> Iterator[Callable[[str], str]]:
repo_ids: list[str] = []
def _tmp_dataset_repo(repo_id: str) -> str:
nonlocal repo_ids
hf_api.create_repo(repo_id=repo_id, token=CI_USER_TOKEN, repo_type=DATASET)
repo_ids.append(repo_id)
return repo_id
yield _tmp_dataset_repo
for repo_id in repo_ids:
delete_hub_dataset_repo(repo_id=repo_id)
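# Example test usage (illustrative): repos created through the factory are deleted at teardown.
#   def test_something(tmp_dataset_repo_factory: Callable[[str], str]) -> None:
#       repo_id = tmp_dataset_repo_factory(f"{CI_USER}/my_tmp_dataset")
#       ...  # use the empty repo; it is removed when the fixture is finalized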
# https://docs.pytest.org/en/6.2.x/fixture.html#yield-fixtures-recommended
@pytest.fixture(scope="session")
def hub_public_empty() -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="empty")
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_csv(csv_path: str) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="csv", file_paths=[csv_path])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_private_csv(csv_path: str) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="csv_private", file_paths=[csv_path], private=True)
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_gated_csv(csv_path: str) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="csv_gated", file_paths=[csv_path], gated="auto")
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_gated_duckdb_index(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="duckdb_index_gated", dataset=datasets["duckdb_index"], gated="auto")
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_gated_descriptive_statistics(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(
prefix="descriptive_statistics_gated",
dataset=datasets["descriptive_statistics"],
gated="auto",
)
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_jsonl(jsonl_path: str) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="jsonl", file_paths=[jsonl_path])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_audio(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="audio", dataset=datasets["audio"])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_image(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="image", dataset=datasets["image"])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_images_list(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="images_list", dataset=datasets["images_list"])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_big(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="big", dataset=datasets["big"])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_big_no_info(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="big-no-info", dataset=datasets["big"])
hf_api.delete_file(
"README.md", repo_id=repo_id, repo_type="dataset", commit_message="Delete README.md", token=CI_USER_TOKEN
)
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_big_csv(big_csv_path: str) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="big-csv", file_paths=[big_csv_path])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_external_files(dataset_script_with_external_files_path: str) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="external_files", file_paths=[dataset_script_with_external_files_path])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture
def external_files_dataset_builder(hub_public_external_files: str) -> DatasetBuilder:
return load_dataset_builder(hub_public_external_files)
@pytest.fixture(scope="session")
def hub_public_legacy_configs(dataset_script_with_two_configs_path: str) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="legacy_configs", file_paths=[dataset_script_with_two_configs_path])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_n_configs(dataset_script_with_n_configs_path: str) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="n_configs", file_paths=[dataset_script_with_n_configs_path])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_manual_download(dataset_script_with_manual_download_path: str) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="manual_download", file_paths=[dataset_script_with_manual_download_path])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_spawning_opt_in_out(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="spawning_opt_in_out", dataset=datasets["spawning_opt_in_out"])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_duckdb_index(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="duckdb_index", dataset=datasets["duckdb_index"])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
@pytest.fixture(scope="session")
def hub_public_descriptive_statistics(datasets: Mapping[str, Dataset]) -> Iterator[str]:
repo_id = create_hub_dataset_repo(prefix="descriptive_statistics", dataset=datasets["descriptive_statistics"])
yield repo_id
delete_hub_dataset_repo(repo_id=repo_id)
class HubDatasetTest(TypedDict):
name: str
config_names_response: Any
splits_response: Any
first_rows_response: Any
parquet_and_info_response: Any
HubDatasets = Mapping[str, HubDatasetTest]
def create_config_names_response(dataset: str) -> Any:
config, _ = get_default_config_split()
return {
"config_names": [
{
"dataset": dataset,
"config": config,
}
]
}
def create_splits_response(dataset: str) -> Any:
config, split = get_default_config_split()
return {
"splits": [
{
"dataset": dataset,
"config": config,
"split": split,
}
]
}
def create_first_rows_response(dataset: str, cols: Mapping[str, Any], rows: list[Any]) -> Any:
config, split = get_default_config_split()
return {
"dataset": dataset,
"config": config,
"split": split,
"features": [
{
"feature_idx": feature_idx,
"name": name,
"type": type,
}
for feature_idx, (name, type) in enumerate(cols.items())
],
"rows": [
{
"row_idx": row_idx,
"truncated_cells": [],
"row": row,
}
for row_idx, row in enumerate(rows)
],
"truncated": False,
}
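# Illustrative output (for the plain csv fixtures below): with DATA_cols and DATA_rows this builds
# 3 feature items and 4 row items; the first row item is roughly:
#   {"row_idx": 0, "truncated_cells": [], "row": {"col_1": 0, "col_2": 0, "col_3": 0.0}}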
def create_dataset_info_response_for_csv(dataset: str, config: str) -> Any:
dataset_name = dataset.split("/")[-1]
return {
"description": "",
"citation": "",
"homepage": "",
"license": "",
"features": DATA_cols,
"builder_name": "csv",
"config_name": config,
"dataset_name": dataset_name,
"version": {"version_str": "0.0.0", "major": 0, "minor": 0, "patch": 0},
"splits": {"train": {"name": "train", "num_bytes": 96, "num_examples": 4, "dataset_name": dataset_name}},
"download_checksums": {
f"https://hub-ci.huggingface.co/datasets/{dataset}/resolve/__COMMIT__/dataset.csv": {
"num_bytes": 50,
"checksum": None,
}
},
"download_size": 50,
"dataset_size": 96,
"size_in_bytes": 146,
}
def create_dataset_info_response_for_partially_generated_big_csv(dataset: str, config: str) -> Any:
# Dataset is partially converted to parquet: the first 10KB instead of the full 5MB
# Missing fields:
# - download_size: not applicable, because the dataset is generated using partially downloaded files
dataset_name = dataset.split("/")[-1]
return {
"description": "",
"citation": "",
"homepage": "",
"license": "",
"features": BIG_cols,
"builder_name": "csv",
"config_name": config,
"dataset_name": dataset_name,
"version": {"version_str": "0.0.0", "major": 0, "minor": 0, "patch": 0},
"splits": {"train": {"name": "train", "num_bytes": 12380, "num_examples": 10, "dataset_name": "csv"}},
"dataset_size": 12380,
}
def create_dataset_info_response_for_big_parquet(dataset: str, config: str) -> Any:
dataset_name = dataset.split("/")[-1]
return {
"description": "",
"citation": "",
"homepage": "",
"license": "",
"features": BIG_cols,
"builder_name": "parquet",
"config_name": config,
"dataset_name": dataset_name,
"version": {"version_str": "0.0.0", "major": 0, "minor": 0, "patch": 0},
"splits": {
"train": {"name": "train", "num_bytes": 5653946, "num_examples": len(BIG_rows), "dataset_name": None}
},
"download_size": BIG_PARQUET_FILE,
"dataset_size": 5653946,
}
def create_dataset_info_response_for_big_parquet_no_info() -> Any:
return {
"description": "",
"citation": "",
"homepage": "",
"license": "",
"features": BIG_cols,
"splits": {
"train": {"name": "train", "num_bytes": 12345, "num_examples": len(BIG_rows), "dataset_name": None}
},
"download_size": BIG_PARQUET_FILE,
"dataset_size": 12345,
}
def create_dataset_info_response_for_audio(dataset: str, config: str) -> Any:
dataset_name = dataset.split("/")[-1]
return {
"description": "",
"citation": "",
"homepage": "",
"license": "",
"features": AUDIO_cols,
"builder_name": "parquet",
"config_name": config,
"dataset_name": dataset_name,
"version": {"version_str": "0.0.0", "major": 0, "minor": 0, "patch": 0},
"splits": {"train": {"name": "train", "num_bytes": 59, "num_examples": 1, "dataset_name": None}},
"download_size": AUDIO_PARQUET_SIZE,
"dataset_size": 59,
}
def create_parquet_and_info_response(
dataset: str,
data_type: Literal["csv", "big-csv", "audio", "big_parquet", "big_parquet_no_info"],
partial: bool = False,
) -> Any:
config, split = get_default_config_split()
filename = "0000.parquet"
size = (
CSV_PARQUET_SIZE
if data_type == "csv"
else PARTIAL_CSV_PARQUET_SIZE
if data_type == "big-csv"
else AUDIO_PARQUET_SIZE
if data_type == "audio"
else BIG_PARQUET_FILE
)
info = (
create_dataset_info_response_for_csv(dataset, config)
if data_type == "csv"
else create_dataset_info_response_for_partially_generated_big_csv(dataset, config)
if data_type == "big-csv"
else create_dataset_info_response_for_audio(dataset, config)
if data_type == "audio"
else create_dataset_info_response_for_big_parquet(dataset, config)
if data_type == "big_parquet"
else create_dataset_info_response_for_big_parquet_no_info()
)
partial_prefix = "partial-" if partial else ""
return {
"parquet_files": [
{
"dataset": dataset,
"config": config,
"split": split,
"url": CI_URL_TEMPLATE.format(
repo_id=f"datasets/{dataset}",
revision="refs%2Fconvert%2Fparquet",
filename=f"{config}/{partial_prefix}{split}/{filename}",
),
"filename": filename,
"size": size,
}
],
"dataset_info": info,
"partial": partial,
}
CSV_PARQUET_SIZE = 1_866
PARTIAL_CSV_PARQUET_SIZE = 8_188
AUDIO_PARQUET_SIZE = 1_384
BIG_PARQUET_FILE = 38_896
DATA_cols = {
"col_1": {"_type": "Value", "dtype": "int64"},
"col_2": {"_type": "Value", "dtype": "int64"},
"col_3": {"_type": "Value", "dtype": "float64"},
}
DATA_rows = [
{"col_1": 0, "col_2": 0, "col_3": 0.0},
{"col_1": 1, "col_2": 1, "col_3": 1.0},
{"col_1": 2, "col_2": 2, "col_3": 2.0},
{"col_1": 3, "col_2": 3, "col_3": 3.0},
]
JSONL_cols = {
"col_1": {"_type": "Value", "dtype": "string"},
"col_2": {"_type": "Value", "dtype": "int64"},
"col_3": {"_type": "Value", "dtype": "float64"},
}
JSONL_rows = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": None, "col_2": 1, "col_3": 1.0},
{"col_1": None, "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
AUDIO_cols = {
"col": {
"_type": "Audio",
"sampling_rate": 16_000,
},
}
def get_AUDIO_rows(dataset: str) -> Any:
config, split = get_default_config_split()
return [
{
"col": [
{
"src": f"http://localhost/assets/{dataset}/--/{config}/{split}/0/col/audio.wav",
"type": "audio/wav",
},
]
}
]
IMAGE_cols = {
"col": {"_type": "Image"},
}
def get_IMAGE_rows(dataset: str) -> Any:
config, split = get_default_config_split()
return [
{
"col": {
"src": f"http://localhost/assets/{dataset}/--/{config}/{split}/0/col/image.jpg",
"height": 480,
"width": 640,
},
}
]
IMAGES_LIST_cols = {
"col": [{"_type": "Image"}],
}
def get_IMAGES_LIST_rows(dataset: str) -> Any:
config, split = get_default_config_split()
return [
{
"col": [
{
"src": f"http://localhost/assets/{dataset}/--/{config}/{split}/0/col/image-1d100e9.jpg",
"height": 480,
"width": 640,
},
{
"src": f"http://localhost/assets/{dataset}/--/{config}/{split}/0/col/image-1d300ea.jpg",
"height": 480,
"width": 640,
},
]
}
]
BIG_cols = {
"col": {"_type": "Value", "dtype": "string"},
}
BIG_rows = [{"col": "a" * 1_234} for _ in range(4_567)]
@pytest.fixture(scope="session")
def big_csv_path(tmp_path_factory: pytest.TempPathFactory) -> str:
path = str(tmp_path_factory.mktemp("data") / "big_dataset.csv")
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=list(BIG_cols))
writer.writeheader()
for row in BIG_rows:
writer.writerow(row)
return path
TEXT_cols = {
"text": {"_type": "Value", "dtype": "string"},
}
TEXT_rows = [
{"text": text}
for text in [
"foo",
"bar",
"foobar",
"- Hello there !",
"- General Kenobi !",
]
]
SPAWNING_OPT_IN_OUT_cols = {
"col": [{"_type": "Value", "dtype": "string"}],
}
SPAWNING_OPT_IN_OUT_rows = ["http://testurl.test/test_image.jpg", "http://testurl.test/test_image2.jpg", "other"]
@pytest.fixture
def hub_responses_does_not_exist() -> HubDatasetTest:
return {
"name": "does_not_exist",
"config_names_response": None,
"splits_response": None,
"first_rows_response": None,
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_does_not_exist_config() -> HubDatasetTest:
return {
"name": "does_not_exist_config",
"config_names_response": None,
"splits_response": None,
"first_rows_response": None,
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_does_not_exist_split() -> HubDatasetTest:
return {
"name": "does_not_exist_split",
"config_names_response": None,
"splits_response": None,
"first_rows_response": None,
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_empty(hub_public_empty: str) -> HubDatasetTest:
return {
"name": hub_public_empty,
"config_names_response": None,
"splits_response": None,
"first_rows_response": None,
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_public(hub_public_csv: str) -> HubDatasetTest:
return {
"name": hub_public_csv,
"config_names_response": create_config_names_response(hub_public_csv),
"splits_response": create_splits_response(hub_public_csv),
"first_rows_response": create_first_rows_response(hub_public_csv, DATA_cols, DATA_rows),
"parquet_and_info_response": create_parquet_and_info_response(dataset=hub_public_csv, data_type="csv"),
}
@pytest.fixture
def hub_responses_private(hub_private_csv: str) -> HubDatasetTest:
return {
"name": hub_private_csv,
"config_names_response": create_config_names_response(hub_private_csv),
"splits_response": create_splits_response(hub_private_csv),
"first_rows_response": create_first_rows_response(hub_private_csv, DATA_cols, DATA_rows),
"parquet_and_info_response": create_parquet_and_info_response(dataset=hub_private_csv, data_type="csv"),
}
@pytest.fixture
def hub_responses_gated(hub_gated_csv: str) -> HubDatasetTest:
return {
"name": hub_gated_csv,
"config_names_response": create_config_names_response(hub_gated_csv),
"splits_response": create_splits_response(hub_gated_csv),
"first_rows_response": create_first_rows_response(hub_gated_csv, DATA_cols, DATA_rows),
"parquet_and_info_response": create_parquet_and_info_response(dataset=hub_gated_csv, data_type="csv"),
}
@pytest.fixture
def hub_reponses_jsonl(hub_public_jsonl: str) -> HubDatasetTest:
return {
"name": hub_public_jsonl,
"config_names_response": create_config_names_response(hub_public_jsonl),
"splits_response": create_splits_response(hub_public_jsonl),
"first_rows_response": create_first_rows_response(hub_public_jsonl, JSONL_cols, JSONL_rows),
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_audio(hub_public_audio: str) -> HubDatasetTest:
return {
"name": hub_public_audio,
"config_names_response": create_config_names_response(hub_public_audio),
"splits_response": create_splits_response(hub_public_audio),
"first_rows_response": create_first_rows_response(
hub_public_audio, AUDIO_cols, get_AUDIO_rows(hub_public_audio)
),
"parquet_and_info_response": create_parquet_and_info_response(dataset=hub_public_audio, data_type="audio"),
}
@pytest.fixture
def hub_responses_image(hub_public_image: str) -> HubDatasetTest:
return {
"name": hub_public_image,
"config_names_response": create_config_names_response(hub_public_image),
"splits_response": create_splits_response(hub_public_image),
"first_rows_response": create_first_rows_response(
hub_public_image, IMAGE_cols, get_IMAGE_rows(hub_public_image)
),
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_images_list(hub_public_images_list: str) -> HubDatasetTest:
return {
"name": hub_public_images_list,
"config_names_response": create_config_names_response(hub_public_images_list),
"splits_response": create_splits_response(hub_public_images_list),
"first_rows_response": create_first_rows_response(
hub_public_images_list, IMAGES_LIST_cols, get_IMAGES_LIST_rows(hub_public_images_list)
),
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_big(hub_public_big: str) -> HubDatasetTest:
return {
"name": hub_public_big,
"config_names_response": create_config_names_response(hub_public_big),
"splits_response": create_splits_response(hub_public_big),
"first_rows_response": create_first_rows_response(hub_public_big, BIG_cols, BIG_rows),
"parquet_and_info_response": create_parquet_and_info_response(dataset=hub_public_big, data_type="big_parquet"),
}
@pytest.fixture
def hub_responses_big_no_info(hub_public_big_no_info: str) -> HubDatasetTest:
return {
"name": hub_public_big_no_info,
"config_names_response": create_config_names_response(hub_public_big_no_info),
"splits_response": create_splits_response(hub_public_big_no_info),
"first_rows_response": create_first_rows_response(hub_public_big_no_info, BIG_cols, BIG_rows),
"parquet_and_info_response": create_parquet_and_info_response(
dataset=hub_public_big_no_info, data_type="big_parquet_no_info"
),
}
@pytest.fixture
def hub_responses_big_csv(hub_public_big_csv: str) -> HubDatasetTest:
return {
"name": hub_public_big_csv,
"config_names_response": create_config_names_response(hub_public_big_csv),
"splits_response": create_splits_response(hub_public_big_csv),
"first_rows_response": create_first_rows_response(hub_public_big_csv, BIG_cols, BIG_rows),
"parquet_and_info_response": create_parquet_and_info_response(
dataset=hub_public_big_csv, data_type="big-csv", partial=True
),
}
@pytest.fixture
def hub_responses_external_files(hub_public_external_files: str) -> HubDatasetTest:
return {
"name": hub_public_external_files,
"config_names_response": create_config_names_response(hub_public_external_files),
"splits_response": create_splits_response(hub_public_external_files),
"first_rows_response": create_first_rows_response(hub_public_external_files, TEXT_cols, TEXT_rows),
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_spawning_opt_in_out(hub_public_spawning_opt_in_out: str) -> HubDatasetTest:
return {
"name": hub_public_spawning_opt_in_out,
"config_names_response": create_config_names_response(hub_public_spawning_opt_in_out),
"splits_response": create_splits_response(hub_public_spawning_opt_in_out),
"first_rows_response": create_first_rows_response(
hub_public_spawning_opt_in_out, SPAWNING_OPT_IN_OUT_cols, SPAWNING_OPT_IN_OUT_rows
),
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_duckdb_index(hub_public_duckdb_index: str) -> HubDatasetTest:
return {
"name": hub_public_duckdb_index,
"config_names_response": create_config_names_response(hub_public_duckdb_index),
"splits_response": create_splits_response(hub_public_duckdb_index),
"first_rows_response": create_first_rows_response(hub_public_duckdb_index, TEXT_cols, TEXT_rows),
"parquet_and_info_response": create_parquet_and_info_response(
dataset=hub_public_duckdb_index, data_type="csv"
),
}
@pytest.fixture
def hub_responses_partial_duckdb_index(hub_public_duckdb_index: str) -> HubDatasetTest:
return {
"name": hub_public_duckdb_index,
"config_names_response": create_config_names_response(hub_public_duckdb_index),
"splits_response": create_splits_response(hub_public_duckdb_index),
"first_rows_response": create_first_rows_response(hub_public_duckdb_index, TEXT_cols, TEXT_rows),
"parquet_and_info_response": create_parquet_and_info_response(
dataset=hub_public_duckdb_index, data_type="csv", partial=True
),
}
@pytest.fixture
def hub_responses_gated_duckdb_index(hub_gated_duckdb_index: str) -> HubDatasetTest:
return {
"name": hub_gated_duckdb_index,
"config_names_response": create_config_names_response(hub_gated_duckdb_index),
"splits_response": create_splits_response(hub_gated_duckdb_index),
"first_rows_response": create_first_rows_response(hub_gated_duckdb_index, TEXT_cols, TEXT_rows),
"parquet_and_info_response": create_parquet_and_info_response(dataset=hub_gated_duckdb_index, data_type="csv"),
}
@pytest.fixture
def hub_responses_descriptive_statistics(hub_public_descriptive_statistics: str) -> HubDatasetTest:
return {
"name": hub_public_descriptive_statistics,
"config_names_response": create_config_names_response(hub_public_descriptive_statistics),
"splits_response": create_splits_response(hub_public_descriptive_statistics),
"first_rows_response": None,
"parquet_and_info_response": None,
}
@pytest.fixture
def hub_responses_gated_descriptive_statistics(hub_gated_descriptive_statistics: str) -> HubDatasetTest:
return {
"name": hub_gated_descriptive_statistics,
"config_names_response": create_config_names_response(hub_gated_descriptive_statistics),
"splits_response": create_splits_response(hub_gated_descriptive_statistics),
"first_rows_response": None,
"parquet_and_info_response": None,
}
| datasets-server-main | services/worker/tests/fixtures/hub.py |
# type: ignore
import posixpath
import shutil
from pathlib import Path
from unittest.mock import patch
import fsspec
import pytest
from fsspec.implementations.local import (
AbstractFileSystem,
LocalFileSystem,
stringify_path,
)
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out] # noqa: E203
else:
return [name[len(self.local_root_dir) :] for name in out] # noqa: E203
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :] # noqa: E203
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
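# Illustrative sketch of the mapping implemented above (hypothetical paths): the "mock://" protocol
# is stripped and the remainder joined onto local_root_dir, while returned names have the root
# prefix removed again:
#   fs = MockFileSystem(local_root_dir="/tmp/mockfs")
#   fs.info("mock://data/file.txt")["name"]   # reads /tmp/mockfs/data/file.txt -> "data/file.txt"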
class TmpDirFileSystem(MockFileSystem):
protocol = "tmp"
tmp_dir = None
def __init__(self, *args, **kwargs):
assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set"
super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("tmp://"):
path = path[6:]
return path
@pytest.fixture
def mock_fsspec():
original_registry = fsspec.registry.copy()
fsspec.register_implementation("mock", MockFileSystem)
fsspec.register_implementation("tmp", TmpDirFileSystem)
yield
fsspec.registry = original_registry
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
@pytest.fixture
def tmpfs(tmp_path_factory, mock_fsspec):
tmp_fs_dir = tmp_path_factory.mktemp("tmpfs")
with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir):
yield TmpDirFileSystem()
shutil.rmtree(tmp_fs_dir)
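# Example test usage (illustrative): the tmpfs fixture gives a throwaway "tmp://" filesystem rooted
# in a pytest temp directory, with parent directories created on write (auto_mkdir=True):
#   def test_write_then_list(tmpfs):
#       with tmpfs.open("dir/hello.txt", "w") as f:
#           f.write("hello")
#       tmpfs.ls("dir", detail=False)   # roughly ["dir/hello.txt"], root prefix stripped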
| datasets-server-main | services/worker/tests/fixtures/fsspec.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from dataclasses import dataclass, field
from typing import Optional
from environs import Env
from libcommon.config import (
AssetsConfig,
CacheConfig,
CommonConfig,
LogConfig,
ParquetMetadataConfig,
ProcessingGraphConfig,
QueueConfig,
RowsIndexConfig,
)
WORKER_BLOCKED_DATASETS: list[str] = []
WORKER_CONTENT_MAX_BYTES = 10_000_000
WORKER_DIFFICULTY_MAX = None
WORKER_DIFFICULTY_MIN = None
WORKER_HEARTBEAT_INTERVAL_SECONDS = 60
WORKER_KILL_LONG_JOB_INTERVAL_SECONDS = 60
WORKER_KILL_ZOMBIES_INTERVAL_SECONDS = 10 * 60
WORKER_MAX_DISK_USAGE_PCT = 90
WORKER_MAX_JOB_DURATION_SECONDS = 20 * 60
WORKER_MAX_LOAD_PCT = 70
WORKER_MAX_MEMORY_PCT = 80
WORKER_MAX_MISSING_HEARTBEATS = 5
WORKER_SLEEP_SECONDS = 15
WORKER_STATE_FILE_PATH = None
def get_empty_str_list() -> list[str]:
return []
@dataclass(frozen=True)
class WorkerConfig:
blocked_datasets: list[str] = field(default_factory=WORKER_BLOCKED_DATASETS.copy)
content_max_bytes: int = WORKER_CONTENT_MAX_BYTES
difficulty_max: Optional[int] = WORKER_DIFFICULTY_MAX
difficulty_min: Optional[int] = WORKER_DIFFICULTY_MIN
heartbeat_interval_seconds: float = WORKER_HEARTBEAT_INTERVAL_SECONDS
job_types_blocked: list[str] = field(default_factory=get_empty_str_list)
job_types_only: list[str] = field(default_factory=get_empty_str_list)
kill_long_job_interval_seconds: float = WORKER_KILL_LONG_JOB_INTERVAL_SECONDS
kill_zombies_interval_seconds: float = WORKER_KILL_ZOMBIES_INTERVAL_SECONDS
max_disk_usage_pct: int = WORKER_MAX_DISK_USAGE_PCT
max_job_duration_seconds: float = WORKER_MAX_JOB_DURATION_SECONDS
max_load_pct: int = WORKER_MAX_LOAD_PCT
max_memory_pct: int = WORKER_MAX_MEMORY_PCT
max_missing_heartbeats: int = WORKER_MAX_MISSING_HEARTBEATS
sleep_seconds: float = WORKER_SLEEP_SECONDS
state_file_path: Optional[str] = WORKER_STATE_FILE_PATH
storage_paths: list[str] = field(default_factory=get_empty_str_list)
@classmethod
def from_env(cls) -> "WorkerConfig":
env = Env(expand_vars=True)
with env.prefixed("WORKER_"):
return cls(
blocked_datasets=env.list(name="BLOCKED_DATASETS", default=WORKER_BLOCKED_DATASETS.copy()),
content_max_bytes=env.int(name="CONTENT_MAX_BYTES", default=WORKER_CONTENT_MAX_BYTES),
difficulty_max=env.int(name="DIFFICULTY_MAX", default=WORKER_DIFFICULTY_MAX),
difficulty_min=env.int(name="DIFFICULTY_MIN", default=WORKER_DIFFICULTY_MIN),
heartbeat_interval_seconds=env.float(
name="HEARTBEAT_INTERVAL_SECONDS", default=WORKER_HEARTBEAT_INTERVAL_SECONDS
),
job_types_blocked=env.list(name="JOB_TYPES_BLOCKED", default=get_empty_str_list()),
job_types_only=env.list(name="JOB_TYPES_ONLY", default=get_empty_str_list()),
kill_long_job_interval_seconds=env.float(
name="KILL_LONG_JOB_INTERVAL_SECONDS", default=WORKER_KILL_LONG_JOB_INTERVAL_SECONDS
),
kill_zombies_interval_seconds=env.float(
name="KILL_ZOMBIES_INTERVAL_SECONDS", default=WORKER_KILL_ZOMBIES_INTERVAL_SECONDS
),
max_disk_usage_pct=env.int(name="MAX_DISK_USAGE_PCT", default=WORKER_MAX_DISK_USAGE_PCT),
max_job_duration_seconds=env.float(
name="MAX_JOB_DURATION_SECONDS", default=WORKER_MAX_JOB_DURATION_SECONDS
),
max_load_pct=env.int(name="MAX_LOAD_PCT", default=WORKER_MAX_LOAD_PCT),
max_memory_pct=env.int(name="MAX_MEMORY_PCT", default=WORKER_MAX_MEMORY_PCT),
max_missing_heartbeats=env.int(name="MAX_MISSING_HEARTBEATS", default=WORKER_MAX_MISSING_HEARTBEATS),
sleep_seconds=env.float(name="SLEEP_SECONDS", default=WORKER_SLEEP_SECONDS),
state_file_path=env.str(
name="STATE_FILE_PATH", default=WORKER_STATE_FILE_PATH
), # this environment variable is not expected to be set explicitly, it's set by the worker executor
storage_paths=env.list(name="STORAGE_PATHS", default=get_empty_str_list()),
)
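# Illustrative environment mapping (values are examples only): every field is read from a
# WORKER_-prefixed variable, lists being comma-separated, e.g.
#   WORKER_JOB_TYPES_ONLY="dataset-config-names,split-first-rows-from-streaming"
#   WORKER_MAX_DISK_USAGE_PCT=95
# yields WorkerConfig(job_types_only=[...], max_disk_usage_pct=95, ...) via WorkerConfig.from_env().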
DATASETS_BASED_HF_DATASETS_CACHE = None
@dataclass(frozen=True)
class DatasetsBasedConfig:
hf_datasets_cache: Optional[str] = DATASETS_BASED_HF_DATASETS_CACHE
@classmethod
def from_env(cls) -> "DatasetsBasedConfig":
env = Env(expand_vars=True)
with env.prefixed("DATASETS_BASED_"):
return cls(
hf_datasets_cache=env.str(name="HF_DATASETS_CACHE", default=DATASETS_BASED_HF_DATASETS_CACHE),
)
FIRST_ROWS_CELL_MIN_BYTES = 100
FIRST_ROWS_COLUMNS_MAX_NUMBER = 1_000
FIRST_ROWS_MAX_BYTES = 1_000_000
FIRST_ROWS_MAX_NUMBER = 100
FIRST_ROWS_MIN_NUMBER = 10
@dataclass(frozen=True)
class FirstRowsConfig:
columns_max_number: int = FIRST_ROWS_COLUMNS_MAX_NUMBER
max_bytes: int = FIRST_ROWS_MAX_BYTES
max_number: int = FIRST_ROWS_MAX_NUMBER
min_cell_bytes: int = FIRST_ROWS_CELL_MIN_BYTES
min_number: int = FIRST_ROWS_MIN_NUMBER
@classmethod
def from_env(cls) -> "FirstRowsConfig":
env = Env(expand_vars=True)
with env.prefixed("FIRST_ROWS_"):
return cls(
columns_max_number=env.int(name="COLUMNS_MAX_NUMBER", default=FIRST_ROWS_COLUMNS_MAX_NUMBER),
max_bytes=env.int(name="MAX_BYTES", default=FIRST_ROWS_MAX_BYTES),
max_number=env.int(name="MAX_NUMBER", default=FIRST_ROWS_MAX_NUMBER),
min_cell_bytes=env.int(name="CELL_MIN_BYTES", default=FIRST_ROWS_CELL_MIN_BYTES),
min_number=env.int(name="MIN_NUMBER", default=FIRST_ROWS_MIN_NUMBER),
)
OPT_IN_OUT_URLS_SCAN_COLUMNS_MAX_NUMBER = 10
OPT_IN_OUT_URLS_SCAN_MAX_CONCURRENT_REQUESTS_NUMBER = 100
OPT_IN_OUT_URLS_SCAN_MAX_REQUESTS_PER_SECOND = 50
OPT_IN_OUT_URLS_SCAN_ROWS_MAX_NUMBER = 100_000
OPT_IN_OUT_URLS_SCAN_SPAWNING_TOKEN = None
OPT_IN_OUT_URLS_SCAN_SPAWNING_URL = "https://opts-api.spawningaiapi.com/api/v2/query/urls"
OPT_IN_OUT_URLS_SCAN_URLS_NUMBER_PER_BATCH = 1000
@dataclass(frozen=True)
class OptInOutUrlsScanConfig:
    columns_max_number: int = OPT_IN_OUT_URLS_SCAN_COLUMNS_MAX_NUMBER
max_concurrent_requests_number: int = OPT_IN_OUT_URLS_SCAN_MAX_CONCURRENT_REQUESTS_NUMBER
max_requests_per_second: int = OPT_IN_OUT_URLS_SCAN_MAX_REQUESTS_PER_SECOND
rows_max_number: int = OPT_IN_OUT_URLS_SCAN_ROWS_MAX_NUMBER
spawning_token: Optional[str] = OPT_IN_OUT_URLS_SCAN_SPAWNING_TOKEN
spawning_url: str = OPT_IN_OUT_URLS_SCAN_SPAWNING_URL
urls_number_per_batch: int = OPT_IN_OUT_URLS_SCAN_URLS_NUMBER_PER_BATCH
@classmethod
def from_env(cls) -> "OptInOutUrlsScanConfig":
env = Env(expand_vars=True)
with env.prefixed("OPT_IN_OUT_URLS_SCAN_"):
return cls(
columns_max_number=env.int(name="COLUMNS_MAX_NUMBER", default=OPT_IN_OUT_URLS_SCAN_COLUMNS_MAX_NUMBER),
max_concurrent_requests_number=env.int(
name="MAX_CONCURRENT_REQUESTS_NUMBER", default=OPT_IN_OUT_URLS_SCAN_MAX_CONCURRENT_REQUESTS_NUMBER
),
max_requests_per_second=env.int(
name="MAX_REQUESTS_PER_SECOND", default=OPT_IN_OUT_URLS_SCAN_MAX_REQUESTS_PER_SECOND
),
rows_max_number=env.int(name="ROWS_MAX_NUMBER", default=OPT_IN_OUT_URLS_SCAN_ROWS_MAX_NUMBER),
spawning_token=env.str(name="SPAWNING_TOKEN", default=OPT_IN_OUT_URLS_SCAN_SPAWNING_TOKEN),
spawning_url=env.str(name="SPAWNING_URL", default=OPT_IN_OUT_URLS_SCAN_SPAWNING_URL),
urls_number_per_batch=env.int(
name="URLS_NUMBER_PER_BATCH", default=OPT_IN_OUT_URLS_SCAN_URLS_NUMBER_PER_BATCH
),
)
PARQUET_AND_INFO_COMMIT_MESSAGE = "Update parquet files"
PARQUET_AND_INFO_COMMITTER_HF_TOKEN = None
PARQUET_AND_INFO_MAX_DATASET_SIZE = 100_000_000
PARQUET_AND_INFO_MAX_EXTERNAL_DATA_FILES = 10_000
PARQUET_AND_INFO_MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY = 100_000_000
PARQUET_AND_INFO_NO_MAX_SIZE_LIMIT_DATASETS: list[str] = []
PARQUET_AND_INFO_SOURCE_REVISION = "main"
PARQUET_AND_INFO_TARGET_REVISION = "refs/convert/parquet"
PARQUET_AND_INFO_URL_TEMPLATE = "/datasets/%s/resolve/%s/%s"
@dataclass(frozen=True)
class ParquetAndInfoConfig:
blocked_datasets: list[str] = field(default_factory=get_empty_str_list)
commit_message: str = PARQUET_AND_INFO_COMMIT_MESSAGE
committer_hf_token: Optional[str] = PARQUET_AND_INFO_COMMITTER_HF_TOKEN
max_dataset_size: int = PARQUET_AND_INFO_MAX_DATASET_SIZE
max_external_data_files: int = PARQUET_AND_INFO_MAX_EXTERNAL_DATA_FILES
max_row_group_byte_size_for_copy: int = PARQUET_AND_INFO_MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY
no_max_size_limit_datasets: list[str] = field(default_factory=PARQUET_AND_INFO_NO_MAX_SIZE_LIMIT_DATASETS.copy)
source_revision: str = PARQUET_AND_INFO_SOURCE_REVISION
target_revision: str = PARQUET_AND_INFO_TARGET_REVISION
url_template: str = PARQUET_AND_INFO_URL_TEMPLATE
@classmethod
def from_env(cls) -> "ParquetAndInfoConfig":
env = Env(expand_vars=True)
with env.prefixed("PARQUET_AND_INFO_"):
return cls(
blocked_datasets=env.list(name="BLOCKED_DATASETS", default=get_empty_str_list()),
commit_message=env.str(name="COMMIT_MESSAGE", default=PARQUET_AND_INFO_COMMIT_MESSAGE),
committer_hf_token=env.str(name="COMMITTER_HF_TOKEN", default=PARQUET_AND_INFO_COMMITTER_HF_TOKEN),
max_dataset_size=env.int(name="MAX_DATASET_SIZE", default=PARQUET_AND_INFO_MAX_DATASET_SIZE),
max_external_data_files=env.int(
name="MAX_EXTERNAL_DATA_FILES", default=PARQUET_AND_INFO_MAX_EXTERNAL_DATA_FILES
),
max_row_group_byte_size_for_copy=env.int(
name="MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY", default=PARQUET_AND_INFO_MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY
),
no_max_size_limit_datasets=env.list(
name="NO_MAX_SIZE_LIMIT_DATASETS", default=PARQUET_AND_INFO_NO_MAX_SIZE_LIMIT_DATASETS.copy()
),
source_revision=env.str(name="SOURCE_REVISION", default=PARQUET_AND_INFO_SOURCE_REVISION),
target_revision=env.str(name="TARGET_REVISION", default=PARQUET_AND_INFO_TARGET_REVISION),
url_template=env.str(name="URL_TEMPLATE", default=PARQUET_AND_INFO_URL_TEMPLATE),
)
NUMBA_CACHE_DIR: Optional[str] = None
@dataclass(frozen=True)
class NumbaConfig:
path: Optional[str] = NUMBA_CACHE_DIR # not documented
@classmethod
def from_env(cls) -> "NumbaConfig":
env = Env(expand_vars=True)
with env.prefixed("NUMBA_"):
return cls(path=env.str(name="CACHE_DIR", default=NUMBA_CACHE_DIR))
CONFIG_NAMES_MAX_NUMBER = 3_000
@dataclass(frozen=True)
class ConfigNamesConfig:
max_number: int = CONFIG_NAMES_MAX_NUMBER
@classmethod
def from_env(cls) -> "ConfigNamesConfig":
env = Env(expand_vars=True)
with env.prefixed("CONFIG_NAMES_"):
return cls(
max_number=env.int(name="MAX_NUMBER", default=CONFIG_NAMES_MAX_NUMBER),
)
DUCKDB_INDEX_CACHE_DIRECTORY = None
DUCKDB_INDEX_COMMIT_MESSAGE = "Update duckdb index file"
DUCKDB_INDEX_COMMITTER_HF_TOKEN = None
DUCKDB_INDEX_MAX_PARQUET_SIZE_BYTES = 100_000_000
DUCKDB_INDEX_TARGET_REVISION = "refs/convert/parquet"
DUCKDB_INDEX_URL_TEMPLATE = "/datasets/%s/resolve/%s/%s"
DUCKDB_INDEX_EXTENSIONS_DIRECTORY: Optional[str] = None
@dataclass(frozen=True)
class DuckDbIndexConfig:
cache_directory: Optional[str] = DUCKDB_INDEX_CACHE_DIRECTORY
commit_message: str = DUCKDB_INDEX_COMMIT_MESSAGE
committer_hf_token: Optional[str] = DUCKDB_INDEX_COMMITTER_HF_TOKEN
target_revision: str = DUCKDB_INDEX_TARGET_REVISION
url_template: str = DUCKDB_INDEX_URL_TEMPLATE
max_parquet_size_bytes: int = DUCKDB_INDEX_MAX_PARQUET_SIZE_BYTES
extensions_directory: Optional[str] = DUCKDB_INDEX_EXTENSIONS_DIRECTORY
@classmethod
def from_env(cls) -> "DuckDbIndexConfig":
env = Env(expand_vars=True)
with env.prefixed("DUCKDB_INDEX_"):
return cls(
cache_directory=env.str(name="CACHE_DIRECTORY", default=DUCKDB_INDEX_CACHE_DIRECTORY),
commit_message=env.str(name="COMMIT_MESSAGE", default=DUCKDB_INDEX_COMMIT_MESSAGE),
committer_hf_token=env.str(name="COMMITTER_HF_TOKEN", default=DUCKDB_INDEX_COMMITTER_HF_TOKEN),
target_revision=env.str(name="TARGET_REVISION", default=DUCKDB_INDEX_TARGET_REVISION),
url_template=env.str(name="URL_TEMPLATE", default=DUCKDB_INDEX_URL_TEMPLATE),
max_parquet_size_bytes=env.int(
name="MAX_PARQUET_SIZE_BYTES", default=DUCKDB_INDEX_MAX_PARQUET_SIZE_BYTES
),
extensions_directory=env.str(name="EXTENSIONS_DIRECTORY", default=DUCKDB_INDEX_EXTENSIONS_DIRECTORY),
)
DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY = None
DESCRIPTIVE_STATISTICS_HISTOGRAM_NUM_BINS = 10
DESCRIPTIVE_STATISTICS_MAX_PARQUET_SIZE_BYTES = 100_000_000
@dataclass(frozen=True)
class DescriptiveStatisticsConfig:
cache_directory: Optional[str] = DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY
parquet_revision: str = PARQUET_AND_INFO_TARGET_REVISION
histogram_num_bins: int = DESCRIPTIVE_STATISTICS_HISTOGRAM_NUM_BINS
max_parquet_size_bytes: int = DESCRIPTIVE_STATISTICS_MAX_PARQUET_SIZE_BYTES
@classmethod
def from_env(cls) -> "DescriptiveStatisticsConfig":
env = Env(expand_vars=True)
parquet_revision = env.str(name="PARQUET_AND_INFO_TARGET_REVISION", default=PARQUET_AND_INFO_TARGET_REVISION)
with env.prefixed("DESCRIPTIVE_STATISTICS_"):
return cls(
cache_directory=env.str(name="CACHE_DIRECTORY", default=DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY),
parquet_revision=parquet_revision,
histogram_num_bins=env.int(
name="HISTOGRAM_NUM_BINS",
default=DESCRIPTIVE_STATISTICS_HISTOGRAM_NUM_BINS,
),
max_parquet_size_bytes=env.int(
name="MAX_PARQUET_SIZE_BYTES", default=DESCRIPTIVE_STATISTICS_MAX_PARQUET_SIZE_BYTES
),
)
@dataclass(frozen=True)
class AppConfig:
assets: AssetsConfig = field(default_factory=AssetsConfig)
cache: CacheConfig = field(default_factory=CacheConfig)
common: CommonConfig = field(default_factory=CommonConfig)
config_names: ConfigNamesConfig = field(default_factory=ConfigNamesConfig)
datasets_based: DatasetsBasedConfig = field(default_factory=DatasetsBasedConfig)
first_rows: FirstRowsConfig = field(default_factory=FirstRowsConfig)
log: LogConfig = field(default_factory=LogConfig)
numba: NumbaConfig = field(default_factory=NumbaConfig)
parquet_and_info: ParquetAndInfoConfig = field(default_factory=ParquetAndInfoConfig)
processing_graph: ProcessingGraphConfig = field(default_factory=ProcessingGraphConfig)
queue: QueueConfig = field(default_factory=QueueConfig)
rows_index: RowsIndexConfig = field(default_factory=RowsIndexConfig)
worker: WorkerConfig = field(default_factory=WorkerConfig)
urls_scan: OptInOutUrlsScanConfig = field(default_factory=OptInOutUrlsScanConfig)
parquet_metadata: ParquetMetadataConfig = field(default_factory=ParquetMetadataConfig)
duckdb_index: DuckDbIndexConfig = field(default_factory=DuckDbIndexConfig)
descriptive_statistics: DescriptiveStatisticsConfig = field(default_factory=DescriptiveStatisticsConfig)
@classmethod
def from_env(cls) -> "AppConfig":
return cls(
assets=AssetsConfig.from_env(),
common=CommonConfig.from_env(),
config_names=ConfigNamesConfig.from_env(),
cache=CacheConfig.from_env(),
datasets_based=DatasetsBasedConfig.from_env(),
first_rows=FirstRowsConfig.from_env(),
log=LogConfig.from_env(),
numba=NumbaConfig.from_env(),
parquet_and_info=ParquetAndInfoConfig.from_env(),
processing_graph=ProcessingGraphConfig.from_env(),
queue=QueueConfig.from_env(),
worker=WorkerConfig.from_env(),
urls_scan=OptInOutUrlsScanConfig.from_env(),
parquet_metadata=ParquetMetadataConfig.from_env(),
duckdb_index=DuckDbIndexConfig.from_env(),
descriptive_statistics=DescriptiveStatisticsConfig.from_env(),
rows_index=RowsIndexConfig.from_env(),
)
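# Illustrative usage (as done by the worker entrypoints, e.g. start_worker_loop.py): the whole
# configuration tree is built from environment variables in a single call, falling back to the
# module-level defaults above when a variable is unset:
#   app_config = AppConfig.from_env()
#   assert app_config.worker.content_max_bytes == WORKER_CONTENT_MAX_BYTES  # env var unset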
| datasets-server-main | services/worker/src/worker/config.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from collections.abc import Mapping
from dataclasses import dataclass, field
from typing import Any, Optional, TypedDict, Union
from libcommon.utils import FeatureItem, Row, RowItem, SplitHubFile
class JobRunnerInfo(TypedDict):
job_type: str
job_runner_version: int
@dataclass
class JobResult:
content: Mapping[str, Any]
progress: float
def __post_init__(self) -> None:
if self.progress < 0.0 or self.progress > 1.0:
raise ValueError(f"Progress should be between 0 and 1, but got {self.progress}")
@dataclass
class CompleteJobResult(JobResult):
content: Mapping[str, Any]
progress: float = field(init=False, default=1.0)
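# Illustrative behaviour of the two result types above:
#   JobResult(content={"some": "content"}, progress=0.5)    # partial result, validated in __post_init__
#   CompleteJobResult(content={"some": "content"})          # progress cannot be passed and is fixed to 1.0
#   JobResult(content={}, progress=1.5)                     # raises ValueError("Progress should be between 0 and 1 ...")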
class DatasetItem(TypedDict):
dataset: str
class ConfigItem(DatasetItem):
config: Optional[str]
class SplitItem(ConfigItem):
split: Optional[str]
class FullConfigItem(DatasetItem):
config: str
class FullSplitItem(FullConfigItem):
split: str
class SplitsList(TypedDict):
splits: list[FullSplitItem]
class FailedConfigItem(FullConfigItem):
error: Mapping[str, Any]
class DatasetSplitNamesResponse(TypedDict):
splits: list[FullSplitItem]
pending: list[FullConfigItem]
failed: list[FailedConfigItem]
class PreviousJob(TypedDict):
dataset: str
config: Optional[str]
    split: Optional[str]
kind: str
class SplitFirstRowsResponse(FullSplitItem):
features: list[FeatureItem]
rows: list[RowItem]
truncated: Optional[bool]
class OptUrl(TypedDict):
url: str
row_idx: int
column_name: str
class OptInOutUrlsCountResponse(TypedDict):
urls_columns: list[str]
num_opt_in_urls: int
num_opt_out_urls: int
num_urls: int
num_scanned_rows: int
has_urls_columns: bool
full_scan: Union[bool, None]
class OptInOutUrlsScanResponse(OptInOutUrlsCountResponse):
opt_in_urls: list[OptUrl]
opt_out_urls: list[OptUrl]
class ImageUrlColumnsResponse(TypedDict):
columns: list[str]
class RowsContent(TypedDict):
rows: list[Row]
all_fetched: bool
class ConfigInfoResponse(TypedDict):
dataset_info: dict[str, Any]
partial: bool
class ConfigParquetAndInfoResponse(TypedDict):
parquet_files: list[SplitHubFile]
dataset_info: dict[str, Any]
partial: bool
class ParquetFileMetadataItem(SplitItem):
url: str
filename: str
size: int
num_rows: int
parquet_metadata_subpath: str
class ConfigParquetMetadataResponse(TypedDict):
parquet_files_metadata: list[ParquetFileMetadataItem]
features: Optional[dict[str, Any]]
partial: bool
class ConfigParquetResponse(TypedDict):
parquet_files: list[SplitHubFile]
features: Optional[dict[str, Any]]
partial: bool
class ConfigSize(TypedDict):
dataset: str
config: str
num_bytes_original_files: Optional[
int
] # optional because partial parquet conversion can't provide the size of the original data
num_bytes_parquet_files: int
num_bytes_memory: int
num_rows: int
num_columns: int
class SplitSize(TypedDict):
dataset: str
config: str
split: str
num_bytes_parquet_files: int
num_bytes_memory: int
num_rows: int
num_columns: int
class ConfigSizeContent(TypedDict):
config: ConfigSize
splits: list[SplitSize]
class ConfigSizeResponse(TypedDict):
size: ConfigSizeContent
partial: bool
class ConfigNameItem(TypedDict):
dataset: str
config: str
class DatasetConfigNamesResponse(TypedDict):
config_names: list[ConfigNameItem]
class DatasetInfoResponse(TypedDict):
dataset_info: dict[str, Any]
pending: list[PreviousJob]
failed: list[PreviousJob]
partial: bool
class IsValidResponse(TypedDict):
preview: bool
viewer: bool
search: bool
class DatasetHubCacheResponse(TypedDict):
preview: bool
viewer: bool
partial: bool
num_rows: int
class DatasetParquetResponse(TypedDict):
parquet_files: list[SplitHubFile]
pending: list[PreviousJob]
failed: list[PreviousJob]
partial: bool
class DatasetSize(TypedDict):
dataset: str
num_bytes_original_files: Optional[int]
num_bytes_parquet_files: int
num_bytes_memory: int
num_rows: int
class DatasetSizeContent(TypedDict):
dataset: DatasetSize
configs: list[ConfigSize]
splits: list[SplitSize]
class DatasetSizeResponse(TypedDict):
size: DatasetSizeContent
pending: list[PreviousJob]
failed: list[PreviousJob]
partial: bool
| datasets-server-main | services/worker/src/worker/dtos.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/src/worker/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import sys
from libcommon.log import init_logging
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.storage import (
init_assets_dir,
init_duckdb_index_cache_dir,
init_parquet_metadata_dir,
init_statistics_cache_dir,
)
from worker.config import AppConfig
from worker.job_runner_factory import JobRunnerFactory
from worker.loop import Loop
from worker.resources import LibrariesResource
if __name__ == "__main__":
app_config = AppConfig.from_env()
state_file_path = app_config.worker.state_file_path
if "--print-worker-state-path" in sys.argv:
print(state_file_path, flush=True)
if not state_file_path:
raise RuntimeError("The worker state file path is not set. Exiting.")
init_logging(level=app_config.log.level)
# ^ set first to have logs as soon as possible
assets_directory = init_assets_dir(directory=app_config.assets.storage_directory)
parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory)
duckdb_index_cache_directory = init_duckdb_index_cache_dir(directory=app_config.duckdb_index.cache_directory)
statistics_cache_directory = init_statistics_cache_dir(app_config.descriptive_statistics.cache_directory)
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
with (
LibrariesResource(
hf_endpoint=app_config.common.hf_endpoint,
init_hf_datasets_cache=app_config.datasets_based.hf_datasets_cache,
numba_path=app_config.numba.path,
) as libraries_resource,
CacheMongoResource(
database=app_config.cache.mongo_database, host=app_config.cache.mongo_url
) as cache_resource,
QueueMongoResource(
database=app_config.queue.mongo_database, host=app_config.queue.mongo_url
) as queue_resource,
):
if not cache_resource.is_available():
raise RuntimeError("The connection to the cache database could not be established. Exiting.")
if not queue_resource.is_available():
raise RuntimeError("The connection to the queue database could not be established. Exiting.")
job_runner_factory = JobRunnerFactory(
app_config=app_config,
processing_graph=processing_graph,
hf_datasets_cache=libraries_resource.hf_datasets_cache,
assets_directory=assets_directory,
parquet_metadata_directory=parquet_metadata_directory,
duckdb_index_cache_directory=duckdb_index_cache_directory,
statistics_cache_directory=statistics_cache_directory,
)
loop = Loop(
library_cache_paths=libraries_resource.storage_paths,
job_runner_factory=job_runner_factory,
state_file_path=state_file_path,
app_config=app_config,
processing_graph=processing_graph,
)
loop.run()
| datasets-server-main | services/worker/src/worker/start_worker_loop.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from http import HTTPStatus
from typing import Optional
from libcommon.config import CommonConfig
from libcommon.exceptions import (
CustomError,
DatasetNotFoundError,
DatasetScriptError,
JobManagerCrashedError,
JobManagerExceededMaximumDurationError,
ResponseAlreadyComputedError,
TooBigContentError,
UnexpectedError,
)
from libcommon.orchestrator import DatasetOrchestrator
from libcommon.processing_graph import ProcessingGraph, ProcessingStep
from libcommon.simple_cache import (
CachedArtifactError,
CacheEntryDoesNotExistError,
get_response_without_content_params,
)
from libcommon.utils import JobInfo, JobParams, JobResult, Priority, orjson_dumps
from worker.config import AppConfig, WorkerConfig
from worker.job_runner import JobRunner
from worker.utils import is_dataset_script_error
class JobManager:
"""
    A job manager handles the computation of a job runner for a specific processing step.
    Args:
        job_info (:obj:`JobInfo`):
            The job to process. It contains the job_id, the job type, the dataset, the revision, the config,
            the split and the priority level.
        app_config (:obj:`AppConfig`):
            The application config.
        job_runner (:obj:`JobRunner`):
            The job runner that computes the content of the response.
        processing_graph (:obj:`ProcessingGraph`):
            The processing graph.
"""
job_id: str
job_params: JobParams
priority: Priority
worker_config: WorkerConfig
common_config: CommonConfig
processing_step: ProcessingStep
processing_graph: ProcessingGraph
job_runner: JobRunner
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
job_runner: JobRunner,
processing_graph: ProcessingGraph,
) -> None:
self.job_info = job_info
self.job_type = job_info["type"]
self.job_id = job_info["job_id"]
self.priority = job_info["priority"]
self.job_params = job_info["params"]
self.common_config = app_config.common
self.worker_config = app_config.worker
self.job_runner = job_runner
self.processing_graph = processing_graph
self.processing_step = self.job_runner.processing_step
self.setup()
def setup(self) -> None:
job_type = self.job_runner.get_job_type()
if self.processing_step.job_type != job_type:
raise ValueError(
f"The processing step's job type is {self.processing_step.job_type}, but the job manager only"
f" processes {job_type}"
)
if self.job_type != job_type:
raise ValueError(
f"The submitted job type is {self.job_type}, but the job manager only processes {job_type}"
)
def __str__(self) -> str:
return f"JobManager(job_id={self.job_id} dataset={self.job_params['dataset']} job_info={self.job_info}"
def log(self, level: int, msg: str) -> None:
logging.log(level=level, msg=f"[{self.processing_step.job_type}] {msg}")
def debug(self, msg: str) -> None:
self.log(level=logging.DEBUG, msg=msg)
def info(self, msg: str) -> None:
self.log(level=logging.INFO, msg=msg)
def warning(self, msg: str) -> None:
self.log(level=logging.WARNING, msg=msg)
def exception(self, msg: str) -> None:
self.log(level=logging.ERROR, msg=msg)
def critical(self, msg: str) -> None:
self.log(level=logging.CRITICAL, msg=msg)
def run_job(self) -> JobResult:
try:
self.job_runner.validate()
job_result: JobResult = self.process()
except Exception:
job_result = {
"job_info": self.job_info,
"job_runner_version": self.job_runner.get_job_runner_version(),
"is_success": False,
"output": None,
}
result_str = "SUCCESS" if job_result["is_success"] else "ERROR"
self.debug(f"job output with {result_str} - {self}")
return job_result
def finish(self, job_result: JobResult) -> None:
DatasetOrchestrator(
dataset=self.job_params["dataset"],
processing_graph=self.processing_graph,
).finish_job(job_result=job_result)
def raise_if_parallel_response_exists(self, parallel_cache_kind: str, parallel_job_version: int) -> None:
try:
existing_response = get_response_without_content_params(
kind=parallel_cache_kind,
job_params=self.job_params,
)
if (
existing_response["http_status"] == HTTPStatus.OK
and existing_response["job_runner_version"] == parallel_job_version
and existing_response["progress"] == 1.0 # completed response
and existing_response["dataset_git_revision"] == self.job_params["revision"]
):
raise ResponseAlreadyComputedError(
f"Response has already been computed and stored in cache kind: {parallel_cache_kind}. Compute will"
" be skipped."
)
except CacheEntryDoesNotExistError:
logging.debug(f"no cache found for {parallel_cache_kind}.")
def process(
self,
) -> JobResult:
self.info(f"compute {self}")
if self.job_info["params"]["dataset"] in self.worker_config.blocked_datasets:
self.debug(f"the dataset={self.job_params['dataset']} is blocked, don't update the cache")
return {
"job_info": self.job_info,
"job_runner_version": self.job_runner.get_job_runner_version(),
"is_success": False,
"output": None,
}
try:
try:
self.job_runner.pre_compute()
parallel_job_runner = self.job_runner.get_parallel_job_runner()
if parallel_job_runner:
self.raise_if_parallel_response_exists(
parallel_cache_kind=parallel_job_runner["job_type"],
parallel_job_version=parallel_job_runner["job_runner_version"],
)
job_result = self.job_runner.compute()
content = job_result.content
# Validate content size
if len(orjson_dumps(content)) > self.worker_config.content_max_bytes:
raise TooBigContentError(
"The computed response content exceeds the supported size in bytes"
f" ({self.worker_config.content_max_bytes})."
)
finally:
# ensure the post_compute hook is called even if the compute raises an exception
self.job_runner.post_compute()
self.debug(
f"dataset={self.job_params['dataset']} revision={self.job_params['revision']} job_info={self.job_info}"
" is valid"
)
return {
"job_info": self.job_info,
"job_runner_version": self.job_runner.get_job_runner_version(),
"is_success": True,
"output": {
"content": content,
"http_status": HTTPStatus.OK,
"error_code": None,
"details": None,
"progress": job_result.progress,
},
}
except DatasetNotFoundError:
# To avoid filling the cache, we don't save this error. Otherwise, DoS is possible.
self.debug(f"the dataset={self.job_params['dataset']} could not be found, don't update the cache")
return {
"job_info": self.job_info,
"job_runner_version": self.job_runner.get_job_runner_version(),
"is_success": False,
"output": None,
}
except CachedArtifactError as err:
# A previous step (cached artifact required by the job runner) is an error. We copy the cached entry,
# so that users can see the underlying error (they are not interested in the internals of the graph).
# We add an entry to details: "copied_from_artifact", with its identification details, to have a chance
# to debug if needed.
self.debug(f"response for job_info={self.job_info} had an error from a previous step")
return {
"job_info": self.job_info,
"job_runner_version": self.job_runner.get_job_runner_version(),
"is_success": False,
"output": {
"content": err.cache_entry_with_details["content"],
"http_status": err.cache_entry_with_details["http_status"],
"error_code": err.cache_entry_with_details["error_code"],
"details": err.enhanced_details,
"progress": None,
},
}
except Exception as err:
e = (
err
if isinstance(err, CustomError)
else DatasetScriptError(str(err), err)
if is_dataset_script_error()
else UnexpectedError(str(err), err)
)
self.debug(f"response for job_info={self.job_info} had an error")
return {
"job_info": self.job_info,
"job_runner_version": self.job_runner.get_job_runner_version(),
"is_success": False,
"output": {
"content": dict(e.as_response()),
"http_status": e.status_code,
"error_code": e.code,
"details": dict(e.as_response_with_cause()),
"progress": None,
},
}
def set_crashed(self, message: str, cause: Optional[BaseException] = None) -> None:
self.debug(
"response for"
f" dataset={self.job_params['dataset']} revision={self.job_params['revision']} job_info={self.job_info}"
" had an error (crashed)"
)
error = JobManagerCrashedError(message=message, cause=cause)
self.finish(
job_result={
"job_info": self.job_info,
"job_runner_version": self.job_runner.get_job_runner_version(),
"is_success": False,
"output": {
"content": dict(error.as_response()),
"http_status": error.status_code,
"error_code": error.code,
"details": dict(error.as_response_with_cause()),
"progress": None,
},
}
)
def set_exceeded_maximum_duration(self, message: str, cause: Optional[BaseException] = None) -> None:
self.debug(
"response for"
f" dataset={self.job_params['dataset']} revision={self.job_params['revision']} job_info={self.job_info}"
" had an error (exceeded maximum duration)"
)
error = JobManagerExceededMaximumDurationError(message=message, cause=cause)
self.finish(
job_result={
"job_info": self.job_info,
"job_runner_version": self.job_runner.get_job_runner_version(),
"is_success": False,
"output": {
"content": dict(error.as_response()),
"http_status": error.status_code,
"error_code": error.code,
"details": dict(error.as_response_with_cause()),
"progress": None,
},
}
)
| datasets-server-main | services/worker/src/worker/job_manager.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import functools
import itertools
import logging
import sys
import time
import traceback
import warnings
from collections.abc import Callable, Sequence
from typing import Any, Optional, TypeVar, Union, cast
from urllib.parse import quote
import PIL
import requests
from datasets import Dataset, DatasetInfo, DownloadConfig, IterableDataset, load_dataset
from datasets.utils.file_utils import get_authentication_headers_for_url
from fsspec.implementations.http import HTTPFileSystem
from huggingface_hub.hf_api import HfApi
from huggingface_hub.utils._errors import RepositoryNotFoundError
from libcommon.constants import EXTERNAL_DATASET_SCRIPT_PATTERN
from libcommon.exceptions import (
ConfigNotFoundError,
DatasetNotFoundError,
NormalRowsError,
PreviousStepFormatError,
SplitNotFoundError,
StreamingRowsError,
)
from libcommon.simple_cache import get_previous_step_or_raise
from libcommon.utils import Row, RowItem, orjson_dumps
from pyarrow.parquet import ParquetFile
from worker.dtos import RowsContent
MAX_IMAGE_PIXELS = 10_000_000_000
# ^ see https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.MAX_IMAGE_PIXELS
def get_json_size(obj: Any) -> int:
"""Returns the size of an object in bytes once serialized as JSON
Args:
obj (Any): the Python object
Returns:
int: the size of the serialized object in bytes
"""
return len(orjson_dumps(obj))
# from https://stackoverflow.com/a/43848928/7351594
def utf8_lead_byte(b: int) -> bool:
"""A UTF-8 intermediate byte starts with the bits 10xxxxxx."""
return (b & 0xC0) != 0x80
def utf8_byte_truncate(text: str, max_bytes: int) -> str:
"""If text[max_bytes] is not a lead byte, back up until a lead byte is
found and truncate before that character."""
utf8 = text.encode("utf8")
if len(utf8) <= max_bytes:
return text
i = max_bytes
while i > 0 and not utf8_lead_byte(utf8[i]):
i -= 1
return utf8[:i].decode("utf8", "ignore")
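# Illustrative example (not part of the original module): truncating "abcé" to 4 bytes backs
# up past the 2-byte "é" (0xC3 0xA9) instead of splitting it:
#   utf8_byte_truncate("abcé", max_bytes=4) -> "abc"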
# Mutates row_item, and returns it anyway
def truncate_row_item(row_item: RowItem, min_cell_bytes: int, columns_to_keep_untruncated: list[str]) -> RowItem:
row = {}
for column_name, cell in row_item["row"].items():
# for now: all the cells above min_cell_bytes are truncated to min_cell_bytes
# it's done by replacing the cell (which can have any type) by a string with
# its JSON serialization, and then truncating it to min_cell_bytes
cell_json = orjson_dumps(cell)
if len(cell_json) <= min_cell_bytes or column_name in columns_to_keep_untruncated:
row[column_name] = cell
else:
cell_json_str = cell_json.decode("utf8", "ignore")
row_item["truncated_cells"].append(column_name)
row[column_name] = utf8_byte_truncate(text=cell_json_str, max_bytes=min_cell_bytes)
row_item["row"] = row
# row_idx = row_item["row_idx"]
# logging.debug(f"the size of the rows is now ({rows_bytes}) after truncating row idx={row_idx}")
return row_item
COMMA_SIZE = 1 # the comma "," is encoded with one byte in utf-8
# Mutates row_items, and returns them anyway
def truncate_row_items(
row_items: list[RowItem], min_cell_bytes: int, rows_max_bytes: int, columns_to_keep_untruncated: list[str]
) -> list[RowItem]:
# compute the current size
rows_bytes = sum(get_json_size(row_item) for row_item in row_items) + COMMA_SIZE * (len(row_items) - 1)
# Loop backwards, so that the last rows are truncated first
for row_item in reversed(row_items):
if rows_bytes < rows_max_bytes:
break
previous_size = get_json_size(row_item) + COMMA_SIZE
row_item = truncate_row_item(
row_item=row_item, min_cell_bytes=min_cell_bytes, columns_to_keep_untruncated=columns_to_keep_untruncated
)
new_size = get_json_size(row_item) + COMMA_SIZE
rows_bytes += new_size - previous_size
return row_items
def to_row_item(row_idx: int, row: Row) -> RowItem:
return {
"row_idx": row_idx,
"row": row,
"truncated_cells": [],
}
def create_truncated_row_items(
rows: list[Row],
min_cell_bytes: int,
rows_max_bytes: int,
rows_min_number: int,
columns_to_keep_untruncated: list[str],
) -> tuple[list[RowItem], bool]:
row_items = []
rows_bytes = 0
# two restrictions must be enforced:
# - at least rows_min_number rows
# - at most rows_max_bytes bytes. Note that it's the limit to the sum of the rows sizes. The JSON response size
# will be greater, due to the other fields (row_idx, truncated_cells, features, etc.).
# To enforce this:
# 1. first get the first rows_min_number rows
for row_idx, row in enumerate(rows[:rows_min_number]):
row_item = to_row_item(row_idx=row_idx, row=row)
rows_bytes += get_json_size(row_item) + COMMA_SIZE
row_items.append(row_item)
# 2. if the total is over the bytes limit, truncate the values, iterating backwards starting
# from the last rows, until getting under the threshold
# caveat: the truncation might not be enough to get under the threshold if:
# - the number of columns is too high
# - rows_max_bytes is too low (or even negative)
if rows_bytes >= rows_max_bytes:
# logging.debug(
# f"the size of the first {rows_min_number} rows ({rows_bytes}) is above the max number of bytes"
# f" ({rows_max_bytes}), they will be truncated"
# )
truncated_row_items = truncate_row_items(
row_items=row_items,
min_cell_bytes=min_cell_bytes,
rows_max_bytes=rows_max_bytes,
columns_to_keep_untruncated=columns_to_keep_untruncated,
)
return truncated_row_items, len(truncated_row_items) < len(rows)
# 3. else: add the remaining rows until the end, or until the bytes threshold
for idx, row in enumerate(rows[rows_min_number:]):
row_idx = rows_min_number + idx
row_item = to_row_item(row_idx=row_idx, row=row)
rows_bytes += get_json_size(row_item) + COMMA_SIZE
if rows_bytes >= rows_max_bytes:
# logging.debug(
# f"the rows in the split have been truncated to {row_idx} row(s) to keep the size"
# f" ({rows_bytes}) under the limit ({rows_max_bytes})"
# )
break
row_items.append(row_item)
return row_items, len(row_items) < len(rows)
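# Illustrative sketch (not part of the original module): how the truncation helpers above are
# typically chained. The rows, byte budgets and column list are made-up values.
def _example_create_truncated_row_items() -> tuple[list[RowItem], bool]:
    rows: list[Row] = [{"text": "a" * 100}, {"text": "b" * 100}, {"text": "c" * 100}]
    return create_truncated_row_items(
        rows=rows,
        min_cell_bytes=10,  # cells bigger than this may be truncated to this size
        rows_max_bytes=150,  # upper bound on the summed size of the serialized rows
        rows_min_number=1,  # always return at least this many rows
        columns_to_keep_untruncated=[],
    )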
FuncT = TypeVar("FuncT", bound=Callable[..., Any])
RETRY_SLEEPS = (1, 1, 1, 10, 10, 10, 60, 60, 60, 10 * 60)
RETRY_ON: tuple[type[Exception]] = (Exception,)
class retry:
"""retries with an increasing sleep before every attempt"""
def __init__(self, sleeps: Sequence[int] = RETRY_SLEEPS, on: Sequence[type[Exception]] = RETRY_ON) -> None:
self.sleeps = sleeps
self.on = on
def __call__(self, func: FuncT) -> FuncT:
@functools.wraps(func)
def decorator(*args: Any, **kwargs: Any) -> Any:
attempt = 0
last_err = None
while attempt < len(self.sleeps):
try:
"""always sleep before calling the function. It will prevent rate limiting in the first place"""
duration = self.sleeps[attempt]
logging.info(f"Sleep during {duration} seconds to preventively mitigate rate limiting.")
time.sleep(duration)
return func(*args, **kwargs)
except tuple(self.on) as err:
logging.info(f"Got a {type(err)}. Let's retry.")
last_err = err
attempt += 1
raise RuntimeError(f"Give up after {attempt} attempts. The last one raised {type(last_err)}") from last_err
return cast(FuncT, decorator)
@retry(on=[ConnectionError])
def get_rows(
dataset: str,
config: str,
split: str,
streaming: bool,
rows_max_number: int,
token: Union[bool, str, None] = False,
column_names: Optional[list[str]] = None,
) -> RowsContent:
download_config = DownloadConfig(delete_extracted=True)
PIL.Image.MAX_IMAGE_PIXELS = MAX_IMAGE_PIXELS
ds = load_dataset(
dataset,
name=config,
split=split,
streaming=streaming,
token=token,
download_config=download_config,
)
if streaming:
if not isinstance(ds, IterableDataset):
raise TypeError("load_dataset should return an IterableDataset in streaming mode")
elif not isinstance(ds, Dataset):
raise TypeError("load_dataset should return a Dataset in normal mode")
if column_names:
ds = ds.select_columns(column_names)
rows_plus_one = list(itertools.islice(ds, rows_max_number + 1))
# ^^ to be able to detect if a split has exactly ROWS_MAX_NUMBER rows
rows = rows_plus_one[:rows_max_number]
all_fetched = len(rows_plus_one) <= rows_max_number
if all_fetched:
logging.debug(f"all the rows in the split have been fetched ({len(rows_plus_one)})")
else:
logging.debug(f"the rows in the split have been truncated ({rows_max_number} rows)")
return RowsContent(rows=rows, all_fetched=all_fetched)
def get_rows_or_raise(
dataset: str,
config: str,
split: str,
rows_max_number: int,
token: Union[bool, str, None],
info: DatasetInfo,
max_size_fallback: Optional[int] = None,
    column_names: Optional[list[str]] = None,
) -> RowsContent:
try:
return get_rows(
dataset=dataset,
config=config,
split=split,
streaming=True,
rows_max_number=rows_max_number,
token=token,
column_names=column_names,
)
except Exception as err:
MAX_SIZE_FALLBACK = 100_000_000
if max_size_fallback:
warnings.warn(
(
f"The parameter 'max_size_fallback' is deprecated. The hard-coded value `{MAX_SIZE_FALLBACK}`"
" will be used instead."
),
category=DeprecationWarning,
)
if info.size_in_bytes is None or info.size_in_bytes > MAX_SIZE_FALLBACK:
raise StreamingRowsError(
"Cannot load the dataset split (in streaming mode) to extract the first rows.",
cause=err,
) from err
try:
return get_rows(
dataset=dataset,
config=config,
split=split,
streaming=False,
rows_max_number=rows_max_number,
token=token,
)
except Exception as err:
raise NormalRowsError(
"Cannot load the dataset split (in normal download mode) to extract the first rows.",
cause=err,
) from err
# TODO: use huggingface_hub's hf_hub_url after
# https://github.com/huggingface/huggingface_hub/issues/1082
def hf_hub_url(repo_id: str, filename: str, hf_endpoint: str, revision: str, url_template: str) -> str:
return (hf_endpoint + url_template) % (repo_id, quote(revision, safe=""), filename)
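# Illustrative sketch (not part of the original module): with a resolve-style template (an
# assumed value, normally provided by the worker configuration), the helper above expands to a
# regular Hub "resolve" URL.
def _example_hf_hub_url() -> str:
    # -> "https://huggingface.co/user/dataset/resolve/refs%2Fconvert%2Fparquet/default/train/0000.parquet"
    return hf_hub_url(
        repo_id="user/dataset",
        filename="default/train/0000.parquet",
        hf_endpoint="https://huggingface.co",
        revision="refs/convert/parquet",
        url_template="/%s/resolve/%s/%s",
    )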
def get_parquet_file(url: str, fs: HTTPFileSystem, hf_token: Optional[str]) -> ParquetFile:
headers = get_authentication_headers_for_url(url, token=hf_token)
return ParquetFile(fs.open(url, headers=headers))
DATASET_TYPE = "dataset"
HF_HUB_HTTP_ERROR_RETRY_SLEEPS = [1, 1, 1, 10, 10, 10]
LIST_REPO_REFS_RETRY_SLEEPS = [1, 1, 1, 10, 10]
LOCK_GIT_BRANCH_RETRY_SLEEPS = [1, 1, 1, 1, 1, 10, 10, 10, 10, 100] * 3
def create_branch(dataset: str, target_revision: str, hf_api: HfApi, committer_hf_api: HfApi) -> None:
try:
refs = retry(on=[requests.exceptions.ConnectionError], sleeps=LIST_REPO_REFS_RETRY_SLEEPS)(
hf_api.list_repo_refs
)(repo_id=dataset, repo_type=DATASET_TYPE)
if all(ref.ref != target_revision for ref in refs.converts):
initial_commit = hf_api.list_repo_commits(repo_id=dataset, repo_type=DATASET_TYPE)[-1].commit_id
committer_hf_api.create_branch(
repo_id=dataset, branch=target_revision, repo_type=DATASET_TYPE, revision=initial_commit, exist_ok=True
)
except RepositoryNotFoundError as err:
raise DatasetNotFoundError("The dataset does not exist on the Hub (was deleted during job).") from err
def check_config_exists(dataset: str, config: str) -> None:
"""
    Check if the dataset has the provided config. The dataset's configs are taken from the 'dataset-config-names' step's cache.
"""
config_names_best_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
try:
configs_content = config_names_best_response.response["content"]["config_names"]
except Exception as e:
raise PreviousStepFormatError(
"Previous steps 'dataset-config-names' did not return the expected content.",
e,
) from e
if config not in [config_item["config"] for config_item in configs_content]:
raise ConfigNotFoundError(f"Config '{config}' does not exist for dataset '{dataset}'")
def check_split_exists(dataset: str, config: str, split: str) -> None:
"""
    Check if the dataset has the provided split in the provided config. The dataset's splits are taken from the best response
of 'config-split-names-from-streaming' and 'config-split-names-from-info' steps' cache.
"""
check_config_exists(dataset, config)
split_names_best_response = get_previous_step_or_raise(
kinds=["config-split-names-from-streaming", "config-split-names-from-info"], dataset=dataset, config=config
)
try:
splits_content = split_names_best_response.response["content"]["splits"]
except Exception as e:
raise PreviousStepFormatError(
(
"Previous steps 'config-split-names-from-streaming' and 'config-split-names-from-info did not return"
" the expected content."
),
e,
) from e
if split not in [split_item["split"] for split_item in splits_content]:
raise SplitNotFoundError(f"Split '{split}' does not exist for the config '{config}' of the dataset.")
def is_dataset_script_error() -> bool:
(t, v, tb) = sys.exc_info()
cause_traceback: list[str] = traceback.format_exception(t, v, tb)
return any(EXTERNAL_DATASET_SCRIPT_PATTERN in cause for cause in cause_traceback)
| datasets-server-main | services/worker/src/worker/utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
import random
import time
from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional, TypedDict
import orjson
from filelock import FileLock
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import (
AlreadyStartedJobError,
EmptyQueueError,
LockTimeoutError,
NoWaitingJobError,
Queue,
)
from libcommon.utils import JobInfo, get_datetime
from psutil import cpu_count, disk_usage, getloadavg, swap_memory, virtual_memory
from worker.config import AppConfig
from worker.job_manager import JobManager
from worker.job_runner_factory import BaseJobRunnerFactory
class WorkerState(TypedDict):
current_job_info: Optional[JobInfo]
last_updated: datetime
@dataclass
class Loop:
"""
A loop gets jobs from a queue and processes them.
Once initialized, the loop can be started with the `run` method and will run until an uncaught exception
is raised.
Args:
job_runner_factory (`JobRunnerFactory`):
The job runner factory that will create a job runner for each job. Must be able to process the jobs of the
queue.
library_cache_paths (`set[str]`):
The paths of the library caches. Used to check if the disk is full.
        app_config (`AppConfig`):
            The application config.
        processing_graph (`ProcessingGraph`):
            The processing graph.
state_file_path (`str`):
The path of the file where the state of the loop will be saved.
"""
job_runner_factory: BaseJobRunnerFactory
library_cache_paths: set[str]
app_config: AppConfig
processing_graph: ProcessingGraph
state_file_path: str
storage_paths: set[str] = field(init=False)
def __post_init__(self) -> None:
self.queue = Queue()
self.storage_paths = set(self.app_config.worker.storage_paths).union(self.library_cache_paths)
def has_memory(self) -> bool:
if self.app_config.worker.max_memory_pct <= 0:
return True
virtual_memory_used = int(virtual_memory().used)
virtual_memory_total = int(virtual_memory().total)
        percent = 100 * (swap_memory().used + virtual_memory_used) / (swap_memory().total + virtual_memory_total)
ok = percent < self.app_config.worker.max_memory_pct
if not ok:
logging.info(
f"memory usage (RAM + SWAP) is too high: {percent:.0f}% - max is"
f" {self.app_config.worker.max_memory_pct}%"
)
return ok
def has_cpu(self) -> bool:
if self.app_config.worker.max_load_pct <= 0:
return True
load_pct = max(getloadavg()[:2]) / cpu_count() * 100
# ^ only current load and 5m load. 15m load is not relevant to decide to launch a new job
ok = load_pct < self.app_config.worker.max_load_pct
if not ok:
logging.info(f"cpu load is too high: {load_pct:.0f}% - max is {self.app_config.worker.max_load_pct}%")
return ok
def has_storage(self) -> bool:
if self.app_config.worker.max_disk_usage_pct <= 0:
return True
for path in self.storage_paths:
try:
usage = disk_usage(path)
if usage.percent >= self.app_config.worker.max_disk_usage_pct:
return False
except Exception:
# if we can't get the disk usage, we let the process continue
return True
return True
def has_resources(self) -> bool:
return self.has_memory() and self.has_cpu() and self.has_storage()
def sleep(self) -> None:
jitter = 0.75 + random.random() / 2 # nosec
# ^ between 0.75 and 1.25
duration = self.app_config.worker.sleep_seconds * jitter
logging.debug(f"sleep during {duration:.2f} seconds")
time.sleep(duration)
def run(self) -> None:
logging.info("Worker loop started")
try:
while True:
if self.has_resources() and self.process_next_job():
# loop immediately to try another job
# see https://github.com/huggingface/datasets-server/issues/265
continue
self.sleep()
except BaseException:
logging.exception("quit due to an uncaught error while processing the job")
raise
def process_next_job(self) -> bool:
logging.debug("try to process a job")
try:
job_info = self.queue.start_job(
difficulty_min=self.app_config.worker.difficulty_min,
difficulty_max=self.app_config.worker.difficulty_max,
job_types_blocked=self.app_config.worker.job_types_blocked,
job_types_only=self.app_config.worker.job_types_only,
)
self.set_worker_state(current_job_info=job_info)
logging.debug(f"job assigned: {job_info}")
except (EmptyQueueError, AlreadyStartedJobError, LockTimeoutError, NoWaitingJobError) as e:
self.set_worker_state(current_job_info=None)
logging.debug(e)
return False
job_runner = self.job_runner_factory.create_job_runner(job_info)
job_manager = JobManager(
job_info=job_info,
app_config=self.app_config,
job_runner=job_runner,
processing_graph=self.processing_graph,
)
job_result = job_manager.run_job()
job_manager.finish(job_result=job_result)
self.set_worker_state(current_job_info=None)
return True
def set_worker_state(self, current_job_info: Optional[JobInfo]) -> None:
worker_state: WorkerState = {"current_job_info": current_job_info, "last_updated": get_datetime()}
with FileLock(f"{self.state_file_path}.lock"):
with open(self.state_file_path, "wb") as worker_state_f:
worker_state_f.write(orjson.dumps(worker_state))
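    # Descriptive note (not part of the original module): the state file written above is a small
    # JSON document such as {"current_job_info": {...}, "last_updated": "<ISO-8601 timestamp>"}
    # (orjson serializes the datetime natively); WorkerExecutor.get_state() in worker/executor.py
    # reads it back under the same file lock.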
| datasets-server-main | services/worker/src/worker/loop.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional
import datasets
from datasets.utils.logging import get_verbosity, log_levels, set_verbosity
from libcommon.resources import Resource
@dataclass
class LibrariesResource(Resource):
hf_endpoint: str
init_hf_datasets_cache: Optional[str] = None
numba_path: Optional[str] = None
previous_hf_endpoint: str = field(init=False)
previous_hf_update_download_counts: bool = field(init=False)
previous_verbosity: int = field(init=False)
hf_datasets_cache: Path = field(init=False)
storage_paths: set[str] = field(init=False)
def allocate(self) -> None:
self.hf_datasets_cache = (
datasets.config.HF_DATASETS_CACHE
if self.init_hf_datasets_cache is None
else Path(self.init_hf_datasets_cache)
)
# Ensure the datasets library uses the expected HuggingFace endpoint
self.previous_hf_endpoint = datasets.config.HF_ENDPOINT
datasets.config.HF_ENDPOINT = self.hf_endpoint
# Don't increase the datasets download counts on huggingface.co
self.previous_hf_update_download_counts = datasets.config.HF_UPDATE_DOWNLOAD_COUNTS
datasets.config.HF_UPDATE_DOWNLOAD_COUNTS = False
# Set logs from the datasets library to the least verbose
self.previous_verbosity = get_verbosity()
set_verbosity(log_levels["critical"])
# Note: self.hf_endpoint is ignored by the huggingface_hub library for now (see
# the discussion at https://github.com/huggingface/datasets/pull/5196), and this breaks
# various of the datasets functions. The fix, for now, is to set the HF_ENDPOINT
# environment variable to the desired value.
# TODO: check here if huggingface_hub and datasets use the same endpoint
# Add the datasets and numba cache paths to the list of storage paths, to ensure the disk is not full
storage_paths = {str(self.hf_datasets_cache), str(datasets.config.HF_MODULES_CACHE)}
if self.numba_path is not None:
storage_paths.add(self.numba_path)
self.storage_paths = storage_paths
def release(self) -> None:
datasets.config.HF_ENDPOINT = self.previous_hf_endpoint
datasets.config.HF_UPDATE_DOWNLOAD_COUNTS = self.previous_hf_update_download_counts
set_verbosity(self.previous_verbosity)
| datasets-server-main | services/worker/src/worker/resources.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from libcommon.processing_graph import ProcessingGraph
from libcommon.storage import StrPath
from libcommon.utils import JobInfo
from worker.config import AppConfig
from worker.job_runner import JobRunner
from worker.job_runners.config.info import ConfigInfoJobRunner
from worker.job_runners.config.is_valid import ConfigIsValidJobRunner
from worker.job_runners.config.opt_in_out_urls_count import (
ConfigOptInOutUrlsCountJobRunner,
)
from worker.job_runners.config.parquet import ConfigParquetJobRunner
from worker.job_runners.config.parquet_and_info import ConfigParquetAndInfoJobRunner
from worker.job_runners.config.parquet_metadata import ConfigParquetMetadataJobRunner
from worker.job_runners.config.size import ConfigSizeJobRunner
from worker.job_runners.config.split_names_from_info import (
ConfigSplitNamesFromInfoJobRunner,
)
from worker.job_runners.config.split_names_from_streaming import (
ConfigSplitNamesFromStreamingJobRunner,
)
from worker.job_runners.dataset.config_names import DatasetConfigNamesJobRunner
from worker.job_runners.dataset.hub_cache import DatasetHubCacheJobRunner
from worker.job_runners.dataset.info import DatasetInfoJobRunner
from worker.job_runners.dataset.is_valid import DatasetIsValidJobRunner
from worker.job_runners.dataset.opt_in_out_urls_count import (
DatasetOptInOutUrlsCountJobRunner,
)
from worker.job_runners.dataset.parquet import DatasetParquetJobRunner
from worker.job_runners.dataset.size import DatasetSizeJobRunner
from worker.job_runners.dataset.split_names import DatasetSplitNamesJobRunner
from worker.job_runners.split.descriptive_statistics import (
SplitDescriptiveStatisticsJobRunner,
)
from worker.job_runners.split.duckdb_index import SplitDuckDbIndexJobRunner
from worker.job_runners.split.first_rows_from_parquet import (
SplitFirstRowsFromParquetJobRunner,
)
from worker.job_runners.split.first_rows_from_streaming import (
SplitFirstRowsFromStreamingJobRunner,
)
from worker.job_runners.split.image_url_columns import SplitImageUrlColumnsJobRunner
from worker.job_runners.split.is_valid import SplitIsValidJobRunner
from worker.job_runners.split.opt_in_out_urls_count import (
SplitOptInOutUrlsCountJobRunner,
)
from worker.job_runners.split.opt_in_out_urls_scan_from_streaming import (
SplitOptInOutUrlsScanJobRunner,
)
class BaseJobRunnerFactory(ABC):
"""
Base class for job runner factories. A job runner factory is a class that creates a job runner.
It cannot be instantiated directly, but must be subclassed.
Note that this class is only implemented once in the code, but we need it for the tests.
"""
def create_job_runner(self, job_info: JobInfo) -> JobRunner:
return self._create_job_runner(job_info=job_info)
@abstractmethod
def _create_job_runner(self, job_info: JobInfo) -> JobRunner:
pass
@dataclass
class JobRunnerFactory(BaseJobRunnerFactory):
app_config: AppConfig
processing_graph: ProcessingGraph
hf_datasets_cache: Path
assets_directory: StrPath
parquet_metadata_directory: StrPath
duckdb_index_cache_directory: StrPath
statistics_cache_directory: StrPath
def _create_job_runner(self, job_info: JobInfo) -> JobRunner:
job_type = job_info["type"]
try:
processing_step = self.processing_graph.get_processing_step_by_job_type(job_type)
except ValueError as e:
raise ValueError(
f"Unsupported job type: '{job_type}'. The job types declared in the processing graph are:"
f" {[processing_step.job_type for processing_step in self.processing_graph.get_processing_steps()]}"
) from e
if job_type == DatasetConfigNamesJobRunner.get_job_type():
return DatasetConfigNamesJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
hf_datasets_cache=self.hf_datasets_cache,
)
if job_type == ConfigSplitNamesFromStreamingJobRunner.get_job_type():
return ConfigSplitNamesFromStreamingJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
hf_datasets_cache=self.hf_datasets_cache,
)
if job_type == SplitFirstRowsFromStreamingJobRunner.get_job_type():
return SplitFirstRowsFromStreamingJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
hf_datasets_cache=self.hf_datasets_cache,
assets_directory=self.assets_directory,
)
if job_type == ConfigParquetAndInfoJobRunner.get_job_type():
return ConfigParquetAndInfoJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
hf_datasets_cache=self.hf_datasets_cache,
)
if job_type == ConfigParquetJobRunner.get_job_type():
return ConfigParquetJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == ConfigParquetMetadataJobRunner.get_job_type():
return ConfigParquetMetadataJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
parquet_metadata_directory=self.parquet_metadata_directory,
)
if job_type == DatasetParquetJobRunner.get_job_type():
return DatasetParquetJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == DatasetInfoJobRunner.get_job_type():
return DatasetInfoJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == ConfigInfoJobRunner.get_job_type():
return ConfigInfoJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == DatasetSizeJobRunner.get_job_type():
return DatasetSizeJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == ConfigSizeJobRunner.get_job_type():
return ConfigSizeJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == ConfigSplitNamesFromInfoJobRunner.get_job_type():
return ConfigSplitNamesFromInfoJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == DatasetSplitNamesJobRunner.get_job_type():
return DatasetSplitNamesJobRunner(
job_info=job_info,
processing_step=processing_step,
app_config=self.app_config,
)
if job_type == SplitFirstRowsFromParquetJobRunner.get_job_type():
return SplitFirstRowsFromParquetJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
processing_graph=self.processing_graph,
assets_directory=self.assets_directory,
parquet_metadata_directory=self.parquet_metadata_directory,
)
if job_type == SplitIsValidJobRunner.get_job_type():
return SplitIsValidJobRunner(
job_info=job_info,
processing_step=processing_step,
processing_graph=self.processing_graph,
app_config=self.app_config,
)
if job_type == ConfigIsValidJobRunner.get_job_type():
return ConfigIsValidJobRunner(
job_info=job_info,
processing_step=processing_step,
app_config=self.app_config,
)
if job_type == DatasetIsValidJobRunner.get_job_type():
return DatasetIsValidJobRunner(
job_info=job_info,
processing_step=processing_step,
app_config=self.app_config,
)
if job_type == SplitImageUrlColumnsJobRunner.get_job_type():
return SplitImageUrlColumnsJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == SplitOptInOutUrlsScanJobRunner.get_job_type():
return SplitOptInOutUrlsScanJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
hf_datasets_cache=self.hf_datasets_cache,
)
if job_type == ConfigOptInOutUrlsCountJobRunner.get_job_type():
return ConfigOptInOutUrlsCountJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == DatasetOptInOutUrlsCountJobRunner.get_job_type():
return DatasetOptInOutUrlsCountJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == SplitOptInOutUrlsCountJobRunner.get_job_type():
return SplitOptInOutUrlsCountJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
if job_type == SplitDescriptiveStatisticsJobRunner.get_job_type():
return SplitDescriptiveStatisticsJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
statistics_cache_directory=self.statistics_cache_directory,
)
if job_type == SplitDuckDbIndexJobRunner.get_job_type():
return SplitDuckDbIndexJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
duckdb_index_cache_directory=self.duckdb_index_cache_directory,
)
if job_type == DatasetHubCacheJobRunner.get_job_type():
return DatasetHubCacheJobRunner(
job_info=job_info,
app_config=self.app_config,
processing_step=processing_step,
)
supported_job_types = [
DatasetConfigNamesJobRunner.get_job_type(),
ConfigSplitNamesFromStreamingJobRunner.get_job_type(),
SplitFirstRowsFromStreamingJobRunner.get_job_type(),
ConfigParquetAndInfoJobRunner.get_job_type(),
ConfigParquetJobRunner.get_job_type(),
DatasetParquetJobRunner.get_job_type(),
DatasetInfoJobRunner.get_job_type(),
ConfigInfoJobRunner.get_job_type(),
DatasetSizeJobRunner.get_job_type(),
ConfigSizeJobRunner.get_job_type(),
ConfigSplitNamesFromInfoJobRunner.get_job_type(),
SplitFirstRowsFromParquetJobRunner.get_job_type(),
SplitIsValidJobRunner.get_job_type(),
ConfigIsValidJobRunner.get_job_type(),
DatasetIsValidJobRunner.get_job_type(),
SplitImageUrlColumnsJobRunner.get_job_type(),
SplitOptInOutUrlsScanJobRunner.get_job_type(),
SplitOptInOutUrlsCountJobRunner.get_job_type(),
ConfigOptInOutUrlsCountJobRunner.get_job_type(),
DatasetOptInOutUrlsCountJobRunner.get_job_type(),
SplitDuckDbIndexJobRunner.get_job_type(),
SplitDescriptiveStatisticsJobRunner.get_job_type(),
DatasetHubCacheJobRunner.get_job_type(),
]
raise ValueError(f"Unsupported job type: '{job_type}'. The supported job types are: {supported_job_types}")
| datasets-server-main | services/worker/src/worker/job_runner_factory.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from abc import ABC, abstractmethod
from typing import Optional
from libcommon.processing_graph import ProcessingStep
from libcommon.utils import JobInfo
from worker.config import AppConfig
from worker.dtos import JobResult, JobRunnerInfo
class JobRunner(ABC):
job_info: JobInfo
app_config: AppConfig
processing_step: ProcessingStep
@staticmethod
@abstractmethod
def get_job_type() -> str:
pass
@staticmethod
@abstractmethod
def get_job_runner_version() -> int:
pass
@staticmethod
def get_parallel_job_runner() -> Optional[JobRunnerInfo]: # In the future it could be a list
return None
def __init__(self, job_info: JobInfo, app_config: AppConfig, processing_step: ProcessingStep) -> None:
self.job_info = job_info
self.app_config = app_config
self.processing_step = processing_step
def pre_compute(self) -> None:
"""Hook method called before the compute method."""
pass
@abstractmethod
def compute(self) -> JobResult:
pass
def post_compute(self) -> None:
"""Hook method called after the compute method."""
pass
def validate(self) -> None:
"""
Validate that this job should be run.
It should raise an error if e.g. the config/split of the dataset to process doesn't exist.
"""
pass
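# Illustrative sketch (not part of the original module): a minimal concrete subclass of the
# abstract base above. The job type, version and response content are made-up values, and it
# assumes JobResult can be built from a content mapping and a progress value; the real job
# runners live in worker.job_runners.*.
class _ExampleJobRunner(JobRunner):
    @staticmethod
    def get_job_type() -> str:
        return "dataset-example"
    @staticmethod
    def get_job_runner_version() -> int:
        return 1
    def compute(self) -> JobResult:
        # a real runner would compute a response for self.job_info["params"]
        return JobResult(content={"dataset": self.job_info["params"]["dataset"]}, progress=1.0)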
| datasets-server-main | services/worker/src/worker/job_runner.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import os
import tempfile
from libcommon.log import init_logging
from libcommon.processing_graph import ProcessingGraph
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.storage import (
init_assets_dir,
init_duckdb_index_cache_dir,
init_parquet_metadata_dir,
init_statistics_cache_dir,
)
from worker.config import AppConfig
from worker.executor import WorkerExecutor
from worker.job_runner_factory import JobRunnerFactory
from worker.resources import LibrariesResource
WORKER_STATE_FILE_NAME = "worker_state.json"
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmp_dir:
state_file_path = os.path.join(tmp_dir, WORKER_STATE_FILE_NAME)
os.environ["WORKER_STATE_FILE_PATH"] = state_file_path
app_config = AppConfig.from_env()
init_logging(level=app_config.log.level)
# ^ set first to have logs as soon as possible
assets_directory = init_assets_dir(directory=app_config.assets.storage_directory)
parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory)
duckdb_index_cache_directory = init_duckdb_index_cache_dir(directory=app_config.duckdb_index.cache_directory)
statistics_cache_directory = init_statistics_cache_dir(app_config.descriptive_statistics.cache_directory)
processing_graph = ProcessingGraph(app_config.processing_graph.specification)
with (
LibrariesResource(
hf_endpoint=app_config.common.hf_endpoint,
init_hf_datasets_cache=app_config.datasets_based.hf_datasets_cache,
numba_path=app_config.numba.path,
) as libraries_resource,
CacheMongoResource(
database=app_config.cache.mongo_database, host=app_config.cache.mongo_url
) as cache_resource,
QueueMongoResource(
database=app_config.queue.mongo_database, host=app_config.queue.mongo_url
) as queue_resource,
):
if not cache_resource.is_available():
raise RuntimeError("The connection to the cache database could not be established. Exiting.")
if not queue_resource.is_available():
raise RuntimeError("The connection to the queue database could not be established. Exiting.")
job_runner_factory = JobRunnerFactory(
app_config=app_config,
processing_graph=processing_graph,
hf_datasets_cache=libraries_resource.hf_datasets_cache,
assets_directory=assets_directory,
parquet_metadata_directory=parquet_metadata_directory,
duckdb_index_cache_directory=duckdb_index_cache_directory,
statistics_cache_directory=statistics_cache_directory,
)
worker_executor = WorkerExecutor(
app_config=app_config,
job_runner_factory=job_runner_factory,
state_file_path=state_file_path,
)
worker_executor.start()
| datasets-server-main | services/worker/src/worker/main.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import asyncio
import logging
import os
import sys
from collections.abc import Callable
from datetime import datetime, timedelta
from random import random
from typing import Any, Optional, Union
import orjson
from filelock import FileLock
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import Queue
from libcommon.utils import get_datetime
from mirakuru import OutputExecutor
from worker import start_worker_loop
from worker.config import AppConfig
from worker.job_manager import JobManager
from worker.job_runner_factory import JobRunnerFactory
from worker.loop import WorkerState
START_WORKER_LOOP_PATH = start_worker_loop.__file__
async def every(
func: Callable[..., Optional[Any]],
*args: Any,
seconds: Union[float, tuple[float, float]],
stop_on: Optional[Any] = None,
**kwargs: Any,
) -> None:
while True:
out = func(*args, **kwargs)
if stop_on is not None and out == stop_on:
break
delay = (
seconds[0] + (seconds[1] - seconds[0]) * random() if isinstance(seconds, tuple) else seconds # nosec B311
)
await asyncio.sleep(delay)
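# Descriptive note (not part of the original module): `every` repeatedly calls `func` until it
# returns `stop_on` (if given); passing a (min, max) tuple for `seconds` jitters the delay
# uniformly within that range on each iteration, as used below for the zombie and long-job
# killers.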
class BadWorkerState(RuntimeError):
"""Raised when the worker state from the worker read by the executor is not valid."""
pass
class WorkerExecutor:
def __init__(self, app_config: AppConfig, job_runner_factory: JobRunnerFactory, state_file_path: str) -> None:
self.app_config = app_config
self.job_runner_factory = job_runner_factory
self.state_file_path = state_file_path
self.processing_graph = ProcessingGraph(self.app_config.processing_graph.specification)
max_missing_heartbeats = self.app_config.worker.max_missing_heartbeats
heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds
self.max_seconds_without_heartbeat_for_zombies = heartbeat_interval_seconds * max_missing_heartbeats
self.heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds
self.max_job_duration_seconds = self.app_config.worker.max_job_duration_seconds
self.kill_zombies_interval_seconds = self.app_config.worker.kill_zombies_interval_seconds
self.kill_long_job_interval_seconds = self.app_config.worker.kill_long_job_interval_seconds
def _create_worker_loop_executor(self) -> OutputExecutor:
banner = self.state_file_path
start_worker_loop_command = [
sys.executable,
START_WORKER_LOOP_PATH,
"--print-worker-state-path",
]
return OutputExecutor(start_worker_loop_command, banner, timeout=10)
def start(self) -> None:
exceptions = []
worker_loop_executor = self._create_worker_loop_executor()
worker_loop_executor.start() # blocking until the banner is printed
def custom_exception_handler(loop: asyncio.AbstractEventLoop, context: dict[str, Any]) -> None:
nonlocal exceptions
# first, handle with default handler
loop.default_exception_handler(context)
exception = context.get("exception")
if exception:
exceptions.append(repr(exception))
loop.stop()
loop = asyncio.get_event_loop()
loop.set_exception_handler(custom_exception_handler)
logging.info("Starting heartbeat.")
loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds))
loop.create_task(
every(
self.kill_zombies,
seconds=(
self.kill_zombies_interval_seconds * 0.5,
self.kill_zombies_interval_seconds * 1.5,
),
)
)
loop.create_task(
every(
self.kill_long_job,
worker_loop_executor=worker_loop_executor,
seconds=(
self.kill_long_job_interval_seconds * 0.5,
self.kill_long_job_interval_seconds * 1.5,
),
)
)
loop.run_until_complete(
every(self.is_worker_alive, worker_loop_executor=worker_loop_executor, seconds=1.0, stop_on=False)
)
if exceptions:
raise RuntimeError(f"Some async tasks failed: {exceptions}")
def get_state(self) -> Optional[WorkerState]:
worker_state_file_path = self.state_file_path
if not os.path.exists(worker_state_file_path):
return None
with FileLock(f"{worker_state_file_path}.lock"):
try:
with open(worker_state_file_path, "rb") as worker_state_f:
worker_state = orjson.loads(worker_state_f.read())
return WorkerState(
current_job_info=worker_state.get("current_job_info"),
last_updated=datetime.fromisoformat(worker_state["last_updated"]),
)
except (orjson.JSONDecodeError, KeyError) as err:
raise BadWorkerState(f"Failed to read worker state at {worker_state_file_path}") from err
def heartbeat(self) -> None:
worker_state = self.get_state()
if worker_state and worker_state["current_job_info"]:
Queue().heartbeat(job_id=worker_state["current_job_info"]["job_id"])
def kill_zombies(self) -> None:
queue = Queue()
zombies = queue.get_zombies(max_seconds_without_heartbeat=self.max_seconds_without_heartbeat_for_zombies)
message = "Job manager crashed while running this job (missing heartbeats)."
for zombie in zombies:
job_runner = self.job_runner_factory.create_job_runner(zombie)
job_manager = JobManager(
job_info=zombie,
app_config=self.app_config,
job_runner=job_runner,
processing_graph=self.processing_graph,
)
job_manager.set_crashed(message=message)
logging.info(f"Killing zombie. Job info = {zombie}")
def kill_long_job(self, worker_loop_executor: OutputExecutor) -> None:
worker_state = self.get_state()
if worker_state and worker_state["current_job_info"]:
long_job = worker_state["current_job_info"]
last_updated = worker_state["last_updated"]
coefficient = 10 if long_job["params"]["dataset"] == "cerebras/SlimPajama-627B" else 1
if last_updated + timedelta(seconds=coefficient * self.max_job_duration_seconds) <= get_datetime():
_duration_seconds = int((get_datetime() - last_updated).total_seconds())
logging.warning(
f"Job {long_job} exceeded maximum duration of"
f" {self.max_job_duration_seconds} seconds ({_duration_seconds} seconds)."
)
try:
worker_loop_executor.stop() # raises an error if the worker returned exit code 1
finally:
logging.info(f"Killing a long job. Job info = {long_job}")
job_runner = self.job_runner_factory.create_job_runner(long_job)
job_manager = JobManager(
job_info=long_job,
app_config=self.app_config,
job_runner=job_runner,
processing_graph=self.processing_graph,
)
message = "Job manager was killed while running this job (job exceeded maximum duration)."
job_manager.set_exceeded_maximum_duration(message=message)
def is_worker_alive(self, worker_loop_executor: OutputExecutor) -> bool:
if worker_loop_executor.running():
return True
worker_loop_executor.stop() # raises an error if the worker returned exit code 1
return False
| datasets-server-main | services/worker/src/worker/executor.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import json
import random
import re
from hashlib import sha1
from pathlib import Path
from typing import Optional
from libcommon.exceptions import DiskError
from libcommon.processing_graph import ProcessingStep
from libcommon.storage import init_dir, remove_dir
from libcommon.utils import JobInfo
from worker.config import AppConfig
from worker.job_runner import JobRunner
class JobRunnerWithCache(JobRunner):
"""Base class for job runners that use a temporary cache directory."""
base_cache_directory: Path
cache_subdirectory: Optional[Path] = None
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
cache_directory: Path,
) -> None:
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
)
self.base_cache_directory = cache_directory
def get_cache_subdirectory(self, digits: int = 14) -> str:
random_str = f"{random.randrange(10**(digits - 1), 10**digits)}" # nosec B311
# TODO: Refactor, need a way to generate payload based only on provided params
payload = (
random_str,
self.get_job_type(),
self.job_info["params"]["dataset"],
self.job_info["params"]["config"],
self.job_info["params"]["split"],
)
hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8]
prefix = f"{random_str}-{self.get_job_type()}-{self.job_info['params']['dataset']}"[:64]
subdirectory = f"{prefix}-{hash_suffix}"
return "".join([c if re.match(r"[\w-]", c) else "-" for c in subdirectory])
def pre_compute(self) -> None:
new_directory = self.base_cache_directory / self.get_cache_subdirectory()
try:
self.cache_subdirectory = Path(init_dir(new_directory))
except PermissionError as e:
raise DiskError(f"Incorrect permissions on {new_directory}", e) from e
def post_compute(self) -> None:
# empty the cache after the job to save storage space
previous_cache = self.cache_subdirectory
if previous_cache is not None:
remove_dir(previous_cache)
self.cache_subdirectory = None
| datasets-server-main | services/worker/src/worker/job_runners/_job_runner_with_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/src/worker/job_runners/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from pathlib import Path
from typing import Optional
import datasets.config
from libcommon.processing_graph import ProcessingStep
from libcommon.utils import JobInfo
from worker.config import AppConfig
from worker.job_runners._job_runner_with_cache import JobRunnerWithCache
class JobRunnerWithDatasetsCache(JobRunnerWithCache):
"""Base class for job runners that use datasets."""
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
hf_datasets_cache: Path,
) -> None:
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
cache_directory=hf_datasets_cache,
)
def set_datasets_cache(self, cache_subdirectory: Optional[Path]) -> None:
datasets.config.HF_DATASETS_CACHE = cache_subdirectory
logging.debug(f"datasets data cache set to: {datasets.config.HF_DATASETS_CACHE}")
datasets.config.DOWNLOADED_DATASETS_PATH = (
datasets.config.HF_DATASETS_CACHE / datasets.config.DOWNLOADED_DATASETS_DIR
)
datasets.config.EXTRACTED_DATASETS_PATH = (
datasets.config.HF_DATASETS_CACHE / datasets.config.EXTRACTED_DATASETS_DIR
)
def pre_compute(self) -> None:
super().pre_compute()
self.set_datasets_cache(self.cache_subdirectory)
def post_compute(self) -> None:
super().post_compute()
self.set_datasets_cache(self.base_cache_directory)
| datasets-server-main | services/worker/src/worker/job_runners/_job_runner_with_datasets_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from asyncio import Semaphore, create_task, run, wait
from pathlib import Path
from typing import Any, Optional
from aiohttp import ClientSession
from aiolimiter import AsyncLimiter
from datasets import get_dataset_config_info
from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION
from libcommon.exceptions import (
ExternalServerError,
InfoError,
MissingSpawningTokenError,
PreviousStepFormatError,
TooManyColumnsError,
)
from libcommon.processing_graph import ProcessingStep
from libcommon.simple_cache import get_previous_step_or_raise
from libcommon.utils import JobInfo
from worker.config import AppConfig, OptInOutUrlsScanConfig
from worker.dtos import CompleteJobResult, OptInOutUrlsScanResponse, OptUrl
from worker.job_runners.split.split_job_runner import SplitJobRunnerWithDatasetsCache
from worker.utils import get_rows_or_raise
async def check_spawning(
image_urls: list[str], session: ClientSession, semaphore: Semaphore, limiter: AsyncLimiter, spawning_url: str
) -> Any:
if not image_urls:
return {"urls": []}
elif len(image_urls) == 1:
image_urls = image_urls + [""] # the API requires >1 urls
async with semaphore:
async with limiter:
async with session.post(url=spawning_url, data="\n".join(image_urls)) as resp:
spawning_response = await resp.json()
return spawning_response
async def opt_in_out_task(
image_urls: list[str], session: ClientSession, semaphore: Semaphore, limiter: AsyncLimiter, spawning_url: str
) -> tuple[list[Any], list[Any]]:
try:
spawning_response = await check_spawning(image_urls, session, semaphore, limiter, spawning_url)
except Exception:
raise ExternalServerError(message=f"Error when trying to connect to {spawning_url}")
if "urls" not in spawning_response:
raise ExternalServerError(message=f"Error when trying to connect to {spawning_url}: '{spawning_response}'")
opt_in_urls_indices = [i for i in range(len(image_urls)) if spawning_response["urls"][i]["optIn"]]
opt_out_urls_indices = [i for i in range(len(image_urls)) if spawning_response["urls"][i]["optOut"]]
return opt_in_urls_indices, opt_out_urls_indices
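# Descriptive note (not part of the original module): the spawning response consumed above is
# expected to look like {"urls": [{"optIn": bool, "optOut": bool, ...}, ...]}, with one entry
# per submitted URL; the two index lists mark which of the submitted URLs are opted in or out.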
async def opt_in_out_scan_urls(
urls: list[str],
urls_number_per_batch: int,
spawning_token: str,
max_concurrent_requests_number: int,
max_requests_per_second: int,
spawning_url: str,
) -> tuple[list[int], list[int]]:
offsets = []
tasks = []
semaphore = Semaphore(value=max_concurrent_requests_number)
limiter = AsyncLimiter(max_requests_per_second, time_period=1)
headers = {"Authorization": f"API {spawning_token}"}
async with ClientSession(headers=headers) as session:
for offset in range(0, len(urls), urls_number_per_batch):
offsets.append(offset)
limit = offset + urls_number_per_batch
tasks.append(
create_task(opt_in_out_task(urls[offset:limit], session, semaphore, limiter, spawning_url))
) # noqa: E203
await wait(tasks)
opt_in_urls_indices = []
opt_out_urls_indices = []
for offset, task in zip(offsets, tasks):
batch_opt_in_urls_indices, batch_opt_out_urls_indices = task.result()
for batch_opt_in_urls_idx in batch_opt_in_urls_indices:
opt_in_urls_indices.append(offset + batch_opt_in_urls_idx)
for batch_opt_out_urls_idx in batch_opt_out_urls_indices:
opt_out_urls_indices.append(offset + batch_opt_out_urls_idx)
return opt_in_urls_indices, opt_out_urls_indices
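# NOTE (added for clarity, hypothetical values): the indices returned by each batch task are
# local to that batch, and the loop above shifts them by the batch offset. For example, with
# urls_number_per_batch=2 and 5 URLs, the batches cover urls[0:2], urls[2:4] and urls[4:6];
# if the second batch reports a local opt-out index of 1, the global index is 2 + 1 = 3.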
def compute_opt_in_out_urls_scan_response(
dataset: str,
config: str,
split: str,
hf_token: Optional[str],
rows_max_number: int,
columns_max_number: int,
urls_number_per_batch: int,
spawning_token: Optional[str],
max_concurrent_requests_number: int,
max_requests_per_second: int,
spawning_url: str,
) -> OptInOutUrlsScanResponse:
"""
Get the response of the split-opt-in-out-urls-scan cache for a specific split of a dataset from huggingface.co.
The response is not used directly in the API but is an input for the config-opt-in-out-urls-scan processing step.
Note that only image URLs are scanned, see image_url_columns.py for details about the detection heuristic.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
split (`str`):
A split name.
hf_token (`str` or `None`):
An authentication token (See https://huggingface.co/settings/token)
rows_max_number (`int`):
The maximum number of rows of the response.
columns_max_number (`int`):
The maximum number of supported columns.
urls_number_per_batch (`int`):
The number of URLs sent to the spawning service per batch.
spawning_token (`str` or `None`):
An authentication token to use spawning service (See https://api.spawning.ai/spawning-api)
max_concurrent_requests_number (`int`):
The maximum number of requests to be processed concurrently.
max_requests_per_second (`int`):
The maximum number of requests to be processed per second.
spawning_url (`str`):
Spawning API URL
Returns:
[`OptInOutUrlsScanResponse`]
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
If the content of the previous step does not have the expected format
- [`libcommon.exceptions.InfoError`]
If the config info could not be obtained using the datasets library.
- [`libcommon.exceptions.TooManyColumnsError`]
If the number of columns (features) exceeds the maximum supported number of columns.
- [`libcommon.exceptions.StreamingRowsError`]
If the split rows could not be obtained using the datasets library in streaming mode.
- [`libcommon.exceptions.NormalRowsError`]
If the split rows could not be obtained using the datasets library in normal mode.
"""
logging.info(f"get opt-in-out-urls-scan for dataset={dataset} config={config} split={split}")
if not spawning_token:
raise MissingSpawningTokenError("OPT_IN_OUT_URLS_SCAN_SPAWNING_TOKEN is not set")
# get image url columns from previous job
upstream_response = get_previous_step_or_raise(
kinds=["split-image-url-columns"],
dataset=dataset,
config=config,
split=split,
)
try:
image_url_columns_response = upstream_response.response
image_url_columns = image_url_columns_response["content"]["columns"]
except KeyError as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
# get the info
try:
info = get_dataset_config_info(
path=dataset,
config_name=config,
token=hf_token,
)
except Exception as err:
raise InfoError(
f"The info cannot be fetched for the config '{config}' of the dataset.",
cause=err,
) from err
if not image_url_columns:
return OptInOutUrlsScanResponse(
urls_columns=[],
opt_in_urls=[],
opt_out_urls=[],
num_opt_in_urls=0,
num_opt_out_urls=0,
num_urls=0,
num_scanned_rows=0,
has_urls_columns=False,
full_scan=None,
)
if len(image_url_columns) > columns_max_number:
raise TooManyColumnsError(
f"The number of columns ({len(image_url_columns)}) exceeds the maximum supported number of columns to scan"
f" ({columns_max_number})."
)
# get the rows
rows_content = get_rows_or_raise(
dataset=dataset,
config=config,
split=split,
info=info,
rows_max_number=rows_max_number,
token=hf_token,
column_names=image_url_columns,
)
rows = rows_content["rows"]
# get the urls
num_scanned_rows = len(rows)
urls = [row[urls_column] for row in rows for urls_column in image_url_columns]
# scan the urls
opt_in_urls_indices, opt_out_urls_indices = run(
opt_in_out_scan_urls(
urls,
urls_number_per_batch=urls_number_per_batch,
spawning_token=spawning_token,
max_concurrent_requests_number=max_concurrent_requests_number,
max_requests_per_second=max_requests_per_second,
spawning_url=spawning_url,
)
)
opt_in_urls = [
OptUrl(
url=urls[url_idx],
row_idx=url_idx // len(image_url_columns),
column_name=image_url_columns[url_idx % len(image_url_columns)],
)
for url_idx in opt_in_urls_indices
]
opt_out_urls = [
OptUrl(
url=urls[url_idx],
row_idx=url_idx // len(image_url_columns),
column_name=image_url_columns[url_idx % len(image_url_columns)],
)
for url_idx in opt_out_urls_indices
]
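# NOTE (added for clarity, hypothetical values): `urls` is built row-major over
# `image_url_columns`, so a flat index maps back to a cell as
# row_idx = url_idx // len(image_url_columns) and
# column = image_url_columns[url_idx % len(image_url_columns)].
# E.g. with 2 URL columns, url_idx=5 points to row 2, second column.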
# return scan result
return OptInOutUrlsScanResponse(
urls_columns=image_url_columns,
opt_in_urls=opt_in_urls,
opt_out_urls=opt_out_urls,
num_opt_in_urls=len(opt_in_urls),
num_opt_out_urls=len(opt_out_urls),
num_urls=len(urls),
num_scanned_rows=num_scanned_rows,
has_urls_columns=True,
full_scan=rows_content["all_fetched"],
)
class SplitOptInOutUrlsScanJobRunner(SplitJobRunnerWithDatasetsCache):
urls_scan_config: OptInOutUrlsScanConfig
@staticmethod
def get_job_type() -> str:
return "split-opt-in-out-urls-scan"
# ^ TODO: Change step name referring to image URLs scan specifically.
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
hf_datasets_cache: Path,
) -> None:
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
hf_datasets_cache=hf_datasets_cache,
)
self.urls_scan_config = app_config.urls_scan
def compute(self) -> CompleteJobResult:
return CompleteJobResult(
compute_opt_in_out_urls_scan_response(
dataset=self.dataset,
config=self.config,
split=self.split,
hf_token=self.app_config.common.hf_token,
rows_max_number=self.urls_scan_config.rows_max_number,
columns_max_number=self.urls_scan_config.columns_max_number,
urls_number_per_batch=self.urls_scan_config.urls_number_per_batch,
spawning_token=self.urls_scan_config.spawning_token,
max_concurrent_requests_number=self.urls_scan_config.max_concurrent_requests_number,
max_requests_per_second=self.urls_scan_config.max_requests_per_second,
spawning_url=self.urls_scan_config.spawning_url,
)
)
| datasets-server-main | services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/src/worker/job_runners/split/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from libcommon.constants import PROCESSING_STEP_SPLIT_IS_VALID_VERSION
from libcommon.processing_graph import ProcessingGraph, ProcessingStep
from libcommon.simple_cache import has_any_successful_response
from libcommon.utils import JobInfo
from worker.config import AppConfig
from worker.dtos import CompleteJobResult, IsValidResponse, JobResult
from worker.job_runners.split.split_job_runner import SplitJobRunner
def compute_is_valid_response(
dataset: str, config: str, split: str, processing_graph: ProcessingGraph
) -> IsValidResponse:
"""
Get the response of /is-valid for one specific dataset split on huggingface.co.
A dataset split is considered valid if at least one of the cache kinds that enable the
viewer, the preview or the search has a successful response.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
split (`str`):
A split name.
processing_graph (`ProcessingGraph`):
The processing graph. In particular, it must provide the list of
processing steps that enable the viewer, the preview and the search.
Returns:
`IsValidResponse`: The response (viewer, preview, search).
"""
logging.info(f"get is-valid response for dataset={dataset}")
viewer = has_any_successful_response(
dataset=dataset,
config=config,
split=None,
kinds=[step.cache_kind for step in processing_graph.get_processing_steps_enables_viewer()],
)
preview = has_any_successful_response(
dataset=dataset,
config=config,
split=split,
kinds=[step.cache_kind for step in processing_graph.get_processing_steps_enables_preview()],
)
search = has_any_successful_response(
dataset=dataset,
config=config,
split=split,
kinds=[step.cache_kind for step in processing_graph.get_processing_steps_enables_search()],
)
return IsValidResponse(viewer=viewer, preview=preview, search=search)
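# NOTE (added for clarity): the viewer flag is checked with split=None (i.e. at the config
# level), while the preview and search flags are checked for the specific split; presumably
# the steps enabling the viewer produce config-level artifacts.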
class SplitIsValidJobRunner(SplitJobRunner):
@staticmethod
def get_job_type() -> str:
return "split-is-valid"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_SPLIT_IS_VALID_VERSION
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
processing_graph: ProcessingGraph,
) -> None:
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
)
self.processing_graph = processing_graph
def compute(self) -> JobResult:
return CompleteJobResult(
compute_is_valid_response(
dataset=self.dataset, config=self.config, split=self.split, processing_graph=self.processing_graph
)
)
| datasets-server-main | services/worker/src/worker/job_runners/split/is_valid.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from pathlib import Path
from libcommon.exceptions import ParameterMissingError
from libcommon.processing_graph import ProcessingStep
from libcommon.utils import JobInfo
from worker.config import AppConfig
from worker.job_runners._job_runner_with_cache import JobRunnerWithCache
from worker.job_runners._job_runner_with_datasets_cache import (
JobRunnerWithDatasetsCache,
)
from worker.job_runners.config.config_job_runner import ConfigJobRunner
from worker.utils import check_split_exists
class SplitJobRunner(ConfigJobRunner):
split: str
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
) -> None:
super().__init__(job_info=job_info, app_config=app_config, processing_step=processing_step)
if job_info["params"]["split"] is None:
raise ParameterMissingError("'split' parameter is required")
self.split = job_info["params"]["split"]
def validate(self) -> None:
check_split_exists(dataset=self.dataset, config=self.config, split=self.split)
class SplitJobRunnerWithDatasetsCache(JobRunnerWithDatasetsCache, SplitJobRunner):
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
hf_datasets_cache: Path,
) -> None:
JobRunnerWithDatasetsCache.__init__(
self,
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
hf_datasets_cache=hf_datasets_cache,
)
SplitJobRunner.__init__(
self,
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
)
class SplitJobRunnerWithCache(JobRunnerWithCache, SplitJobRunner):
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
cache_directory: Path,
) -> None:
JobRunnerWithCache.__init__(
self,
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
cache_directory=cache_directory,
)
SplitJobRunner.__init__(
self,
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
)
| datasets-server-main | services/worker/src/worker/job_runners/split/split_job_runner.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from pathlib import Path
from typing import Optional
from datasets import (
Audio,
Features,
Image,
IterableDataset,
get_dataset_config_info,
load_dataset,
)
from libcommon.constants import (
PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION,
PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
)
from libcommon.exceptions import (
FeaturesError,
InfoError,
RowsPostProcessingError,
TooBigContentError,
TooManyColumnsError,
)
from libcommon.processing_graph import ProcessingStep
from libcommon.storage import StrPath
from libcommon.utils import JobInfo, Row
from libcommon.viewer_utils.features import get_cell_value, to_features_list
from worker.config import AppConfig, FirstRowsConfig
from worker.dtos import CompleteJobResult, JobRunnerInfo, SplitFirstRowsResponse
from worker.job_runners.split.split_job_runner import SplitJobRunnerWithDatasetsCache
from worker.utils import create_truncated_row_items, get_json_size, get_rows_or_raise
def transform_rows(
dataset: str,
config: str,
split: str,
rows: list[Row],
features: Features,
assets_base_url: str,
assets_directory: StrPath,
) -> list[Row]:
return [
{
featureName: get_cell_value(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
cell=row[featureName] if featureName in row else None,
featureName=featureName,
fieldType=fieldType,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
)
for (featureName, fieldType) in features.items()
}
for row_idx, row in enumerate(rows)
]
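# NOTE (added for clarity): transform_rows maps every cell through get_cell_value so that
# media cells (e.g. Image or Audio) can be saved under assets_directory and replaced by URLs
# built from assets_base_url; plain values are expected to pass through unchanged.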
def compute_first_rows_response(
dataset: str,
config: str,
split: str,
assets_base_url: str,
hf_token: Optional[str],
min_cell_bytes: int,
rows_max_bytes: int,
rows_max_number: int,
rows_min_number: int,
columns_max_number: int,
assets_directory: StrPath,
max_size_fallback: Optional[int] = None,
) -> SplitFirstRowsResponse:
"""
Get the response of /first-rows for one specific split of a dataset from huggingface.co.
The dataset can be private or gated if you pass an acceptable token.
It is assumed that the dataset exists and can be accessed using the token.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
split (`str`):
A split name.
assets_base_url (`str`):
The base url of the assets.
hf_token (`str` or `None`):
An authentication token (See https://huggingface.co/settings/token)
min_cell_bytes (`int`):
The minimum size in bytes of a cell when truncating the rows to fit `rows_max_bytes`.
max_size_fallback (`int` or `None`): **DEPRECATED**
The maximum number of bytes of the split to fall back to normal mode if the streaming mode fails.
This argument is now hard-coded to 100MB, and will be removed in a future version.
rows_max_bytes (`int`):
The maximum number of bytes of the response (else, the response is truncated).
rows_max_number (`int`):
The maximum number of rows of the response.
rows_min_number (`int`):
The minimum number of rows of the response.
columns_max_number (`int`):
The maximum number of columns supported.
assets_directory (`str` or `pathlib.Path`):
The directory where the assets are stored.
Returns:
[`SplitFirstRowsResponse`]: The list of first rows of the split.
Raises the following errors:
- [`libcommon.exceptions.SplitNotFoundError`]
If the split does not exist in the dataset.
- [`libcommon.exceptions.InfoError`]
If the config info could not be obtained using the datasets library.
- [`libcommon.exceptions.FeaturesError`]
If the split features could not be obtained using the datasets library.
- [`libcommon.exceptions.RowsPostProcessingError`]
If the post-processing of the split rows failed, e.g. while saving the images or audio files to the assets.
- [`libcommon.exceptions.TooManyColumnsError`]
If the number of columns (features) exceeds the maximum supported number of columns.
- [`libcommon.exceptions.TooBigContentError`]
If the first rows content exceeds the maximum supported size of bytes.
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
If the content of the previous step does not have the expected format
- [`libcommon.exceptions.StreamingRowsError`]
If the split rows could not be obtained using the datasets library in streaming mode.
- [`libcommon.exceptions.NormalRowsError`]
If the split rows could not be obtained using the datasets library in normal mode.
"""
logging.info(f"get first-rows for dataset={dataset} config={config} split={split}")
# get the features
try:
info = get_dataset_config_info(
path=dataset,
config_name=config,
token=hf_token,
)
except Exception as err:
raise InfoError(
f"The info cannot be fetched for the config '{config}' of the dataset.",
cause=err,
) from err
if not info.features:
try:
# https://github.com/huggingface/datasets/blob/f5826eff9b06ab10dba1adfa52543341ef1e6009/src/datasets/iterable_dataset.py#L1255
iterable_dataset = load_dataset(
path=dataset,
name=config,
split=split,
streaming=True,
token=hf_token,
)
if not isinstance(iterable_dataset, IterableDataset):
raise TypeError("load_dataset should return an IterableDataset.")
iterable_dataset = iterable_dataset._resolve_features()
if not isinstance(iterable_dataset, IterableDataset):
raise TypeError("load_dataset should return an IterableDataset.")
features = iterable_dataset.features
except Exception as err:
raise FeaturesError(
(
f"Cannot extract the features (columns) for the split '{split}' of the config '{config}' of the"
" dataset."
),
cause=err,
) from err
else:
features = info.features
if features and len(features) > columns_max_number:
raise TooManyColumnsError(
f"The number of columns ({len(features)}) exceeds the maximum supported number of columns"
f" ({columns_max_number}). This is a current limitation of the datasets viewer. You can reduce the number"
" of columns if you want the viewer to work."
)
# validate size of response without the rows
features_list = to_features_list(features=features)
response_features_only: SplitFirstRowsResponse = {
"dataset": dataset,
"config": config,
"split": split,
"features": features_list,
"rows": [],
"truncated": False,
}
surrounding_json_size = get_json_size(response_features_only)
if surrounding_json_size > rows_max_bytes:
raise TooBigContentError(
f"The size of the content of the first rows ({surrounding_json_size} B) exceeds the maximum"
f" supported size ({rows_max_bytes} B) even after truncation. Please report the issue."
)
# get the rows
rows_content = get_rows_or_raise(
dataset=dataset,
config=config,
split=split,
info=info,
max_size_fallback=max_size_fallback,
rows_max_number=rows_max_number,
token=hf_token,
)
rows = rows_content["rows"]
all_fetched = rows_content["all_fetched"]
# transform the rows, if needed (e.g. save the images or audio to the assets, and return their URL)
try:
transformed_rows = transform_rows(
dataset=dataset,
config=config,
split=split,
rows=rows,
features=features,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
)
except Exception as err:
raise RowsPostProcessingError(
"Server error while post-processing the split rows. Please report the issue.",
cause=err,
) from err
# truncate the rows to fit within the restrictions, and prepare them as RowItems
columns_to_keep_untruncated = [col for col, feature in features.items() if isinstance(feature, (Image, Audio))]
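# NOTE (added for clarity, hypothetical numbers): the rows only get the byte budget left
# after the features-only envelope. E.g. if rows_max_bytes is 1_000_000 and the envelope
# weighs 2_000 bytes, create_truncated_row_items works with a 998_000-byte budget, keeping
# the Image/Audio columns listed above untruncated.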
row_items, truncated = create_truncated_row_items(
rows=transformed_rows,
min_cell_bytes=min_cell_bytes,
rows_max_bytes=rows_max_bytes - surrounding_json_size,
rows_min_number=rows_min_number,
columns_to_keep_untruncated=columns_to_keep_untruncated,
)
response = response_features_only
response["rows"] = row_items
response["truncated"] = (not all_fetched) or truncated
# return the response
return response
class SplitFirstRowsFromStreamingJobRunner(SplitJobRunnerWithDatasetsCache):
assets_directory: StrPath
first_rows_config: FirstRowsConfig
@staticmethod
def get_job_type() -> str:
return "split-first-rows-from-streaming"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION
@staticmethod
def get_parallel_job_runner() -> JobRunnerInfo:
return JobRunnerInfo(
job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION,
job_type="split-first-rows-from-parquet",
)
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
hf_datasets_cache: Path,
assets_directory: StrPath,
) -> None:
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
hf_datasets_cache=hf_datasets_cache,
)
self.first_rows_config = app_config.first_rows
self.assets_directory = assets_directory
self.assets_base_url = app_config.assets.base_url
def compute(self) -> CompleteJobResult:
return CompleteJobResult(
compute_first_rows_response(
dataset=self.dataset,
config=self.config,
split=self.split,
assets_base_url=self.assets_base_url,
assets_directory=self.assets_directory,
hf_token=self.app_config.common.hf_token,
min_cell_bytes=self.first_rows_config.min_cell_bytes,
rows_max_bytes=self.first_rows_config.max_bytes,
rows_max_number=self.first_rows_config.max_number,
rows_min_number=self.first_rows_config.min_number,
columns_max_number=self.first_rows_config.columns_max_number,
)
)
| datasets-server-main | services/worker/src/worker/job_runners/split/first_rows_from_streaming.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import enum
import logging
import os
from pathlib import Path
from typing import Optional, TypedDict, Union
import duckdb
import numpy as np
import pandas as pd
from huggingface_hub import hf_hub_download
from libcommon.constants import PROCESSING_STEP_SPLIT_DESCRIPTIVE_STATISTICS_VERSION
from libcommon.exceptions import (
CacheDirectoryNotInitializedError,
NoSupportedFeaturesError,
ParquetResponseEmptyError,
PreviousStepFormatError,
SplitWithTooBigParquetError,
StatisticsComputationError,
)
from libcommon.processing_graph import ProcessingStep
from libcommon.simple_cache import get_previous_step_or_raise
from libcommon.storage import StrPath
from libcommon.utils import JobInfo
from tqdm import tqdm
from worker.config import AppConfig, DescriptiveStatisticsConfig
from worker.dtos import CompleteJobResult
from worker.job_runners.split.split_job_runner import SplitJobRunnerWithCache
REPO_TYPE = "dataset"
DECIMALS = 5
INTEGER_DTYPES = ["int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"]
FLOAT_DTYPES = ["float16", "float32", "float64"]
NUMERICAL_DTYPES = INTEGER_DTYPES + FLOAT_DTYPES
BINS_TABLE_NAME = "bins" # name of a table with bin edges data used to compute histogram
COMPUTE_NAN_COUNTS_COMMAND = """
SELECT COUNT(*) FROM read_parquet('{parquet_filename}') WHERE {column_name} IS NULL;
"""
COMPUTE_CATEGORIES_COUNTS_COMMAND = """
SELECT {column_name}, COUNT(*) FROM read_parquet('{parquet_filename}') GROUP BY {column_name};
"""
COMPUTE_MIN_MAX_MEAN_MEDIAN_STD_COMMAND = """
SELECT min({column_name}), max({column_name}), mean({column_name}),
median({column_name}), stddev_samp({column_name}) FROM read_parquet('{parquet_filename}');
"""
COMPUTE_HIST_COMMAND = """
SELECT bin_id, COUNT(*) as count FROM read_parquet('{parquet_filename}')
JOIN {bins_table_name} ON ({column_name} >= bin_min AND {column_name} < bin_max) GROUP BY bin_id;
"""
class ColumnType(str, enum.Enum):
FLOAT = "float"
INT = "int"
CLASS_LABEL = "class_label"
class Histogram(TypedDict):
hist: list[int]
bin_edges: list[float]
class NumericalStatisticsItem(TypedDict):
nan_count: int
nan_proportion: float
min: float
max: float
mean: float
median: float
std: float
histogram: Histogram
class CategoricalStatisticsItem(TypedDict):
nan_count: int
nan_proportion: float
n_unique: int
frequencies: dict[str, int]
class StatisticsPerColumnItem(TypedDict):
column_name: str
column_type: ColumnType
column_statistics: Union[NumericalStatisticsItem, CategoricalStatisticsItem]
class SplitDescriptiveStatisticsResponse(TypedDict):
num_examples: int
statistics: list[StatisticsPerColumnItem]
def generate_bins(
min_value: Union[int, float],
max_value: Union[int, float],
column_name: str,
column_type: ColumnType,
n_bins: int,
) -> pd.DataFrame:
"""
Returns:
pandas.DataFrame with bin edges to insert into database to perform histogram computation with duckdb
"""
if column_type is ColumnType.FLOAT:
bin_size = (max_value - min_value) / n_bins
bin_edges = np.arange(min_value, max_value, bin_size).astype(float).tolist()
if len(bin_edges) != n_bins:
raise StatisticsComputationError(
f"Incorrect number of bins generated for {column_name=}, expected {n_bins}, got {len(bin_edges)}."
)
elif column_type is ColumnType.INT:
bin_size = np.ceil((max_value - min_value + 1) / n_bins)
bin_edges = np.arange(min_value, max_value + 1, bin_size).astype(int).tolist()
if len(bin_edges) > n_bins:
raise StatisticsComputationError(
f"Incorrect number of bins generated for {column_name=}, expected {n_bins}, got {len(bin_edges)}."
)
else:
raise ValueError(f"Incorrect column type of {column_name=}: {column_type}. ")
bin_max_edges = bin_edges[1:] + [max_value + 1] # add 1 to include exact max values in the last bin
return pd.DataFrame.from_dict(
{"bin_id": list(range(len(bin_edges))), "bin_min": bin_edges, "bin_max": bin_max_edges}
)
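# NOTE (added for clarity, hypothetical values): for an INT column with min_value=0,
# max_value=10 and n_bins=5, bin_size = ceil(11 / 5) = 3, so bin_min = [0, 3, 6, 9] and
# bin_max = [3, 6, 9, 11]; the extra +1 on the last edge lets the exact maximum (10) fall
# into the last half-open [bin_min, bin_max) interval.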
def compute_histogram(
con: duckdb.DuckDBPyConnection,
column_name: str,
parquet_filename: Path,
column_type: ColumnType,
min_value: Union[int, float],
max_value: Union[int, float],
n_bins: int,
n_samples: Optional[int] = None,
) -> Histogram:
bins_df = generate_bins(
min_value=min_value, max_value=max_value, column_name=column_name, column_type=column_type, n_bins=n_bins
)
n_bins = bins_df.shape[0]
# create auxiliary table with bin edges
con.sql(f"CREATE OR REPLACE TEMPORARY TABLE {BINS_TABLE_NAME} AS SELECT * from bins_df") # nosec
compute_hist_command = COMPUTE_HIST_COMMAND.format(
parquet_filename=parquet_filename, bins_table_name=BINS_TABLE_NAME, column_name=column_name
)
logging.debug(f"Compute histogram for {column_name=}")
# query returns a list of tuples (bin_id, count):
hist_query_result = dict(con.sql(compute_hist_command).fetchall()) # dict bin_id -> n_samples
if len(hist_query_result) > n_bins + 1:
raise StatisticsComputationError(
f"Got unexpected result during histogram computation for {column_name=}: returned more bins than"
f" requested. {n_bins=} {hist_query_result=}. "
)
hist = []
for bin_idx in range(n_bins):
# no key in query result = no examples in this range, so we put 0
hist.append(hist_query_result.get(bin_idx, 0))
if n_samples and sum(hist) != n_samples:
raise StatisticsComputationError(
f"Got unexpected result during histogram computation for {column_name=}: "
f" histogram sum and number of non-null samples don't match, histogram sum={sum(hist)}, {n_samples=}"
)
bins = bins_df["bin_min"].round(DECIMALS).tolist()
bins = bins + [np.round(max_value, DECIMALS).item()] # put exact max value back to bins
return Histogram(hist=hist, bin_edges=bins)
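# NOTE (added for clarity): bin ids missing from the duckdb result simply mean an empty bin
# (count 0), and the returned bin_edges has n_bins + 1 entries: the rounded bin minima plus
# the exact maximum value appended at the end.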
def compute_numerical_statistics(
con: duckdb.DuckDBPyConnection,
column_name: str,
parquet_filename: Path,
n_bins: int,
n_samples: int,
column_type: ColumnType,
) -> NumericalStatisticsItem:
logging.debug(f"Compute min, max, mean, median, std and proportion of null values for {column_name=}")
min_max_mean_median_std_command = COMPUTE_MIN_MAX_MEAN_MEDIAN_STD_COMMAND.format(
column_name=column_name, parquet_filename=parquet_filename
)
minimum, maximum, mean, median, std = con.sql(min_max_mean_median_std_command).fetchall()[0]
logging.debug(f"{minimum=}, {maximum=}, {mean=}, {median=}, {std=}")
nan_count_command = COMPUTE_NAN_COUNTS_COMMAND.format(column_name=column_name, parquet_filename=parquet_filename)
nan_count = con.sql(nan_count_command).fetchall()[0][0]
nan_proportion = np.round(nan_count / n_samples, DECIMALS).item() if nan_count else 0.0
logging.debug(f"{nan_count=} {nan_proportion=}")
histogram = compute_histogram(
con,
column_name,
parquet_filename,
min_value=minimum,
max_value=maximum,
column_type=column_type,
n_bins=n_bins,
n_samples=n_samples - nan_count,
)
if column_type == ColumnType.FLOAT:
minimum, maximum, mean, median, std = np.round([minimum, maximum, mean, median, std], DECIMALS).tolist()
elif column_type == ColumnType.INT:
mean, median, std = np.round([mean, median, std], DECIMALS).tolist()
else:
raise ValueError(f"Incorrect column type of {column_name=}: {column_type}")
return NumericalStatisticsItem(
nan_count=nan_count,
nan_proportion=nan_proportion,
min=minimum,
max=maximum,
mean=mean,
median=median,
std=std,
histogram=histogram,
)
def compute_categorical_statistics(
con: duckdb.DuckDBPyConnection,
column_name: str,
parquet_filename: Path,
class_label_names: list[str],
n_samples: int,
) -> CategoricalStatisticsItem:
categorical_counts_query = COMPUTE_CATEGORIES_COUNTS_COMMAND.format(
column_name=column_name, parquet_filename=parquet_filename
)
categories: list[tuple[int, int]] = con.sql(
categorical_counts_query
).fetchall() # list of tuples (idx, num_samples)
frequencies, nan_count = {}, 0
for cat_id, freq in categories:
if cat_id is not None:
frequencies[class_label_names[cat_id]] = freq
else:
nan_count = freq
nan_proportion = np.round(nan_count / n_samples, DECIMALS).item() if nan_count != 0 else 0.0
logging.debug(f"Statistics for {column_name=} computed")
return CategoricalStatisticsItem(
nan_count=nan_count,
nan_proportion=nan_proportion,
n_unique=len(categories) - 1 if nan_count else len(categories),
frequencies=frequencies,
)
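# NOTE (added for clarity): the GROUP BY above also returns a row whose category id is NULL
# when the column contains missing values; it is counted as nan_count, which is why n_unique
# subtracts one category in that case.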
def compute_descriptive_statistics_response(
dataset: str,
config: str,
split: str,
local_parquet_directory: Path,
hf_token: Optional[str],
parquet_revision: str,
histogram_num_bins: int,
max_parquet_size_bytes: int,
) -> SplitDescriptiveStatisticsResponse:
"""
Compute statistics and get response for the `split-descriptive-statistics` step.
Currently, integers, floats and ClassLabel features are supported.
Args:
dataset (`str`):
Name of a dataset.
config (`str`):
Requested dataset configuration name.
split (`str`):
Requested dataset split.
local_parquet_directory (`Path`):
Path to a local directory where the dataset's parquet files are stored. We download these files locally
because it enables fast querying and statistics computation.
hf_token (`str`, `optional`):
An app authentication token with read access to all the datasets.
parquet_revision (`str`):
The git revision (e.g. "refs/convert/parquet") from where to download the dataset's parquet files.
histogram_num_bins (`int`):
(Maximum) number of bins to compute histogram for numerical data.
The resulting number of bins might be lower than the requested one for integer data.
max_parquet_size_bytes (`int`):
The maximum size in bytes of the dataset's parquet files to compute statistics.
Datasets with bigger size are ignored.
Returns:
`SplitDescriptiveStatisticsResponse`: An object with the statistics response for a requested split, for each
numerical (int and float) or ClassLabel feature.
Raises the following errors:
- [`libcommon.exceptions.PreviousStepFormatError`]
If the content of the previous step does not have the expected format.
- [`libcommon.exceptions.ParquetResponseEmptyError`]
If the response for `config-parquet-and-info` doesn't have any parquet files.
- [`libcommon.exceptions.SplitWithTooBigParquetError`]
If the total size of the requested split's parquet files exceeds the provided `max_parquet_size_bytes`.
- [`libcommon.exceptions.NoSupportedFeaturesError`]
If the requested dataset doesn't have any features supported for statistics computation.
Currently, floats, integers and ClassLabels are supported.
- [`libcommon.exceptions.StatisticsComputationError`]
If there was some unexpected behaviour during statistics computation.
"""
logging.info(f"Compute descriptive statistics for {dataset=}, {config=}, {split=}")
config_parquet_and_info_step = "config-parquet-and-info"
parquet_and_info_best_response = get_previous_step_or_raise(
kinds=[config_parquet_and_info_step],
dataset=dataset,
config=config,
)
content_parquet_and_info = parquet_and_info_best_response.response["content"]
try:
split_parquet_files = [
parquet_file
for parquet_file in content_parquet_and_info["parquet_files"]
if parquet_file["config"] == config and parquet_file["split"] == split
]
dataset_info = content_parquet_and_info["dataset_info"]
except KeyError as e:
raise PreviousStepFormatError(
(
f"Previous step '{config_parquet_and_info_step}' did not return the expected content: "
"'parquet_files' or 'dataset_info'. "
),
e,
) from e
if not split_parquet_files:
raise ParquetResponseEmptyError("No parquet files found.")
features = dataset_info.get("features")
if features is None:
raise PreviousStepFormatError(
f"Previous step '{config_parquet_and_info_step}' did not return the expected content: "
"no features found in 'dataset_info'. "
)
split_parquets_size = sum(parquet_file["size"] for parquet_file in split_parquet_files)
if split_parquets_size > max_parquet_size_bytes:
raise SplitWithTooBigParquetError(
f"Statistics computation is limited to split parquets under {max_parquet_size_bytes} bytes. "
f"Current size of sum of split parquets is {split_parquets_size} bytes."
)
# store data as local parquet files for fast querying
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
logging.info(f"Downloading remote parquet files to a local directory {local_parquet_directory}. ")
for parquet_file in split_parquet_files:
# For directories like "partial-train" for the file at "en/partial-train/0000.parquet" in the C4 dataset.
# Note that "-" is forbidden in split names, so this doesn't create directory name collisions.
split_directory = parquet_file["url"].rsplit("/", 2)[1]
hf_hub_download(
repo_type=REPO_TYPE,
revision=parquet_revision,
repo_id=dataset,
filename=f"{config}/{split_directory}/{parquet_file['filename']}",
local_dir=local_parquet_directory,
local_dir_use_symlinks=False,
token=hf_token,
cache_dir=local_parquet_directory,
)
local_parquet_glob_path = Path(local_parquet_directory) / config / f"{split}/*.parquet"
stats: list[StatisticsPerColumnItem] = []
num_examples = dataset_info["splits"][split]["num_examples"]
categorical_features = {
feature_name: feature
for feature_name, feature in features.items()
if isinstance(feature, dict) and feature.get("_type") == "ClassLabel"
}
numerical_features = {
feature_name: feature
for feature_name, feature in features.items()
if isinstance(feature, dict) and feature.get("_type") == "Value" and feature.get("dtype") in NUMERICAL_DTYPES
}
if not categorical_features and not numerical_features:
raise NoSupportedFeaturesError(
"No columns for statistics computation found. Currently supported feature types are: "
f"{NUMERICAL_DTYPES} and ClassLabel. "
)
con = duckdb.connect(":memory:") # we don't load data in local db file, use local parquet file instead
# configure duckdb extensions
con.sql(f"SET extension_directory='{local_parquet_directory}';")
con.sql("INSTALL httpfs")
con.sql("LOAD httpfs")
con.sql("SET enable_progress_bar=true;")
# compute for ClassLabels (we are sure that these are discrete categories)
if categorical_features:
logging.info(f"Compute statistics for categorical columns {categorical_features}")
for feature_name, feature in tqdm(categorical_features.items()):
logging.debug(f"Compute statistics for ClassLabel feature '{feature_name}'")
class_label_names = feature["names"]
cat_column_stats: CategoricalStatisticsItem = compute_categorical_statistics(
con,
feature_name,
class_label_names=class_label_names,
n_samples=num_examples,
parquet_filename=local_parquet_glob_path,
)
stats.append(
StatisticsPerColumnItem(
column_name=feature_name,
column_type=ColumnType.CLASS_LABEL,
column_statistics=cat_column_stats,
)
)
if numerical_features:
logging.info(f"Compute min, max, mean, median, std, histogram for numerical columns {numerical_features}. ")
for feature_name, feature in tqdm(numerical_features.items()):
column_type = ColumnType.FLOAT if feature["dtype"] in FLOAT_DTYPES else ColumnType.INT
num_column_stats: NumericalStatisticsItem = compute_numerical_statistics(
con,
feature_name,
parquet_filename=local_parquet_glob_path,
n_bins=histogram_num_bins,
n_samples=num_examples,
column_type=column_type,
)
stats.append(
StatisticsPerColumnItem(
column_name=feature_name,
column_type=column_type,
column_statistics=num_column_stats,
)
)
con.close()
return SplitDescriptiveStatisticsResponse(
num_examples=num_examples, statistics=sorted(stats, key=lambda x: x["column_name"])
)
class SplitDescriptiveStatisticsJobRunner(SplitJobRunnerWithCache):
descriptive_statistics_config: DescriptiveStatisticsConfig
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
statistics_cache_directory: StrPath,
):
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
cache_directory=Path(statistics_cache_directory),
)
self.descriptive_statistics_config = app_config.descriptive_statistics
@staticmethod
def get_job_type() -> str:
return "split-descriptive-statistics"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_SPLIT_DESCRIPTIVE_STATISTICS_VERSION
def compute(self) -> CompleteJobResult:
if self.cache_subdirectory is None:
raise CacheDirectoryNotInitializedError("Cache directory has not been initialized.")
return CompleteJobResult(
compute_descriptive_statistics_response(
dataset=self.dataset,
config=self.config,
split=self.split,
local_parquet_directory=self.cache_subdirectory,
hf_token=self.app_config.common.hf_token,
parquet_revision=self.descriptive_statistics_config.parquet_revision,
histogram_num_bins=self.descriptive_statistics_config.histogram_num_bins,
max_parquet_size_bytes=self.descriptive_statistics_config.max_parquet_size_bytes,
)
)
| datasets-server-main | services/worker/src/worker/job_runners/split/descriptive_statistics.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import get_previous_step_or_raise
from worker.dtos import CompleteJobResult, OptInOutUrlsCountResponse
from worker.job_runners.split.split_job_runner import SplitJobRunner
def compute_opt_in_out_urls_count_response(
dataset: str,
config: str,
split: str,
) -> OptInOutUrlsCountResponse:
logging.info(f"get opt-in-out-urls-count for dataset={dataset} config={config} split={split}")
opt_in_out_urls_scan = get_previous_step_or_raise(
kinds=["split-opt-in-out-urls-scan"], dataset=dataset, config=config, split=split
)
try:
content = opt_in_out_urls_scan.response["content"]
opt_in_out_urls_count = OptInOutUrlsCountResponse(
has_urls_columns=content["has_urls_columns"],
num_opt_in_urls=content["num_opt_in_urls"],
num_opt_out_urls=content["num_opt_out_urls"],
num_scanned_rows=content["num_scanned_rows"],
num_urls=content["num_urls"],
urls_columns=content["urls_columns"],
full_scan=content["full_scan"],
)
except KeyError as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
return opt_in_out_urls_count
class SplitOptInOutUrlsCountJobRunner(SplitJobRunner):
@staticmethod
def get_job_type() -> str:
return "split-opt-in-out-urls-count"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERSION
def compute(self) -> CompleteJobResult:
return CompleteJobResult(
compute_opt_in_out_urls_count_response(
dataset=self.dataset,
config=self.config,
split=self.split,
)
)
| datasets-server-main | services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import copy
import logging
import os
from pathlib import Path
from typing import Any, Optional
import duckdb
from datasets.features.features import Features, FeatureType, Value, _visit
from huggingface_hub import hf_hub_download
from huggingface_hub._commit_api import (
CommitOperation,
CommitOperationAdd,
CommitOperationDelete,
)
from huggingface_hub.hf_api import HfApi
from huggingface_hub.utils._errors import HfHubHTTPError, RepositoryNotFoundError
from libcommon.constants import PROCESSING_STEP_SPLIT_DUCKDB_INDEX_VERSION
from libcommon.exceptions import (
CacheDirectoryNotInitializedError,
CreateCommitError,
DatasetNotFoundError,
DuckDBIndexFileNotFoundError,
LockedDatasetTimeoutError,
NoIndexableColumnsError,
ParquetResponseEmptyError,
PreviousStepFormatError,
SplitWithTooBigParquetError,
)
from libcommon.processing_graph import ProcessingStep
from libcommon.queue import lock
from libcommon.simple_cache import get_previous_step_or_raise
from libcommon.storage import StrPath
from libcommon.utils import JobInfo, SplitHubFile
from worker.config import AppConfig, DuckDbIndexConfig
from worker.dtos import CompleteJobResult
from worker.job_runners.split.split_job_runner import SplitJobRunnerWithCache
from worker.utils import (
HF_HUB_HTTP_ERROR_RETRY_SLEEPS,
LOCK_GIT_BRANCH_RETRY_SLEEPS,
create_branch,
hf_hub_url,
retry,
)
DATASET_TYPE = "dataset"
STRING_FEATURE_DTYPE = "string"
VALUE_FEATURE_TYPE = "Value"
DUCKDB_DEFAULT_INDEX_FILENAME = "index.duckdb"
CREATE_SEQUENCE_COMMAND = "CREATE OR REPLACE SEQUENCE serial START 0 MINVALUE 0;"
CREATE_INDEX_COMMAND = "PRAGMA create_fts_index('data', '__hf_index_id', {columns}, overwrite=1);"
CREATE_TABLE_COMMAND = "CREATE OR REPLACE TABLE data AS SELECT nextval('serial') AS __hf_index_id, {columns} FROM"
INSTALL_EXTENSION_COMMAND = "INSTALL '{extension}';"
LOAD_EXTENSION_COMMAND = "LOAD '{extension}';"
SET_EXTENSIONS_DIRECTORY_COMMAND = "SET extension_directory='{directory}';"
REPO_TYPE = "dataset"
HUB_DOWNLOAD_CACHE_FOLDER = "cache"
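# NOTE (added for clarity, hypothetical columns): the templates above are plain string
# substitutions. For example, with columns '"text", "label"', CREATE_TABLE_COMMAND renders to
#   CREATE OR REPLACE TABLE data AS SELECT nextval('serial') AS __hf_index_id, "text", "label" FROM
# (the caller appends the parquet glob), and CREATE_INDEX_COMMAND with columns '"text"' renders to
#   PRAGMA create_fts_index('data', '__hf_index_id', "text", overwrite=1);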
class DuckdbIndexWithFeatures(SplitHubFile):
features: Optional[dict[str, Any]]
def get_indexable_columns(features: Features) -> list[str]:
indexable_columns: list[str] = []
for column, feature in features.items():
indexable = False
def check_indexable(feature: FeatureType) -> None:
nonlocal indexable
if isinstance(feature, Value) and feature.dtype == "string":
indexable = True
_visit(feature, check_indexable)
if indexable:
indexable_columns.append(column)
return indexable_columns
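# NOTE (added for clarity, hypothetical features): get_indexable_columns keeps any top-level
# column that contains a string Value anywhere in its (possibly nested) feature type. E.g.
# Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"]),
#           "meta": {"title": Value("string")}}) would yield ["text", "meta"].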
def compute_index_rows(
job_id: str,
dataset: str,
config: str,
split: str,
duckdb_index_file_directory: Path,
target_revision: str,
hf_endpoint: str,
commit_message: str,
url_template: str,
hf_token: Optional[str],
max_parquet_size_bytes: int,
extensions_directory: Optional[str],
committer_hf_token: Optional[str],
) -> DuckdbIndexWithFeatures:
logging.info(f"get split-duckdb-index for dataset={dataset} config={config} split={split}")
# get parquet urls and dataset_info
config_parquet_and_info_step = "config-parquet-and-info"
parquet_and_info_best_response = get_previous_step_or_raise(
kinds=[config_parquet_and_info_step],
dataset=dataset,
config=config,
)
content_parquet_and_info = parquet_and_info_best_response.response["content"]
try:
split_parquet_files = [
parquet_file
for parquet_file in content_parquet_and_info["parquet_files"]
if parquet_file["config"] == config and parquet_file["split"] == split
]
split_parquets_size = sum(parquet_file["size"] for parquet_file in split_parquet_files)
if split_parquets_size > max_parquet_size_bytes:
raise SplitWithTooBigParquetError(
f"The indexing is limited to split parquets under {max_parquet_size_bytes} bytes. "
f"Current size of sum of split parquets is {split_parquets_size} bytes."
)
parquet_file_names = [parquet_file["filename"] for parquet_file in split_parquet_files]
if not parquet_file_names:
raise ParquetResponseEmptyError("No parquet files found.")
# For directories like "partial-train" for the file at "en/partial-train/0000.parquet" in the C4 dataset.
# Note that "-" is forbidden in split names, so this doesn't create directory name collisions.
split_directory = split_parquet_files[0]["url"].rsplit("/", 2)[1]
# get the features
features = content_parquet_and_info["dataset_info"]["features"]
column_names = ",".join('"' + column + '"' for column in list(features.keys()))
# look for indexable columns (= possibly nested columns containing string data)
# copying the features is needed here, but will be fixed with https://github.com/huggingface/datasets/pull/6189
indexable_columns = ",".join(
'"' + column + '"' for column in get_indexable_columns(Features.from_dict(copy.deepcopy(features)))
)
if not indexable_columns:
raise NoIndexableColumnsError("No string columns available to index.")
except KeyError as e:
raise PreviousStepFormatError(
f"Previous step '{config_parquet_and_info_step}' did not return the expected content.", e
) from e
# index all columns
db_path = duckdb_index_file_directory.resolve() / DUCKDB_DEFAULT_INDEX_FILENAME
con = duckdb.connect(str(db_path.resolve()))
# configure duckdb extensions
if extensions_directory is not None:
con.execute(SET_EXTENSIONS_DIRECTORY_COMMAND.format(directory=extensions_directory))
con.execute(INSTALL_EXTENSION_COMMAND.format(extension="httpfs"))
con.execute(LOAD_EXTENSION_COMMAND.format(extension="httpfs"))
con.execute(INSTALL_EXTENSION_COMMAND.format(extension="fts"))
con.execute(LOAD_EXTENSION_COMMAND.format(extension="fts"))
logging.debug(CREATE_SEQUENCE_COMMAND)
con.sql(CREATE_SEQUENCE_COMMAND)
# see https://pypi.org/project/hf-transfer/ for more details about how to enable hf_transfer
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
for parquet_file in parquet_file_names:
hf_hub_download(
repo_type=REPO_TYPE,
revision=target_revision,
repo_id=dataset,
filename=f"{config}/{split_directory}/{parquet_file}",
local_dir=duckdb_index_file_directory,
local_dir_use_symlinks=False,
token=hf_token,
cache_dir=duckdb_index_file_directory,
)
all_split_parquets = f"{duckdb_index_file_directory}/{config}/{split_directory}/*.parquet"
create_command_sql = f"{CREATE_TABLE_COMMAND.format(columns=column_names)} '{all_split_parquets}';"
logging.debug(create_command_sql)
con.sql(create_command_sql)
# TODO: by default, 'porter' stemmer is being used, use a specific one by dataset language in the future
# see https://duckdb.org/docs/extensions/full_text_search.html for more details about 'stemmer' parameter
create_index_sql = CREATE_INDEX_COMMAND.format(columns=indexable_columns)
logging.debug(create_index_sql)
con.sql(create_index_sql)
con.close()
hf_api = HfApi(endpoint=hf_endpoint, token=hf_token)
committer_hf_api = HfApi(endpoint=hf_endpoint, token=committer_hf_token)
index_file_location = f"{config}/{split_directory}/{DUCKDB_DEFAULT_INDEX_FILENAME}"
try:
with lock.git_branch(
dataset=dataset, branch=target_revision, owner=job_id, sleeps=LOCK_GIT_BRANCH_RETRY_SLEEPS
):
logging.debug(f"try to create branch for {dataset=} with {target_revision=} on {hf_endpoint=}")
create_branch(
dataset=dataset,
target_revision=target_revision,
hf_api=hf_api,
committer_hf_api=committer_hf_api,
)
logging.debug(f"get dataset info for {dataset=} with {target_revision=}")
target_dataset_info = hf_api.dataset_info(repo_id=dataset, revision=target_revision, files_metadata=False)
all_repo_files: set[str] = {f.rfilename for f in target_dataset_info.siblings}
delete_operations: list[CommitOperation] = []
if index_file_location in all_repo_files:
delete_operations.append(CommitOperationDelete(path_in_repo=index_file_location))
logging.debug(f"delete operations for {dataset=} {delete_operations=}")
# send the files to the target revision
add_operations: list[CommitOperation] = [
CommitOperationAdd(path_in_repo=index_file_location, path_or_fileobj=db_path.resolve())
]
logging.debug(f"add operations for {dataset=} {add_operations=}")
retry_create_commit = retry(on=[HfHubHTTPError], sleeps=HF_HUB_HTTP_ERROR_RETRY_SLEEPS)(
committer_hf_api.create_commit
)
try:
retry_create_commit(
repo_id=dataset,
repo_type=DATASET_TYPE,
revision=target_revision,
operations=delete_operations + add_operations,
commit_message=commit_message,
parent_commit=target_dataset_info.sha,
)
except RuntimeError as e:
if e.__cause__ and isinstance(e.__cause__, HfHubHTTPError):
raise CreateCommitError(
message=(
f"Commit {commit_message} could not be created on the Hub (after"
f" {len(HF_HUB_HTTP_ERROR_RETRY_SLEEPS)} attempts)."
),
cause=e.__cause__,
) from e.__cause__
raise e
logging.debug(f"create commit {commit_message} for {dataset=} {add_operations=}")
# call the API again to get the index file
target_dataset_info = hf_api.dataset_info(repo_id=dataset, revision=target_revision, files_metadata=True)
logging.debug(f"dataset info for {dataset=} {target_dataset_info=}")
except TimeoutError as err:
raise LockedDatasetTimeoutError("the dataset is currently locked, please try again later.") from err
except RepositoryNotFoundError as err:
raise DatasetNotFoundError("The dataset does not exist on the Hub.") from err
repo_files = [
repo_file for repo_file in target_dataset_info.siblings if repo_file.rfilename == index_file_location
]
if not repo_files or len(repo_files) != 1:
logging.warning(f"Found {len(repo_files)} index files, should be only 1")
raise DuckDBIndexFileNotFoundError("No index file was found")
repo_file = repo_files[0]
if repo_file.size is None:
raise ValueError(f"Cannot get size of {repo_file.rfilename}")
# we added the __hf_index_id column for the index
features["__hf_index_id"] = {"dtype": "int64", "_type": "Value"}
return DuckdbIndexWithFeatures(
dataset=dataset,
config=config,
split=split,
url=hf_hub_url(
repo_id=dataset,
filename=repo_file.rfilename,
hf_endpoint=hf_endpoint,
revision=target_revision,
url_template=url_template,
),
filename=Path(repo_file.rfilename).name,
size=repo_file.size,
features=features,
)
class SplitDuckDbIndexJobRunner(SplitJobRunnerWithCache):
duckdb_index_config: DuckDbIndexConfig
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
duckdb_index_cache_directory: StrPath,
) -> None:
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
cache_directory=Path(duckdb_index_cache_directory),
)
self.duckdb_index_config = app_config.duckdb_index
@staticmethod
def get_job_type() -> str:
return "split-duckdb-index"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_SPLIT_DUCKDB_INDEX_VERSION
def compute(self) -> CompleteJobResult:
if self.cache_subdirectory is None:
raise CacheDirectoryNotInitializedError("Cache directory has not been initialized.")
return CompleteJobResult(
compute_index_rows(
job_id=self.job_info["job_id"],
dataset=self.dataset,
config=self.config,
split=self.split,
duckdb_index_file_directory=self.cache_subdirectory,
hf_token=self.app_config.common.hf_token,
url_template=self.duckdb_index_config.url_template,
commit_message=self.duckdb_index_config.commit_message,
extensions_directory=self.duckdb_index_config.extensions_directory,
committer_hf_token=self.duckdb_index_config.committer_hf_token,
hf_endpoint=self.app_config.common.hf_endpoint,
target_revision=self.duckdb_index_config.target_revision,
max_parquet_size_bytes=self.duckdb_index_config.max_parquet_size_bytes,
)
)
| datasets-server-main | services/worker/src/worker/job_runners/split/duckdb_index.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from libcommon.constants import PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import get_previous_step_or_raise
from libcommon.utils import is_image_url
from worker.dtos import (
CompleteJobResult,
ImageUrlColumnsResponse,
SplitFirstRowsResponse,
)
from worker.job_runners.split.split_job_runner import SplitJobRunner
STRING_FEATURE_DTYPE = "string"
VALUE_FEATURE_TYPE = "Value"
URL_COLUMN_RATIO = 0.3
def compute_image_url_columns(
dataset: str,
config: str,
split: str,
) -> ImageUrlColumnsResponse:
"""
Get the response of the split-image-url-columns cache for a specific split of a dataset from huggingface.co.
The response is not used directly in the API but is an input for the split-opt-in-out-urls-scan processing step.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
split (`str`):
A split name.
Returns:
[`ImageUrlColumnsResponse`]: The list of image url columns.
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
If the content of the previous step does not have the expected format
"""
logging.info(f"get image-url-columns for dataset={dataset} config={config} split={split}")
# get the first rows from previous job
upstream_response = get_previous_step_or_raise(
kinds=["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
dataset=dataset,
config=config,
split=split,
)
try:
first_rows_response = upstream_response.response
upstream_response_content = SplitFirstRowsResponse(
dataset=dataset,
config=config,
split=split,
features=first_rows_response["content"]["features"],
rows=first_rows_response["content"]["rows"],
truncated=first_rows_response["content"]["truncated"]
if "truncated" in first_rows_response["content"]
else None,
)
features = upstream_response_content["features"]
first_rows = upstream_response_content["rows"]
except KeyError as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
# look for image URL columns using the first rows
string_columns = [
feature["name"]
for feature in features
if "dtype" in feature["type"]
and "_type" in feature["type"]
and feature["type"]["dtype"] == STRING_FEATURE_DTYPE
and feature["type"]["_type"] == VALUE_FEATURE_TYPE
]
first_rows_size = len(first_rows)
if first_rows_size == 0:
return ImageUrlColumnsResponse(
columns=[],
)
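# NOTE (added for clarity, hypothetical numbers): a string column is kept only if strictly
# more than 30% of the sampled rows hold an image URL according to is_image_url. With 100
# sampled rows, 35 matches (0.35) selects the column, while 30 matches (0.30) does not.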
urls_columns = []
for string_column in string_columns:
urls_count = sum(
1
for row in first_rows
if isinstance(row["row"].get(string_column), str) and is_image_url(text=row["row"][string_column])
)
if urls_count and urls_count / first_rows_size > URL_COLUMN_RATIO:
urls_columns.append(string_column)
return ImageUrlColumnsResponse(
columns=urls_columns,
)
class SplitImageUrlColumnsJobRunner(SplitJobRunner):
@staticmethod
def get_job_type() -> str:
return "split-image-url-columns"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION
def compute(self) -> CompleteJobResult:
return CompleteJobResult(
compute_image_url_columns(
dataset=self.dataset,
config=self.config,
split=self.split,
)
)
| datasets-server-main | services/worker/src/worker/job_runners/split/image_url_columns.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from datasets import Audio, Features, Image
from fsspec.implementations.http import HTTPFileSystem
from libcommon.constants import (
PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION,
PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
)
from libcommon.exceptions import (
RowsPostProcessingError,
TooBigContentError,
TooManyColumnsError,
)
from libcommon.parquet_utils import Indexer, TooBigRows
from libcommon.processing_graph import ProcessingGraph, ProcessingStep
from libcommon.storage import StrPath
from libcommon.utils import JobInfo, Row, RowItem
from libcommon.viewer_utils.features import get_cell_value, to_features_list
from worker.config import AppConfig, FirstRowsConfig
from worker.dtos import CompleteJobResult, JobRunnerInfo, SplitFirstRowsResponse
from worker.job_runners.split.split_job_runner import SplitJobRunner
from worker.utils import create_truncated_row_items, get_json_size
def transform_rows(
dataset: str,
config: str,
split: str,
rows: list[RowItem],
features: Features,
assets_base_url: str,
assets_directory: StrPath,
) -> list[Row]:
return [
{
featureName: get_cell_value(
dataset=dataset,
config=config,
split=split,
row_idx=row_idx,
cell=row["row"][featureName] if featureName in row["row"] else None,
featureName=featureName,
fieldType=fieldType,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
)
for (featureName, fieldType) in features.items()
}
for row_idx, row in enumerate(rows)
]
def compute_first_rows_response(
dataset: str,
config: str,
split: str,
assets_base_url: str,
min_cell_bytes: int,
rows_max_bytes: int,
rows_max_number: int,
rows_min_number: int,
columns_max_number: int,
assets_directory: StrPath,
indexer: Indexer,
) -> SplitFirstRowsResponse:
logging.info(f"get first-rows for dataset={dataset} config={config} split={split}")
rows_index = indexer.get_rows_index(
dataset=dataset,
config=config,
split=split,
)
# validate the features
features = rows_index.parquet_index.features
if features and len(features) > columns_max_number:
raise TooManyColumnsError(
f"The number of columns ({len(features)}) exceeds the maximum supported number of columns"
f" ({columns_max_number}). This is a current limitation of the datasets viewer. You can reduce the number"
" of columns if you want the viewer to work."
)
# validate size of response without the rows
features_list = to_features_list(features=features)
response_features_only: SplitFirstRowsResponse = {
"dataset": dataset,
"config": config,
"split": split,
"features": features_list,
"rows": [],
"truncated": False,
}
surrounding_json_size = get_json_size(response_features_only)
if surrounding_json_size > rows_max_bytes:
raise TooBigContentError(
f"The size of the content of the first rows ({surrounding_json_size}) exceeds the maximum"
f" supported size ({rows_max_bytes} B) even after truncation. Please report the issue."
)
# get the rows
try:
pa_table = rows_index.query(offset=0, length=rows_max_number)
all_fetched = rows_index.parquet_index.num_rows_total <= rows_max_number
except TooBigRows as err:
raise TooBigContentError(str(err))
rows = [
RowItem(
{
"row_idx": idx,
"row": row,
"truncated_cells": [],
}
)
for idx, row in enumerate(pa_table.to_pylist())
]
# transform the rows, if needed (e.g. save the images or audio to the assets, and return their URL)
try:
transformed_rows = transform_rows(
dataset=dataset,
config=config,
split=split,
rows=rows,
features=features,
assets_base_url=assets_base_url,
assets_directory=assets_directory,
)
except Exception as err:
raise RowsPostProcessingError(
"Server error while post-processing the split rows. Please report the issue.",
cause=err,
) from err
# truncate the rows to fit within the restrictions, and prepare them as RowItems
columns_to_keep_untruncated = [col for col, feature in features.items() if isinstance(feature, (Image, Audio))]
row_items, truncated = create_truncated_row_items(
rows=transformed_rows,
min_cell_bytes=min_cell_bytes,
rows_max_bytes=rows_max_bytes - surrounding_json_size,
rows_min_number=rows_min_number,
columns_to_keep_untruncated=columns_to_keep_untruncated,
)
response = response_features_only
response["rows"] = row_items
response["truncated"] = (not all_fetched) or truncated
return response
class SplitFirstRowsFromParquetJobRunner(SplitJobRunner):
assets_directory: StrPath
first_rows_config: FirstRowsConfig
indexer: Indexer
@staticmethod
def get_job_type() -> str:
return "split-first-rows-from-parquet"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION
@staticmethod
def get_parallel_job_runner() -> JobRunnerInfo:
return JobRunnerInfo(
job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
job_type="split-first-rows-from-streaming",
)
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
processing_graph: ProcessingGraph,
assets_directory: StrPath,
parquet_metadata_directory: StrPath,
) -> None:
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
)
self.first_rows_config = app_config.first_rows
self.assets_directory = assets_directory
self.assets_base_url = app_config.assets.base_url
self.parquet_metadata_directory = parquet_metadata_directory
self.indexer = Indexer(
processing_graph=processing_graph,
hf_token=self.app_config.common.hf_token,
parquet_metadata_directory=parquet_metadata_directory,
httpfs=HTTPFileSystem(headers={"authorization": f"Bearer {self.app_config.common.hf_token}"}),
unsupported_features=[],
all_columns_supported_datasets_allow_list="all",
max_arrow_data_in_memory=app_config.rows_index.max_arrow_data_in_memory,
)
def compute(self) -> CompleteJobResult:
return CompleteJobResult(
compute_first_rows_response(
dataset=self.dataset,
config=self.config,
split=self.split,
assets_base_url=self.assets_base_url,
assets_directory=self.assets_directory,
min_cell_bytes=self.first_rows_config.min_cell_bytes,
rows_max_bytes=self.first_rows_config.max_bytes,
rows_max_number=self.first_rows_config.max_number,
rows_min_number=self.first_rows_config.min_number,
columns_max_number=self.first_rows_config.columns_max_number,
indexer=self.indexer,
)
)
| datasets-server-main | services/worker/src/worker/job_runners/split/first_rows_from_parquet.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from typing import Optional
from datasets import get_dataset_split_names
from datasets.builder import ManualDownloadError
from datasets.data_files import EmptyDatasetError as _EmptyDatasetError
from libcommon.constants import (
PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
)
from libcommon.exceptions import (
DatasetManualDownloadError,
EmptyDatasetError,
SplitNamesFromStreamingError,
)
from worker.dtos import CompleteJobResult, FullSplitItem, JobRunnerInfo, SplitsList
from worker.job_runners.config.config_job_runner import ConfigJobRunnerWithDatasetsCache
def compute_split_names_from_streaming_response(
dataset: str,
config: str,
hf_token: Optional[str] = None,
) -> SplitsList:
"""
Get the response of config-split-names-from-streaming for one specific dataset and config on huggingface.co.
Dataset can be private or gated if you pass an acceptable token.
It is assumed that the dataset exists and can be accessed using the token, and that the config exists in
the dataset.
This function relies on the streaming mode if the splits are not directly defined in the dataset config. See
https://github.dev/huggingface/datasets/blob/e183a269067575db8765ee979bd8523d14a1adae/src/datasets/inspect.py#L389-L390
The config-split-names-from-streaming response generated by this function does not include stats about the split,
like the size or number of samples. See dataset-info or dataset-size for that.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
hf_token (`str`, *optional*):
An authentication token (See https://huggingface.co/settings/token)
Returns:
`SplitsList`: An object with the list of split names for the dataset and config.
Raises the following errors:
- [`libcommon.exceptions.DatasetManualDownloadError`]:
If the dataset requires manual download.
- [`libcommon.exceptions.EmptyDatasetError`]
The dataset is empty.
        - [`libcommon.exceptions.SplitNamesFromStreamingError`]
If the list of splits could not be obtained using the datasets library.
"""
logging.info(f"get split names for dataset={dataset}, config={config}")
try:
split_name_items: list[FullSplitItem] = [
{"dataset": dataset, "config": config, "split": str(split)}
for split in get_dataset_split_names(path=dataset, config_name=config, token=hf_token)
]
except ManualDownloadError as err:
raise DatasetManualDownloadError(f"{dataset=} requires manual download.", cause=err) from err
except _EmptyDatasetError as err:
raise EmptyDatasetError("The dataset is empty.", cause=err) from err
except Exception as err:
raise SplitNamesFromStreamingError(
f"Cannot get the split names for the config '{config}' of the dataset.",
cause=err,
) from err
return SplitsList(splits=split_name_items)
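# --- Illustrative example (not part of the original module) ---
# A minimal sketch of the response shape returned above, assuming a hypothetical
# dataset "user/my_dataset" whose "default" config has "train" and "test" splits.
_EXAMPLE_SPLITS_RESPONSE = SplitsList(
    splits=[
        {"dataset": "user/my_dataset", "config": "default", "split": "train"},
        {"dataset": "user/my_dataset", "config": "default", "split": "test"},
    ]
)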
class ConfigSplitNamesFromStreamingJobRunner(ConfigJobRunnerWithDatasetsCache):
@staticmethod
def get_job_type() -> str:
return "config-split-names-from-streaming"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION
@staticmethod
def get_parallel_job_runner() -> JobRunnerInfo:
return JobRunnerInfo(
job_runner_version=PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
job_type="config-split-names-from-info",
)
def compute(self) -> CompleteJobResult:
return CompleteJobResult(
compute_split_names_from_streaming_response(
dataset=self.dataset,
config=self.config,
hf_token=self.app_config.common.hf_token,
)
)
| datasets-server-main | services/worker/src/worker/job_runners/config/split_names_from_streaming.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import PROCESSING_STEP_CONFIG_PARQUET_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import get_previous_step_or_raise
from worker.dtos import CompleteJobResult, ConfigParquetResponse
from worker.job_runners.config.config_job_runner import ConfigJobRunner
def compute_parquet_response(dataset: str, config: str) -> ConfigParquetResponse:
"""
Get the response of /parquet for one specific dataset on huggingface.co.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
Returns:
`ConfigParquetResponse`: An object with the parquet_response (list of parquet files).
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
          If the content of the previous step does not have the expected format
"""
logging.info(f"get parquet files for dataset={dataset}, config={config}")
previous_step = "config-parquet-and-info"
config_parquet_and_info_best_response = get_previous_step_or_raise(
kinds=[previous_step], dataset=dataset, config=config
)
content = config_parquet_and_info_best_response.response["content"]
try:
parquet_files = [
parquet_file for parquet_file in content["parquet_files"] if parquet_file.get("config") == config
]
        # sort by split and then by filename, which ensures the shards of each split are in order: 00000, 00001, ...
parquet_files.sort(key=lambda x: (x["split"], x["filename"]))
if "features" in content["dataset_info"] and isinstance(content["dataset_info"]["features"], dict):
features = content["dataset_info"]["features"]
else:
# (July 23) we can remove this later and raise an error instead (can be None for backward compatibility)
features = None
partial = content["partial"]
except KeyError as e:
raise PreviousStepFormatError("Previous step did not return the expected content: 'parquet_files'.", e) from e
return ConfigParquetResponse(parquet_files=parquet_files, features=features, partial=partial)
class ConfigParquetJobRunner(ConfigJobRunner):
@staticmethod
def get_job_type() -> str:
return "config-parquet"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_CONFIG_PARQUET_VERSION
def compute(self) -> CompleteJobResult:
return CompleteJobResult(compute_parquet_response(dataset=self.dataset, config=self.config))
| datasets-server-main | services/worker/src/worker/job_runners/config/parquet.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from pathlib import Path
from libcommon.exceptions import ParameterMissingError
from libcommon.processing_graph import ProcessingStep
from libcommon.utils import JobInfo
from worker.config import AppConfig
from worker.job_runners._job_runner_with_datasets_cache import (
JobRunnerWithDatasetsCache,
)
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
from worker.utils import check_config_exists
class ConfigJobRunner(DatasetJobRunner):
config: str
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
) -> None:
super().__init__(job_info=job_info, app_config=app_config, processing_step=processing_step)
if job_info["params"]["config"] is None:
raise ParameterMissingError("'config' parameter is required")
self.config = job_info["params"]["config"]
def validate(self) -> None:
check_config_exists(dataset=self.dataset, config=self.config)
class ConfigJobRunnerWithDatasetsCache(JobRunnerWithDatasetsCache, ConfigJobRunner):
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
hf_datasets_cache: Path,
) -> None:
JobRunnerWithDatasetsCache.__init__(
self=self,
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
hf_datasets_cache=hf_datasets_cache,
)
ConfigJobRunner.__init__(
self=self,
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
)
| datasets-server-main | services/worker/src/worker/job_runners/config/config_job_runner.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/src/worker/job_runners/config/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import functools
import logging
import os
import re
from collections.abc import Callable, Generator
from contextlib import ExitStack
from fnmatch import fnmatch
from multiprocessing.pool import ThreadPool
from pathlib import Path
from types import TracebackType
from typing import Any, Optional, TypeVar, Union
from unittest.mock import patch
from urllib.parse import unquote
import datasets
import datasets.config
import datasets.info
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import requests
from datasets import DownloadConfig, Features, load_dataset_builder
from datasets.arrow_writer import ParquetWriter
from datasets.builder import DatasetBuilder, ManualDownloadError
from datasets.data_files import EmptyDatasetError as _EmptyDatasetError
from datasets.download import StreamingDownloadManager
from datasets.packaged_modules.parquet.parquet import Parquet as ParquetBuilder
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.file_utils import (
get_authentication_headers_for_url,
http_head,
is_relative_path,
url_or_path_join,
)
from datasets.utils.py_utils import asdict, map_nested
from huggingface_hub._commit_api import (
CommitOperation,
CommitOperationAdd,
CommitOperationCopy,
CommitOperationDelete,
)
from huggingface_hub.hf_api import CommitInfo, DatasetInfo, HfApi, RepoFile
from huggingface_hub.hf_file_system import HfFileSystem
from huggingface_hub.utils._errors import HfHubHTTPError, RepositoryNotFoundError
from libcommon.constants import (
PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS,
PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS,
PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS,
PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_VERSION,
)
from libcommon.dataset import get_dataset_info_for_supported_datasets
from libcommon.exceptions import (
ConfigNamesError,
CreateCommitError,
DatasetInBlockListError,
DatasetManualDownloadError,
DatasetNotFoundError,
DatasetWithTooManyParquetFilesError,
EmptyDatasetError,
ExternalFilesSizeRequestConnectionError,
ExternalFilesSizeRequestError,
ExternalFilesSizeRequestHTTPError,
ExternalFilesSizeRequestTimeoutError,
FileSystemError,
LockedDatasetTimeoutError,
PreviousStepFormatError,
UnsupportedExternalFilesError,
)
from libcommon.processing_graph import ProcessingStep
from libcommon.queue import lock
from libcommon.simple_cache import get_previous_step_or_raise
from libcommon.utils import JobInfo, SplitHubFile
from tqdm.contrib.concurrent import thread_map
from worker.config import AppConfig, ParquetAndInfoConfig
from worker.dtos import CompleteJobResult, ConfigParquetAndInfoResponse
from worker.job_runners.config.config_job_runner import ConfigJobRunnerWithDatasetsCache
from worker.utils import (
HF_HUB_HTTP_ERROR_RETRY_SLEEPS,
LOCK_GIT_BRANCH_RETRY_SLEEPS,
create_branch,
hf_hub_url,
retry,
)
DATASET_TYPE = "dataset"
MAX_FILES_PER_DIRECTORY = 10_000 # hf hub limitation
MAX_OPERATIONS_PER_COMMIT = 500
# For paths like "en/partial-train/0000.parquet" in the C4 dataset.
# Note that "-" is forbidden for split names so it doesn't create directory names collisions.
PARTIAL_SPLIT_PREFIX = "partial-"
T = TypeVar("T")
def repo_file_rfilename_sort_key(repo_file: RepoFile) -> str:
if not isinstance(repo_file.rfilename, str): # check type for mypy
raise ValueError(f"Expected a string for repo_file.rfilename, but got a '{type(repo_file.rfilename)}'.")
return repo_file.rfilename
class ParquetFile:
def __init__(
self, local_file: str, local_dir: str, config: str, split: str, shard_idx: int, partial: bool = False
):
if not local_file.startswith(local_dir):
raise ValueError(f"{local_file} is not in {local_dir}")
if shard_idx >= MAX_FILES_PER_DIRECTORY:
raise DatasetWithTooManyParquetFilesError(
"The dataset has too many parquet files and can't be uploaded in the parquet directory "
f"because it exceeds the maximum number of files per directory ({MAX_FILES_PER_DIRECTORY})."
)
self.local_file = local_file
self.local_dir = local_dir
self.config = config
self.split = split
self.shard_idx = shard_idx
self.partial = partial
@property
def path_in_repo(self) -> str:
partial_prefix = PARTIAL_SPLIT_PREFIX if self.partial else ""
# Using 4 digits is ok since MAX_FILES_PER_DIRECTORY == 10_000
return f"{self.config}/{partial_prefix}{self.split}/{self.shard_idx:04d}.parquet"
filename_pattern = re.compile("^[0-9]{4}\\.parquet$")
def parse_repo_filename(filename: str) -> tuple[str, str]:
if not filename_pattern.match(os.path.basename(filename)):
raise ValueError(f"Cannot parse {filename}")
parts = filename.split("/")
if len(parts) != 3:
raise ValueError(f"Invalid filename: {filename}")
config, split, _ = parts
if split.startswith(PARTIAL_SPLIT_PREFIX):
split = split[len(PARTIAL_SPLIT_PREFIX) :] # noqa: E203
return config, split
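# --- Illustrative example (not part of the original module) ---
# A small sketch showing how `ParquetFile.path_in_repo` and `parse_repo_filename`
# round-trip, including the "partial-" prefix; the local paths are hypothetical.
def _example_repo_filename_roundtrip() -> None:
    parquet_file = ParquetFile(
        local_file="/cache/0000.parquet", local_dir="/cache", config="en", split="train", shard_idx=0, partial=True
    )
    assert parquet_file.path_in_repo == "en/partial-train/0000.parquet"
    assert parse_repo_filename(parquet_file.path_in_repo) == ("en", "train")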
def create_parquet_file_item(
repo_file: RepoFile,
dataset: str,
config: str,
hf_endpoint: str,
target_revision: str,
url_template: str,
) -> SplitHubFile:
if repo_file.size is None:
raise ValueError(f"Cannot get size of {repo_file.rfilename}")
_, split = parse_repo_filename(repo_file.rfilename)
return {
"dataset": dataset,
"config": config,
"split": split,
"url": hf_hub_url(
repo_id=dataset,
filename=repo_file.rfilename,
hf_endpoint=hf_endpoint,
revision=target_revision,
url_template=url_template,
),
"filename": Path(repo_file.rfilename).name,
"size": repo_file.size,
}
def raise_if_blocked(
dataset: str,
blocked_datasets: list[str],
) -> None:
"""
Raise an error if the dataset is in the list of blocked datasets
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
blocked_datasets (`list[str]`):
The list of blocked datasets. If empty, no dataset is blocked.
Patterns are supported, e.g. "open-llm-leaderboard/*"
Returns:
`None`
Raises the following errors:
- [`libcommon.exceptions.DatasetInBlockListError`]
If the dataset is in the list of blocked datasets.
"""
for blocked_dataset in blocked_datasets:
if fnmatch(dataset, blocked_dataset):
raise DatasetInBlockListError(
"The parquet conversion has been disabled for this dataset for now. Please open an issue in"
" https://github.com/huggingface/datasets-server if you want this dataset to be supported."
)
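# --- Illustrative example (not part of the original module) ---
# A small sketch of the fnmatch-based blocking above, using a hypothetical block list:
# "open-llm-leaderboard/*" blocks every dataset of that namespace, other datasets pass.
def _example_raise_if_blocked() -> None:
    blocked = ["open-llm-leaderboard/*"]
    raise_if_blocked(dataset="user/my_dataset", blocked_datasets=blocked)  # no error
    try:
        raise_if_blocked(dataset="open-llm-leaderboard/details_foo", blocked_datasets=blocked)
    except DatasetInBlockListError:
        pass  # expected: the dataset matches a blocked pattern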
def is_parquet_builder_with_hub_files(builder: DatasetBuilder) -> bool:
if not isinstance(builder, ParquetBuilder) or not builder.config.data_files:
return False
for split in builder.config.data_files:
for data_file in builder.config.data_files[split]:
if not data_file.startswith(f"hf://datasets/{builder.repo_id}@"):
return False
return True
def _is_too_big_from_hub(
dataset_info: DatasetInfo,
max_dataset_size: int,
) -> bool:
"""
    Check whether the dataset is too big to be converted to parquet, as measured by the sum of the repository
    files sizes given by the Hub.
Args:
dataset_info (`DatasetInfo`):
The dataset info
max_dataset_size (`int`):
The maximum size of the dataset in bytes
"""
dataset_size: int = sum(sibling.size for sibling in dataset_info.siblings if sibling.size is not None)
return bool(dataset_size > max_dataset_size)
def _is_too_big_from_datasets(
info: datasets.DatasetInfo,
max_dataset_size: int,
) -> bool:
"""
    Check whether the dataset is too big to be converted to parquet, as measured by the sum of the configs
    sizes given by the datasets library.
Args:
info (`datasets.DatasetInfo`):
Dataset info from the datasets library
max_dataset_size (`int`):
The maximum size of the dataset in bytes
"""
dataset_size = info.dataset_size if info.dataset_size is not None else 0
return bool(dataset_size > max_dataset_size)
def raise_if_requires_manual_download(
builder: DatasetBuilder,
hf_endpoint: str,
hf_token: Optional[str],
) -> None:
"""
Raise an error if the dataset requires manual download.
Args:
builder (`datasets.builder.DatasetBuilder`):
A dataset builder instance to check.
hf_endpoint (`str`):
The Hub endpoint (for example: "https://huggingface.co").
hf_token (`str`, *optional*):
An app authentication token with read access to all the datasets.
Returns:
`None`
Raises:
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError):
If the datasets.config.HF_ENDPOINT is not set to the expected value.
[`libcommon.exceptions.DatasetManualDownloadError`]:
If the dataset requires manual download.
"""
if datasets.config.HF_ENDPOINT != hf_endpoint:
raise ValueError(
f"Invalid datasets.config.HF_ENDPOINT value: '{datasets.config.HF_ENDPOINT}'. Please set it to:"
f" '{hf_endpoint}'."
)
try:
builder._check_manual_download(
StreamingDownloadManager(base_path=builder.base_path, download_config=DownloadConfig(token=hf_token))
)
except ManualDownloadError as err:
raise DatasetManualDownloadError(f"dataset={builder.repo_id} requires manual download.", cause=err) from err
def is_dataset_too_big(
dataset_info: DatasetInfo,
builder: DatasetBuilder,
hf_endpoint: str,
hf_token: Optional[str],
max_dataset_size: int,
max_external_data_files: int,
) -> bool:
"""
Check:
- the size of the dataset repository
- the size in dataset info
- the size and number of external files
Args:
dataset_info (`DatasetInfo`):
The dataset info
builder (`datasets.builder.DatasetBuilder`):
A dataset builder instance to check.
hf_endpoint (`str`):
The Hub endpoint (for example: "https://huggingface.co")
hf_token (`str`, `optional`):
An app authentication token with read access to all the datasets.
max_dataset_size (`int`):
The maximum size of a dataset in bytes. If the dataset is under the limit (which means that the size
can be fetched), it will be allowed.
max_external_data_files (`int`):
The maximum number of external data files (i.e. not hosted on HF).
If the dataset is under the limit (which means that the files can be fetched), it will be allowed.
    Returns:
        `bool`: `True` if the dataset is too big to be converted to parquet, `False` otherwise.
Raises the following errors:
- [`libcommon.exceptions.UnsupportedExternalFilesError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`libcommon.exceptions.ExternalFilesSizeRequestHTTPError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`libcommon.exceptions.ExternalFilesSizeRequestConnectionError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`libcommon.exceptions.ExternalFilesSizeRequestTimeoutError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`libcommon.exceptions.ExternalFilesSizeRequestError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If the datasets.config.HF_ENDPOINT is not set to the expected value
"""
if datasets.config.HF_ENDPOINT != hf_endpoint:
raise ValueError(
f"Invalid datasets.config.HF_ENDPOINT value: '{datasets.config.HF_ENDPOINT}'. Please set it to:"
f" '{hf_endpoint}'."
)
return (
_is_too_big_from_hub(dataset_info=dataset_info, max_dataset_size=max_dataset_size)
or _is_too_big_from_datasets(
builder.info,
max_dataset_size=max_dataset_size,
)
or _is_too_big_from_external_data_files(
builder=builder,
max_dataset_size=max_dataset_size,
max_external_data_files=max_external_data_files,
hf_token=hf_token,
)
)
class EmptySplitsError(Exception):
pass
class SplitInfoFormatError(Exception):
pass
class EmptyConfigNameError(Exception):
pass
class EmptyDownloadSizeError(Exception):
pass
class EmptyFeaturesError(Exception):
pass
def _request_size(url: str, hf_token: Optional[str] = None) -> Optional[int]:
headers = get_authentication_headers_for_url(url, token=hf_token)
response = http_head(url, headers=headers, max_retries=3)
response.raise_for_status()
size = response.headers.get("Content-Length") if response.ok else None
return int(size) if size is not None else size
class _MockStreamingDownloadManager(StreamingDownloadManager): # type: ignore
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.ext_data_files: list[str] = []
def download(self, url_or_urls: Any) -> Any:
url_or_urls = map_nested(
self._download,
url_or_urls,
map_tuple=True,
parallel_min_length=np.inf,
# ^ parallel_min_length has int type, but is currently used in datasets for a comparison only
# and it works with np.inf. No conversion is involved
# (would raise: OverflowError: cannot convert float infinity to integer)
)
return url_or_urls
def _download(self, urlpath: Any) -> str:
urlpath_str = str(urlpath)
if is_relative_path(urlpath_str):
# append the relative path to the base_path
urlpath_str = url_or_path_join(self._base_path, urlpath_str)
elif not urlpath_str.startswith(self._base_path):
# it's an external file
self.ext_data_files.append(urlpath_str)
return urlpath_str
def _is_too_big_from_external_data_files(
builder: DatasetBuilder, max_dataset_size: int, max_external_data_files: int, hf_token: Optional[str]
) -> bool:
# Packaged dataset modules only load data files that are inside the dataset repository.
    # No need to check them since they're already caught by `_is_too_big_from_hub`
if type(builder).__module__.startswith("datasets."):
return False
# For datasets with a loading script however, we need to check the downloaded files
mock_dl_manager = _MockStreamingDownloadManager(
base_path=builder.base_path, download_config=DownloadConfig(token=hf_token)
)
try:
builder._split_generators(mock_dl_manager)
except (requests.exceptions.RequestException, NotImplementedError) as error:
if isinstance(error, NotImplementedError):
# we can ignore the errors from functions not implemented in streaming mode like `.extract()` on TAR files
if "is not implemented in streaming mode." not in str(error):
raise UnsupportedExternalFilesError(
(
"Couldn't get the list of external files in `_split_generators` because it doesn't support"
f" streaming:\n{error}"
),
error,
) from error
elif isinstance(error, requests.exceptions.HTTPError):
raise ExternalFilesSizeRequestHTTPError(
(
"Couldn't get the list of external files in `_split_generators` because a request"
f" failed:\n{error}\nPlease consider moving your data files in this dataset repository instead"
" (e.g. inside a data/ folder)."
),
error,
) from error
elif isinstance(error, requests.exceptions.ConnectionError):
raise ExternalFilesSizeRequestConnectionError(
(
"Couldn't get the list of external files in `_split_generators` because a request"
f" failed:\n{error}\nPlease consider moving your data files in this dataset repository instead"
" (e.g. inside a data/ folder)."
),
error,
) from error
elif isinstance(error, requests.exceptions.Timeout):
raise ExternalFilesSizeRequestTimeoutError(
(
"Couldn't get the list of external files in `_split_generators` because a request"
f" failed:\n{error}\nPlease consider moving your data files in this dataset repository instead"
" (e.g. inside a data/ folder)."
),
error,
) from error
else:
raise ExternalFilesSizeRequestError(
(
"Couldn't get the list of external files in `_split_generators` because a request"
f" failed:\n{error}\nPlease consider moving your data files in this dataset repository instead"
" (e.g. inside a data/ folder)."
),
error,
) from error
ext_data_files = mock_dl_manager.ext_data_files
if len(ext_data_files) > max_external_data_files:
return True
elif ext_data_files:
try:
with ThreadPool(16) as pool:
total_size = 0
get_size = functools.partial(_request_size, hf_token=hf_token)
for i, size in enumerate(pool.imap_unordered(get_size, ext_data_files)):
if size is not None:
total_size += size
return total_size > max_dataset_size
return False
except requests.exceptions.RequestException as error:
if isinstance(error, requests.exceptions.HTTPError):
raise ExternalFilesSizeRequestHTTPError(
(
"Couldn't get the size of external files in `_split_generators` because a request"
f" failed:\n{error}\nPlease consider moving your data files in this dataset repository instead"
" (e.g. inside a data/ folder)."
),
error,
) from error
elif isinstance(error, requests.exceptions.ConnectionError):
raise ExternalFilesSizeRequestConnectionError(
(
"Couldn't get the size of external files in `_split_generators` because a request"
f" failed:\n{error}\nPlease consider moving your data files in this dataset repository instead"
" (e.g. inside a data/ folder)."
),
error,
) from error
elif isinstance(error, requests.exceptions.Timeout):
raise ExternalFilesSizeRequestTimeoutError(
(
"Couldn't get the size of external files in `_split_generators` because a request"
f" failed:\n{error}\nPlease consider moving your data files in this dataset repository instead"
" (e.g. inside a data/ folder)."
),
error,
) from error
else:
raise ExternalFilesSizeRequestError(
(
"Couldn't get the size of external files in `_split_generators` because a request"
f" failed:\n{error}\nPlease consider moving your data files in this dataset repository instead"
" (e.g. inside a data/ folder)."
),
error,
) from error
return False
def get_writer_batch_size_from_info(ds_config_info: datasets.info.DatasetInfo) -> Optional[int]:
"""
Get the writer_batch_size that defines the maximum row group size in the parquet files.
The default in `datasets` is 1,000 but we lower it to 100 for image datasets.
    This makes it possible to optimize random access to the parquet files, since accessing 1 row requires
    reading its entire row group.
Args:
ds_config_info (`datasets.info.DatasetInfo`):
Dataset info from `datasets`.
Returns:
writer_batch_size (`Optional[int]`):
Writer batch size to pass to a dataset builder.
If `None`, then it will use the `datasets` default.
"""
if "Audio(" in str(ds_config_info.features):
return PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS
elif "Image(" in str(ds_config_info.features):
return PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS
elif "'binary'" in str(ds_config_info.features):
return PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS
else:
return None
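# --- Illustrative example (not part of the original module) ---
# A minimal sketch of the rule above: a config whose features contain an Image column
# gets the smaller row group size defined for image datasets.
def _example_writer_batch_size_from_info() -> None:
    info = datasets.info.DatasetInfo(features=Features({"image": datasets.Image()}))
    assert (
        get_writer_batch_size_from_info(info)
        == PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS
    )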
def get_writer_batch_size_from_row_group_size(
num_rows: int, row_group_byte_size: int, max_row_group_byte_size: int, factor_of: int = 100, divide_step: int = 10
) -> int:
"""
Get the writer_batch_size that defines the maximum row group size in the parquet files,
    given a sample row group size that might be too big.
    This makes it possible to optimize random access to the parquet files, since accessing 1 row requires
    reading its entire row group.
Args:
num_rows (`int`):
Number of rows in the sample row group.
row_group_byte_size (`int`):
Number of bytes of uncompressed data in the sample row group.
max_row_group_byte_size (`int`):
Maximum number of bytes of uncompressed data for batches that
will be passed to a dataset builder.
Returns:
        writer_batch_size (`int`):
            Writer batch size to pass to a dataset builder; always a multiple of `factor_of`.
"""
writer_batch_size = max(num_rows // factor_of * factor_of, factor_of)
writer_batch_byte_size = row_group_byte_size * writer_batch_size / num_rows
while writer_batch_size > factor_of and writer_batch_byte_size > max_row_group_byte_size:
writer_batch_size = max(writer_batch_size // divide_step // factor_of * factor_of, factor_of)
writer_batch_byte_size = row_group_byte_size * writer_batch_size / num_rows
return writer_batch_size
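# --- Illustrative worked example (not part of the original module) ---
# Sketch of the computation above with hypothetical numbers: a sample row group of
# 1,000 rows weighing ~600 MB uncompressed, capped at 100 MB, is divided by
# `divide_step` once, giving a batch of 100 rows (~60 MB estimated).
def _example_writer_batch_size_from_row_group_size() -> None:
    assert (
        get_writer_batch_size_from_row_group_size(
            num_rows=1_000, row_group_byte_size=600_000_000, max_row_group_byte_size=100_000_000
        )
        == 100
    )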
def copy_parquet_files(builder: DatasetBuilder) -> list[CommitOperationCopy]:
"""Copy parquet files by copying the git LFS pointer files"""
data_files = builder.config.data_files
if not data_files:
raise EmptyDatasetError("Empty parquet data_files")
parquet_operations = []
total_num_parquet_files = sum(len(data_files[split]) for split in data_files)
if total_num_parquet_files >= MAX_FILES_PER_DIRECTORY:
raise DatasetWithTooManyParquetFilesError(
f"The dataset has {total_num_parquet_files} parquet files and can't be linked in the parquet directory "
f"because it exceeds the maximum number of files per directory ({MAX_FILES_PER_DIRECTORY})."
)
for split in data_files:
for shard_idx, data_file in enumerate(data_files[split]):
# data_file format for hub files is hf://datasets/{repo_id}@{revision}/{path_in_repo}
src_revision, src_path_in_repo = data_file.split("@")[1].split("/", 1)
src_revision = unquote(src_revision)
src_path_in_repo = unquote(src_path_in_repo)
path_in_repo = f"{builder.config.name}/{split}/{shard_idx:04d}.parquet"
parquet_operations.append(
CommitOperationCopy(
src_path_in_repo=src_path_in_repo, path_in_repo=path_in_repo, src_revision=src_revision
)
)
return parquet_operations
class NotAParquetFileError(ValueError):
"""When a remote parquet file can't be parsed"""
pass
class ParquetValidationError(ValueError):
"""When a parquet file is not validated for copy"""
class TooBigRowGroupsError(ParquetValidationError):
"""When a parquet file has row groups that are too big for copy"""
def __init__(self, *args: object, num_rows: int, row_group_byte_size: int) -> None:
super().__init__(*args)
self.num_rows = num_rows
self.row_group_byte_size = row_group_byte_size
def get_parquet_file_and_size(url: str, hf_endpoint: str, hf_token: Optional[str]) -> tuple[pq.ParquetFile, int]:
fs = HfFileSystem(endpoint=hf_endpoint, token=hf_token)
f = fs.open(url)
return pq.ParquetFile(f), f.size
def retry_and_validate_get_parquet_file_and_size(
url: str, hf_endpoint: str, hf_token: Optional[str], validate: Optional[Callable[[pq.ParquetFile], None]]
) -> tuple[pq.ParquetFile, int]:
try:
sleeps = [1, 1, 1, 10, 10, 10]
pf, size = retry(on=[pa.ArrowInvalid], sleeps=sleeps)(get_parquet_file_and_size)(url, hf_endpoint, hf_token)
if validate:
validate(pf)
return pf, size
except RuntimeError as err:
if err.__cause__ and isinstance(err.__cause__, pa.ArrowInvalid):
raise NotAParquetFileError(f"Not a parquet file: '{url}'") from err.__cause__
else:
raise err
class ParquetFileValidator:
"""
Validate the Parquet files before they are copied to the target revision.
In particular we check that the row group size is not too big, otherwise the dataset viewer
doesn't work correctly.
    Note: we only validate the first few parquet files (5 by default).
    We don't want to check the biggest row group of the whole dataset, but rather just get the order
of magnitude of the size. Otherwise we might end up converting a dataset that has 99% good row
groups but 1% that is a bit too big, which is overkill.
"""
def __init__(self, max_row_group_byte_size: int, max_validation: int = 5) -> None:
self.max_row_group_byte_size = max_row_group_byte_size
self.num_validations = 0
self.max_validations = max_validation
def validate(self, pf: pq.ParquetFile) -> None:
if self.num_validations >= self.max_validations:
return
row_group_metadata = pf.metadata.row_group(0)
row_group_size = row_group_metadata.total_byte_size
if row_group_metadata.total_byte_size > self.max_row_group_byte_size:
raise TooBigRowGroupsError(
(
f"Parquet file has too big row groups. First row group has {row_group_size} which exceeds the"
f" limit of {self.max_row_group_byte_size}"
),
num_rows=row_group_metadata.num_rows,
row_group_byte_size=row_group_metadata.total_byte_size,
)
self.num_validations += 1
def fill_builder_info(
builder: DatasetBuilder,
hf_endpoint: str,
hf_token: Optional[str],
validate: Optional[Callable[[pq.ParquetFile], None]],
) -> None:
"""Fill the builder DatasetInfo from the copied parquet files"""
data_files = builder.config.data_files
if not data_files:
raise EmptyDatasetError("Empty parquet data_files")
builder.info.builder_name = builder.name
builder.info.dataset_name = builder.dataset_name
builder.info.config_name = builder.config.name
builder.info.version = builder.config.version
builder.info.splits = SplitDict()
builder.info.download_size = 0
builder.info.dataset_size = 0
for split in data_files:
split = str(split) # in case it's a NamedSplit
try:
parquet_files_and_sizes: list[tuple[pq.ParquetFile, int]] = thread_map(
functools.partial(
retry_and_validate_get_parquet_file_and_size,
hf_endpoint=hf_endpoint,
hf_token=hf_token,
validate=validate,
),
data_files[split],
unit="pq",
disable=True,
)
parquet_files, sizes = zip(*parquet_files_and_sizes)
except ParquetValidationError:
raise
except Exception as e:
raise FileSystemError(f"Could not read the parquet files: {e}") from e
if parquet_files:
first_pf = parquet_files[0]
if builder.info.features is None:
builder.info.features = Features.from_arrow_schema(first_pf.schema_arrow)
first_row_group = first_pf.read_row_group(0)
compression_ratio = first_row_group.nbytes / first_row_group.num_rows
num_examples = sum(parquet_file.metadata.num_rows for parquet_file in parquet_files)
approx_num_bytes = int(compression_ratio * num_examples)
builder.info.splits.add(SplitInfo(split, num_bytes=approx_num_bytes, num_examples=num_examples))
builder.info.download_size += sum(sizes)
builder.info.dataset_size += approx_num_bytes
class limit_parquet_writes:
"""
Context manager that limits the number of bytes a `DatasetBuilder` can write to parquet.
It works by monitoring the calls to `pq.ParquetWriter.write_table` and stopping
the `GeneratorBasedBuilder._generate_examples` and `ArrowBasedBuilder._generate_tables`
generators once we reach the maximum number of bytes.
Since the generator is stopped after we reach the maximum number of bytes, the actual
number of bytes generated might be slightly higher than the requested limit.
Example of usage:
```python
builder = load_dataset_builder("squad")
max_dataset_size = 10_000_000
with limit_parquet_writes(builder, max_dataset_size=max_dataset_size) as limiter:
builder.download_and_prepare(file_format="parquet")
assert builder.info.dataset_size == limiter.total_bytes < max_dataset_size + epsilon
```
The limiter is usually used with a `StreamingDownloadManager` to not have to download
the full dataset:
```python
builder = load_dataset_builder("squad")
max_dataset_size = 10_000_000
dl_manager = StreamingDownloadManager(...)
for split_generator in builder._split_generators(dl_manager):
with limit_parquet_writes(builder, max_dataset_size=max_dataset_size):
builder._prepare_split(split_generator=split_generator, file_format="parquet")
```
"""
def __init__(
self,
builder: Union[datasets.builder.GeneratorBasedBuilder, datasets.builder.ArrowBasedBuilder],
max_dataset_size: int,
) -> None:
self.total_bytes = 0
self.builder = builder
self.max_dataset_size = max_dataset_size
self.exit_stack = ExitStack()
def __enter__(self) -> "limit_parquet_writes":
limiter = self
class _TrackedParquetWriter(pq.ParquetWriter): # type: ignore
"""Count on-the-fly how many bytes are written"""
def track_write_table(self, pa_table: pa.Table) -> None:
limiter.total_bytes += pa_table.nbytes
def write_table(self, pa_table: pa.Table, row_group_size: Optional[int] = None) -> None:
self.track_write_table(pa_table)
super().write_table(pa_table, row_group_size=row_group_size)
def limited_generator(
generator: Callable[..., Generator[T, None, None]]
) -> Callable[..., Generator[T, None, None]]:
"""Stop the underlying generator once we reach the maximum dataset size"""
@functools.wraps(generator)
def wrapped(*args: Any, **kwargs: Any) -> Generator[T, None, None]:
for item in generator(*args, **kwargs):
if limiter.total_bytes < limiter.max_dataset_size:
yield item
else:
break
return wrapped
self.exit_stack.enter_context(patch.object(ParquetWriter, "_WRITER_CLASS", _TrackedParquetWriter))
if isinstance(self.builder, datasets.builder.GeneratorBasedBuilder):
self.exit_stack.enter_context(
patch.object(self.builder, "_generate_examples", limited_generator(self.builder._generate_examples))
)
else:
self.exit_stack.enter_context(
patch.object(self.builder, "_generate_tables", limited_generator(self.builder._generate_tables))
)
return self
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
return self.exit_stack.close()
def list_generated_parquet_files(builder: DatasetBuilder, partial: bool = False) -> list[ParquetFile]:
"""List the parquet files generated by `builder.download_and_prepare` in the `builder.cache_dir`."""
if not builder.info.splits:
raise EmptyDatasetError("No split found after generating parquet files")
split_dict = builder.info.splits
local_parquet_files: list[ParquetFile] = []
for split, split_info in split_dict.items():
# We know the `datasets` library uses a template for the shards names:
# - {builder.dataset_name}-{split}.parquet if there is only one shard
# - {builder.dataset_name}-{split}-{shard_idx:05d}-of-{num_shards:05d}.parquet otherwise
num_shards = len(split_info.shard_lengths) if isinstance(split_info.shard_lengths, list) else 1
filename_suffix = "-{shard_idx:05d}-of-" + f"{num_shards:05d}" if num_shards > 1 else ""
filename = f"{builder.dataset_name}-{split}{filename_suffix}.parquet"
local_parquet_files.extend(
[
ParquetFile(
local_file=os.path.join(
builder.cache_dir,
filename.format(shard_idx=shard_idx),
),
local_dir=builder.cache_dir,
config=builder.config.name,
split=split,
shard_idx=shard_idx,
partial=partial,
)
for shard_idx in range(num_shards)
]
)
return local_parquet_files
def stream_convert_to_parquet(
builder: DatasetBuilder, max_dataset_size: Optional[int], writer_batch_size: Optional[int] = None
) -> tuple[list[CommitOperationAdd], bool]:
"""Stream and prepare the dataset as parquet files and fills the builder info."""
writer_batch_size = writer_batch_size or get_writer_batch_size_from_info(builder.info)
if writer_batch_size is not None and (
builder._writer_batch_size is None or builder._writer_batch_size > writer_batch_size
):
builder._writer_batch_size = writer_batch_size
dl_manager = StreamingDownloadManager(
base_path=builder.base_path,
download_config=DownloadConfig(token=builder.token, storage_options=builder.storage_options),
dataset_name=builder.name,
data_dir=builder.config.data_dir,
)
os.makedirs(builder.cache_dir, exist_ok=True)
split_dict = SplitDict(dataset_name=builder.name)
splits_generators = {sg.name: sg for sg in builder._split_generators(dl_manager)}
prepare_split_kwargs: dict[str, Any] = (
{"check_duplicate_keys": True} if isinstance(builder, datasets.builder.GeneratorBasedBuilder) else {}
)
partial = False
for split in splits_generators:
split_dict.add(splits_generators[split].split_info)
if max_dataset_size is None:
builder._prepare_split(
split_generator=splits_generators[split], file_format="parquet", **prepare_split_kwargs
)
else:
with limit_parquet_writes(builder, max_dataset_size=max_dataset_size) as limiter:
builder._prepare_split(
split_generator=splits_generators[split], file_format="parquet", **prepare_split_kwargs
)
partial = partial or limiter.total_bytes >= max_dataset_size
builder.info.splits = split_dict
builder.info.dataset_size = sum(split.num_bytes for split in builder.info.splits.values())
builder.info.download_size = None
builder.info.size_in_bytes = None
# send the files to the target revision
local_parquet_files = list_generated_parquet_files(builder, partial=partial)
parquet_operations: list[CommitOperationAdd] = [
CommitOperationAdd(path_in_repo=parquet_file.path_in_repo, path_or_fileobj=parquet_file.local_file)
for parquet_file in local_parquet_files
]
return parquet_operations, partial
def convert_to_parquet(builder: DatasetBuilder) -> list[CommitOperationAdd]:
"""Download and prepare the dataset as parquet files and fills the builder info."""
# prepare the parquet files locally
writer_batch_size = get_writer_batch_size_from_info(builder.info)
if writer_batch_size is not None and (
builder._writer_batch_size is None or builder._writer_batch_size > writer_batch_size
):
builder._writer_batch_size = writer_batch_size
builder.download_and_prepare(
file_format="parquet"
) # the parquet files are stored in the cache dir and it fills the info
local_parquet_files = list_generated_parquet_files(builder)
# send the files to the target revision
parquet_operations: list[CommitOperationAdd] = [
CommitOperationAdd(path_in_repo=parquet_file.path_in_repo, path_or_fileobj=parquet_file.local_file)
for parquet_file in local_parquet_files
]
logging.debug(f"{parquet_operations=}")
return parquet_operations
def create_commits(
hf_api: HfApi,
repo_id: str,
operations: list[CommitOperation],
*,
commit_message: str,
revision: Optional[str] = None,
parent_commit: Optional[str] = None,
max_operations_per_commit: int = MAX_OPERATIONS_PER_COMMIT,
) -> list[CommitInfo]:
"""
Creates one or several commits in the given dataset repo, deleting & uploading files as needed.
Args:
hf_api (`huggingface_hub.HfApi`):
The HfApi to use to commit the operations.
repo_id (`str`):
The repository in which the commit will be created, for example:
`"username/my_dataset"`
operations (`Iterable` of [`huggingface_hub.hf_api.CommitOperation`]):
An iterable of operations to include in the commit, either:
- [`huggingface_hub.hf_api.CommitOperationAdd`] to upload a file
- [`huggingface_hub.hf_api.CommitOperationDelete`] to delete a file
- [`huggingface_hub.hf_api.CommitOperationCopy`] to copy a file
commit_message (`str`):
The summary (first line) of the commit that will be created.
commit_description (`str`, *optional*):
The description of the commit that will be created
token (`str`, *optional*):
Authentication token, obtained with `HfApi.login` method. Will
default to the stored token.
repo_type (`str`, *optional*):
Set to `"dataset"` or `"space"` if uploading to a dataset or
space, `None` or `"model"` if uploading to a model. Default is
`None`.
revision (`str`, *optional*):
The git revision to commit from. Defaults to the head of the `"main"` branch.
parent_commit (`str`, *optional*):
The OID / SHA of the parent commit, as a hexadecimal string.
Shorthands (7 first characters) are also supported. If specified and `create_pr` is `False`,
the commit will fail if `revision` does not point to `parent_commit`. If specified and `create_pr`
is `True`, the pull request will be created from `parent_commit`. Specifying `parent_commit`
ensures the repo has not changed before committing the changes, and can be especially useful
if the repo is updated / committed to concurrently.
max_operations_per_commit (`int`, *optional*):
The max number of operations per commit, to avoid time out errors from the Hub. Defaults to 500.
Returns:
[`list[huggingface_hub.CommitInfo]`]:
List of [`CommitInfo`] containing information about the newly created commit (commit hash, commit
url, pr url, commit message,...).
Raises:
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If commit message is empty.
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If parent commit is not a valid commit OID.
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If the Hub API returns an HTTP 400 error (bad request)
[`huggingface_hub.utils.RepositoryNotFoundError`]:
If repository is not found (error 404): wrong repo_id/repo_type, private
but not authenticated or repo does not exist.
[`libcommon.exceptions.CreateCommitError`]:
If one of the commits could not be created on the Hub.
"""
commit_infos: list[CommitInfo] = []
offsets = range(0, len(operations), max_operations_per_commit)
for commit_idx, offset in enumerate(offsets):
batch_msg = f" (step {commit_idx + 1} of {len(offsets)})" if len(offsets) > 1 else ""
retry_create_commit = retry(on=[HfHubHTTPError], sleeps=HF_HUB_HTTP_ERROR_RETRY_SLEEPS)(hf_api.create_commit)
try:
commit_info = retry_create_commit(
repo_id=repo_id,
repo_type=DATASET_TYPE,
revision=revision,
operations=operations[offset : offset + max_operations_per_commit], # noqa: E203
commit_message=commit_message + batch_msg,
parent_commit=commit_infos[-1].oid if commit_infos else parent_commit,
)
except RuntimeError as e:
if e.__cause__ and isinstance(e.__cause__, HfHubHTTPError):
raise CreateCommitError(
message=(
f"Commit {commit_idx}/{len(offsets)} could not be created on the Hub (after"
f" {len(HF_HUB_HTTP_ERROR_RETRY_SLEEPS)} attempts)."
),
cause=e.__cause__,
) from e.__cause__
raise e
commit_infos.append(commit_info)
return commit_infos
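# --- Illustrative example (not part of the original module) ---
# A small sketch of the batching above: 1,200 operations with the default limit of
# 500 per commit are split into 3 commits, and each commit message gets a
# " (step i of 3)" suffix.
def _example_commit_batching() -> None:
    offsets = range(0, 1_200, MAX_OPERATIONS_PER_COMMIT)
    assert len(offsets) == 3
    assert [f" (step {idx + 1} of {len(offsets)})" for idx, _ in enumerate(offsets)] == [
        " (step 1 of 3)",
        " (step 2 of 3)",
        " (step 3 of 3)",
    ]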
def get_delete_operations(
parquet_operations: list[CommitOperationAdd], all_repo_files: set[str], config_names: set[str], config: str
) -> list[CommitOperationDelete]:
# - get files that will be preserved in repo:
# 1. parquet files belonging to any other config (otherwise outdated files might be preserved)
# 2. duckdb files belonging to any config
# 3. .gitattributes
pattern_in_any_config_dir = re.compile(f"^({'|'.join(re.escape(conf) for conf in config_names)})/")
pattern_in_any_other_config_dir = re.compile(
f"^({'|'.join(re.escape(conf) for conf in config_names.difference({config}))})/"
)
files_to_ignore: set[str] = {
file
for file in all_repo_files
if (pattern_in_any_other_config_dir.match(file) and file.endswith(".parquet"))
or (pattern_in_any_config_dir.match(file) and file.endswith(".duckdb"))
}.union({".gitattributes"})
# - get files to be deleted - all files except for:
# - the files to be preserved
# - parquet files obtained for current config at this processing step
files_to_add = [operation.path_in_repo for operation in parquet_operations]
files_to_delete = all_repo_files - set(files_to_add).union(files_to_ignore)
delete_operations = [CommitOperationDelete(path_in_repo=file) for file in files_to_delete]
logging.debug(f"{delete_operations=}")
return delete_operations
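# --- Illustrative example (not part of the original module) ---
# A small sketch of the cleanup rule above, with hypothetical repo files: when committing
# new parquet files for config "en", parquet files of other configs, duckdb indexes of any
# config and .gitattributes are preserved, while the stale "en" parquet file is deleted.
def _example_get_delete_operations() -> None:
    deleted = get_delete_operations(
        parquet_operations=[
            CommitOperationAdd(path_in_repo="en/train/0000.parquet", path_or_fileobj=b"parquet bytes")
        ],
        all_repo_files={
            "en/train/0000.parquet",
            "en/train/0001.parquet",
            "fr/train/0000.parquet",
            "en/index.duckdb",
            ".gitattributes",
        },
        config_names={"en", "fr"},
        config="en",
    )
    assert {operation.path_in_repo for operation in deleted} == {"en/train/0001.parquet"}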
def commit_parquet_conversion(
hf_api: HfApi,
committer_hf_api: HfApi,
dataset: str,
config: str,
config_names: set[str],
parquet_operations: list[CommitOperation],
commit_message: str,
target_revision: Optional[str],
) -> list[CommitInfo]:
"""
Creates one or several commits in the given dataset repo, deleting & uploading files as needed.
Args:
hf_api (`huggingface_hub.HfApi`):
The HfApi to get the dataset info.
committer_hf_api (`huggingface_hub.HfApi`):
The HfApi to use to commit the operations.
dataset (`str`):
The dataset in which the commit will be created, for example:
`"username/my_dataset"`
config (`str`):
The dataset configuration.
config_names (`list[str]`):
The list of all the configurations of this dataset. This is used to clean
            the other files and directories in the repo, if any.
parquet_operations (`list[huggingface_hub.hf_api.CommitOperation]`):
List of commit operation for the parquet conversion. It could be
file additions or file copies for example.
commit_message (`str`):
The summary (first line) of the commit that will be created.
target_revision (`str`, *optional*):
The git revision to commit from. Defaults to the head of the `"main"` branch.
Returns:
[`list[huggingface_hub.CommitInfo]`]:
List of [`CommitInfo`] containing information about the newly created commit (commit hash, commit
url, pr url, commit message,...).
Raises:
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If commit message is empty.
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If parent commit is not a valid commit OID.
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If the Hub API returns an HTTP 400 error (bad request)
[`huggingface_hub.utils.RepositoryNotFoundError`]:
If repository is not found (error 404): wrong repo_id/repo_type, private
but not authenticated or repo does not exist.
[`libcommon.exceptions.CreateCommitError`]:
If one of the commits could not be created on the Hub.
"""
target_dataset_info = hf_api.dataset_info(repo_id=dataset, revision=target_revision, files_metadata=False)
all_repo_files: set[str] = {f.rfilename for f in target_dataset_info.siblings}
delete_operations = get_delete_operations(
parquet_operations=parquet_operations, all_repo_files=all_repo_files, config_names=config_names, config=config
)
operations = delete_operations + parquet_operations
return create_commits(
committer_hf_api,
repo_id=dataset,
revision=target_revision,
operations=operations,
commit_message=commit_message,
parent_commit=target_dataset_info.sha,
)
def compute_config_parquet_and_info_response(
job_id: str,
dataset: str,
config: str,
hf_endpoint: str,
hf_token: Optional[str],
committer_hf_token: Optional[str],
source_revision: str,
target_revision: str,
commit_message: str,
url_template: str,
blocked_datasets: list[str],
max_dataset_size: int,
max_external_data_files: int,
max_row_group_byte_size_for_copy: int,
no_max_size_limit_datasets: list[str],
) -> ConfigParquetAndInfoResponse:
"""
Get the response of config-parquet-and-info for one specific dataset and config on huggingface.co.
It is assumed that the dataset can be accessed with the token.
Args:
job_id (`str`):
The id of the current Job. It is used to lock the access to the parquet conversion branch on the Hub.
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
Dataset configuration name
hf_endpoint (`str`):
The Hub endpoint (for example: "https://huggingface.co")
hf_token (`str`, `optional`):
An app authentication token with read access to all the datasets.
committer_hf_token (`str`, `optional`):
An app authentication token with write access. It must be part of the `datasets-maintainers`
organization (to create the refs/convert/parquet "branch" and push to it)
source_revision (`str`):
The git revision (e.g. "main" or sha) of the dataset used to prepare the parquet files
target_revision (`str`):
The target git revision (e.g. "refs/convert/parquet") of the dataset where to store the parquet files
commit_message (`str`):
The commit message to use when storing the parquet files
url_template (`str`):
The template to use to build the parquet file url
blocked_datasets (`list[str]`):
The list of blocked datasets. If empty, no dataset is blocked.
max_dataset_size (`int`):
The maximum size of a dataset in bytes. If the dataset is under the limit (which means that the size
can be fetched), it will be allowed.
max_external_data_files (`int`):
The maximum number of external data files of a dataset. This is for datasets with loading scripts only.
max_row_group_byte_size_for_copy (`int`):
The maximum size in bytes of parquet files that are allowed to be copied without being converted.
no_max_size_limit_datasets (`list[str]`):
List of datasets that should be fully converted (no partial conversion).
Returns:
`ConfigParquetAndInfoResponse`: An object with the config_parquet_and_info_response
(dataset info and list of parquet files).
Raises the following errors:
- [`libcommon.exceptions.DatasetNotFoundError`]:
if the dataset does not exist, or if the token does not give the sufficient access to the dataset,
- ['requests.exceptions.HTTPError'](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
any other error when asking access
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.CreateCommitError`]:
If one of the commits could not be created on the Hub.
- [`libcommon.exceptions.DatasetInBlockListError`]
If the dataset is in the list of blocked datasets.
- [`libcommon.exceptions.DatasetManualDownloadError`]:
If the dataset requires manual download.
- [`libcommon.exceptions.DatasetRevisionNotFoundError`]
If the revision does not exist or cannot be accessed using the token.
- [`libcommon.exceptions.DatasetTooBigFromDatasetsError`]
If the dataset is too big to be converted to parquet, as measured by the sum of the configs
sizes given by the datasets library.
- [`libcommon.exceptions.DatasetTooBigFromHubError`]
If the dataset is too big to be converted to parquet, as measured by the sum of the repository
files sizes given by the Hub.
- [`libcommon.exceptions.EmptyDatasetError`]
The dataset is empty.
- [`libcommon.exceptions.ConfigNamesError`]
If the list of configurations could not be obtained using the datasets library.
- [`libcommon.exceptions.DatasetWithTooManyExternalFilesError`]
If the dataset has too many external files to be converted to parquet
- [`libcommon.exceptions.DatasetWithTooBigExternalFilesError`]
If the dataset is too big external files be converted to parquet
- [`libcommon.exceptions.UnsupportedExternalFilesError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`libcommon.exceptions.ExternalFilesSizeRequestHTTPError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`libcommon.exceptions.ExternalFilesSizeRequestConnectionError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`libcommon.exceptions.ExternalFilesSizeRequestTimeoutError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`libcommon.exceptions.ExternalFilesSizeRequestError`]
If we failed to get the external files sizes to make sure we can convert the dataset to parquet
- [`libcommon.exceptions.PreviousStepFormatError`]
          If the content of the previous step does not have the expected format
- [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If the datasets.config.HF_ENDPOINT is not set to the expected value
"""
logging.info(f"get parquet files and dataset info for {dataset=} {config=}")
raise_if_blocked(dataset=dataset, blocked_datasets=blocked_datasets)
logging.info(f"getting config names for {dataset=}")
previous_step = "dataset-config-names"
config_names_best_response = get_previous_step_or_raise(kinds=[previous_step], dataset=dataset)
config_names_content = config_names_best_response.response["content"]
if "config_names" not in config_names_content:
raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.")
if not isinstance(config_names_content["config_names"], list):
raise PreviousStepFormatError(
"Previous step did not return the expected content.",
TypeError(f"config_names should be a list, but got {type(config_names_content['config_names'])}"),
)
config_names = {config_name_item["config"] for config_name_item in config_names_content["config_names"]}
if config not in config_names:
raise ConfigNamesError(f"{config=} does not exist in {dataset=}")
hf_api = HfApi(endpoint=hf_endpoint, token=hf_token)
committer_hf_api = HfApi(endpoint=hf_endpoint, token=committer_hf_token)
download_config = DownloadConfig(delete_extracted=True)
try:
builder = load_dataset_builder(
path=dataset,
name=config,
revision=source_revision,
token=hf_token,
download_config=download_config,
)
except _EmptyDatasetError as err:
raise EmptyDatasetError(f"{dataset=} is empty.", cause=err) from err
except FileNotFoundError as err:
raise DatasetNotFoundError("The dataset, or the revision, does not exist on the Hub.") from err
partial = False
if is_parquet_builder_with_hub_files(builder):
try:
parquet_operations = copy_parquet_files(builder)
validate = ParquetFileValidator(max_row_group_byte_size=max_row_group_byte_size_for_copy).validate
fill_builder_info(builder, hf_endpoint=hf_endpoint, hf_token=hf_token, validate=validate)
except TooBigRowGroupsError as err:
            # aim for a writer_batch_size that is a factor of 100
# and with a batch_byte_size that is smaller than max_row_group_byte_size_for_copy
writer_batch_size = get_writer_batch_size_from_row_group_size(
num_rows=err.num_rows,
row_group_byte_size=err.row_group_byte_size,
max_row_group_byte_size=max_row_group_byte_size_for_copy,
)
parquet_operations, partial = stream_convert_to_parquet(
builder,
max_dataset_size=None if dataset in no_max_size_limit_datasets else max_dataset_size,
writer_batch_size=writer_batch_size,
)
else:
raise_if_requires_manual_download(
builder=builder,
hf_endpoint=hf_endpoint,
hf_token=hf_token,
)
dataset_info = get_dataset_info_for_supported_datasets(
dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, revision=source_revision, files_metadata=True
)
if is_dataset_too_big(
dataset_info=dataset_info,
builder=builder,
hf_endpoint=hf_endpoint,
hf_token=hf_token,
max_dataset_size=max_dataset_size,
max_external_data_files=max_external_data_files,
):
parquet_operations, partial = stream_convert_to_parquet(
builder, max_dataset_size=None if dataset in no_max_size_limit_datasets else max_dataset_size
)
else:
parquet_operations = convert_to_parquet(builder)
try:
        # the git branch lock below times out after ~7 minutes (see LOCK_GIT_BRANCH_RETRY_SLEEPS)
with lock.git_branch(
dataset=dataset, branch=target_revision, owner=job_id, sleeps=LOCK_GIT_BRANCH_RETRY_SLEEPS
):
# create the target revision if we managed to get the parquet files and it does not exist yet
# (clone from initial commit to avoid cloning all repo's files)
create_branch(
dataset=dataset,
target_revision=target_revision,
hf_api=hf_api,
committer_hf_api=committer_hf_api,
)
# commit the parquet files
commit_parquet_conversion(
hf_api=hf_api,
committer_hf_api=committer_hf_api,
dataset=dataset,
config=config,
parquet_operations=parquet_operations,
config_names=config_names,
target_revision=target_revision,
commit_message=commit_message,
)
# call the API again to get the list of parquet files
target_dataset_info = hf_api.dataset_info(repo_id=dataset, revision=target_revision, files_metadata=True)
except TimeoutError as err:
raise LockedDatasetTimeoutError("the dataset is currently locked, please try again later.") from err
except RepositoryNotFoundError as err:
raise DatasetNotFoundError("The dataset does not exist on the Hub (was deleted during job).") from err
repo_files = [
repo_file
for repo_file in target_dataset_info.siblings
if repo_file.rfilename.startswith(f"{config}/") and repo_file.rfilename.endswith(".parquet")
]
repo_files.sort(key=repo_file_rfilename_sort_key)
# we might want to check if the sha of the parquet files is the same as the one we just uploaded
# we could also check that the list of parquet files is exactly what we expect
# let's not over engineer this for now. After all, what is on the Hub is the source of truth
# and the /parquet response is more a helper to get the list of parquet files
return ConfigParquetAndInfoResponse(
parquet_files=[
create_parquet_file_item(
repo_file=repo_file,
dataset=dataset,
config=config,
hf_endpoint=hf_endpoint,
target_revision=target_revision,
url_template=url_template,
)
for repo_file in repo_files
],
dataset_info=asdict(builder.info),
partial=partial,
)
class ConfigParquetAndInfoJobRunner(ConfigJobRunnerWithDatasetsCache):
parquet_and_info_config: ParquetAndInfoConfig
@staticmethod
def get_job_type() -> str:
return "config-parquet-and-info"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_VERSION
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
hf_datasets_cache: Path,
) -> None:
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
hf_datasets_cache=hf_datasets_cache,
)
self.parquet_and_info_config = app_config.parquet_and_info
def compute(self) -> CompleteJobResult:
return CompleteJobResult(
compute_config_parquet_and_info_response(
job_id=self.job_info["job_id"],
dataset=self.dataset,
config=self.config,
hf_endpoint=self.app_config.common.hf_endpoint,
hf_token=self.app_config.common.hf_token,
committer_hf_token=self.parquet_and_info_config.committer_hf_token,
source_revision=self.parquet_and_info_config.source_revision,
target_revision=self.parquet_and_info_config.target_revision,
commit_message=self.parquet_and_info_config.commit_message,
url_template=self.parquet_and_info_config.url_template,
blocked_datasets=self.parquet_and_info_config.blocked_datasets,
max_dataset_size=self.parquet_and_info_config.max_dataset_size,
max_external_data_files=self.parquet_and_info_config.max_external_data_files,
max_row_group_byte_size_for_copy=self.parquet_and_info_config.max_row_group_byte_size_for_copy,
no_max_size_limit_datasets=self.parquet_and_info_config.no_max_size_limit_datasets,
)
)
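# Illustrative sketch (not part of the upstream module; the formula below is an assumption).
# The TooBigRowGroupsError handler above relies on get_writer_batch_size_from_row_group_size,
# a helper defined earlier in this module, to pick a writer_batch_size that keeps row groups
# under max_row_group_byte_size_for_copy. A minimal version of that derivation might look like
# the following; the rounding to a multiple of 100 and the lower bound are illustrative only.
def _sketch_writer_batch_size(num_rows: int, row_group_byte_size: int, max_row_group_byte_size: int) -> int:
    if row_group_byte_size <= max_row_group_byte_size:
        return num_rows
    # scale the row count down in proportion to the allowed row group byte size,
    # then round down to a multiple of 100 (never below 100)
    scaled_rows = int(num_rows * max_row_group_byte_size / row_group_byte_size)
    return max(100, (scaled_rows // 100) * 100)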
| datasets-server-main | services/worker/src/worker/job_runners/config/parquet_and_info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import PROCESSING_STEP_CONFIG_SIZE_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import get_previous_step_or_raise
from worker.dtos import CompleteJobResult, ConfigSize, ConfigSizeResponse, SplitSize
from worker.job_runners.config.config_job_runner import ConfigJobRunner
def compute_config_size_response(dataset: str, config: str) -> ConfigSizeResponse:
"""
Get the response of config-size for one specific dataset and config on huggingface.co.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
Returns:
`ConfigSizeResponse`: An object with the size_response.
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
          If the content of the previous step does not have the expected format
"""
logging.info(f"get size for dataset={dataset}, config={config}")
dataset_info_best_response = get_previous_step_or_raise(
kinds=["config-parquet-and-info"], dataset=dataset, config=config
)
content = dataset_info_best_response.response["content"]
if "dataset_info" not in content:
raise PreviousStepFormatError("Previous step did not return the expected content: 'dataset_info'.")
if not isinstance(content["dataset_info"], dict):
raise PreviousStepFormatError(
"Previous step did not return the expected content.",
TypeError(f"dataset_info should be a dict, but got {type(content['dataset_info'])}"),
)
try:
config_info = content["dataset_info"]
num_columns = len(config_info["features"])
split_sizes: list[SplitSize] = [
{
"dataset": dataset,
"config": config,
"split": split_info["name"],
"num_bytes_parquet_files": sum(
x["size"]
for x in content["parquet_files"]
if x["config"] == config and x["split"] == split_info["name"]
),
"num_bytes_memory": split_info["num_bytes"] if "num_bytes" in split_info else 0,
"num_rows": split_info["num_examples"] if "num_examples" in split_info else 0,
"num_columns": num_columns,
}
for split_info in config_info["splits"].values()
]
config_size = ConfigSize(
{
"dataset": dataset,
"config": config,
"num_bytes_original_files": config_info.get("download_size"),
"num_bytes_parquet_files": sum(split_size["num_bytes_parquet_files"] for split_size in split_sizes),
"num_bytes_memory": sum(split_size["num_bytes_memory"] for split_size in split_sizes),
"num_rows": sum(split_size["num_rows"] for split_size in split_sizes),
"num_columns": num_columns,
}
)
partial = content["partial"]
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
return ConfigSizeResponse(
{
"size": {
"config": config_size,
"splits": split_sizes,
},
"partial": partial,
}
)
class ConfigSizeJobRunner(ConfigJobRunner):
@staticmethod
def get_job_type() -> str:
return "config-size"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_CONFIG_SIZE_VERSION
def compute(self) -> CompleteJobResult:
return CompleteJobResult(compute_config_size_response(dataset=self.dataset, config=self.config))
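# Illustrative sketch (not part of the upstream module; values are assumptions).
# A self-contained toy version of the per-split aggregation performed above: parquet byte
# sizes are summed per (config, split) pair from the "parquet_files" list of the previous step.
def _sketch_split_parquet_bytes() -> int:
    toy_parquet_files = [
        {"config": "default", "split": "train", "size": 2048},
        {"config": "default", "split": "train", "size": 1024},
        {"config": "default", "split": "test", "size": 512},
    ]
    # same expression as in compute_config_size_response, specialized to the "train" split
    return sum(x["size"] for x in toy_parquet_files if x["config"] == "default" and x["split"] == "train")  # 3072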
| datasets-server-main | services/worker/src/worker/job_runners/config/size.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from http import HTTPStatus
from libcommon.constants import PROCESSING_STEP_CONFIG_IS_VALID_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import (
CacheEntryDoesNotExistError,
get_previous_step_or_raise,
get_response,
)
from worker.dtos import IsValidResponse, JobResult
from worker.job_runners.config.config_job_runner import ConfigJobRunner
def compute_is_valid_response(dataset: str, config: str) -> tuple[IsValidResponse, float]:
"""
Get the response of /is-valid for one specific dataset config on huggingface.co.
    A dataset config is valid if at least one of its splits is valid, i.e. if any of the
    split-level artifacts reports a valid response.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
Returns:
`tuple[IsValidResponse, float]`: The response (viewer, preview, search) and the progress.
"""
logging.info(f"get is-valid response for {dataset=} {config=}")
split_names_response = get_previous_step_or_raise(
kinds=["config-split-names-from-streaming", "config-split-names-from-info"],
dataset=dataset,
config=config,
)
content = split_names_response.response["content"]
if "splits" not in content:
raise PreviousStepFormatError("Previous step did not return the expected content: 'splits'.")
preview = False
viewer = False
search = False
try:
total = 0
pending = 0
for split_item in content["splits"]:
split = split_item["split"]
total += 1
try:
response = get_response(kind="split-is-valid", dataset=dataset, config=config, split=split)
except CacheEntryDoesNotExistError:
logging.debug("No response found in previous step for this dataset: 'split-is-valid'.")
pending += 1
continue
if response["http_status"] != HTTPStatus.OK:
logging.debug(f"Previous step gave an error: {response['http_status']}.")
continue
split_is_valid_content = response["content"]
preview = preview or split_is_valid_content["preview"]
viewer = viewer or split_is_valid_content["viewer"]
search = search or split_is_valid_content["search"]
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
progress = (total - pending) / total if total else 1.0
return (
IsValidResponse(
preview=preview,
viewer=viewer,
search=search,
),
progress,
)
class ConfigIsValidJobRunner(ConfigJobRunner):
@staticmethod
def get_job_type() -> str:
return "config-is-valid"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_CONFIG_IS_VALID_VERSION
def compute(self) -> JobResult:
response_content, progress = compute_is_valid_response(dataset=self.dataset, config=self.config)
return JobResult(response_content, progress=progress)
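# Illustrative sketch (not part of the upstream module; values are assumptions).
# The aggregation above is an "or" over the split-level responses, and progress is the
# fraction of splits whose "split-is-valid" entry is already cached. With two splits,
# one cached and one still pending:
def _sketch_config_is_valid_aggregation() -> tuple[bool, float]:
    cached_split_responses = [{"preview": True, "viewer": False, "search": False}]  # 1 of 2 splits cached
    total, pending = 2, 1
    preview = any(r["preview"] for r in cached_split_responses)
    progress = (total - pending) / total if total else 1.0  # 0.5
    return preview, progress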
| datasets-server-main | services/worker/src/worker/job_runners/config/is_valid.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from http import HTTPStatus
from libcommon.constants import PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import (
CacheEntryDoesNotExistError,
get_previous_step_or_raise,
get_response,
)
from worker.dtos import JobResult, OptInOutUrlsCountResponse
from worker.job_runners.config.config_job_runner import ConfigJobRunner
def compute_opt_in_out_urls_scan_response(dataset: str, config: str) -> tuple[OptInOutUrlsCountResponse, float]:
logging.info(f"get config-opt-in-out-urls-count for dataset={dataset} config={config}")
split_names_response = get_previous_step_or_raise(
kinds=["config-split-names-from-streaming", "config-split-names-from-info"],
dataset=dataset,
config=config,
)
content = split_names_response.response["content"]
if "splits" not in content:
raise PreviousStepFormatError("Previous step did not return the expected content: 'splits'.")
urls_columns = []
num_opt_in_urls = 0
num_opt_out_urls = 0
num_urls = 0
num_scanned_rows = 0
full_scan_count = 0
try:
total = 0
pending = 0
for split_item in content["splits"]:
split = split_item["split"]
total += 1
try:
response = get_response(
kind="split-opt-in-out-urls-count", dataset=dataset, config=config, split=split
)
except CacheEntryDoesNotExistError:
logging.debug("No response found in previous step for this dataset: 'split-opt-in-out-urls-count'.")
pending += 1
continue
if response["http_status"] != HTTPStatus.OK:
logging.debug(f"Previous step gave an error: {response['http_status']}.")
continue
split_opt_in_out_content = response["content"]
urls_columns.extend(split_opt_in_out_content["urls_columns"])
num_opt_in_urls += split_opt_in_out_content["num_opt_in_urls"]
num_opt_out_urls += split_opt_in_out_content["num_opt_out_urls"]
num_urls += split_opt_in_out_content["num_urls"]
num_scanned_rows += split_opt_in_out_content["num_scanned_rows"]
full_scan_count += 1 if split_opt_in_out_content["full_scan"] else 0
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
unique_urls_columns = sorted(list(set(urls_columns)))
has_urls_columns = len(unique_urls_columns) > 0
progress = (total - pending) / total if total else 1.0
full_scan = full_scan_count == total
return (
OptInOutUrlsCountResponse(
urls_columns=unique_urls_columns,
has_urls_columns=has_urls_columns,
num_opt_in_urls=num_opt_in_urls,
num_opt_out_urls=num_opt_out_urls,
num_scanned_rows=num_scanned_rows,
num_urls=num_urls,
full_scan=full_scan,
),
progress,
)
class ConfigOptInOutUrlsCountJobRunner(ConfigJobRunner):
@staticmethod
def get_job_type() -> str:
return "config-opt-in-out-urls-count"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VERSION
def compute(self) -> JobResult:
response_content, progress = compute_opt_in_out_urls_scan_response(dataset=self.dataset, config=self.config)
return JobResult(response_content, progress=progress)
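# Illustrative sketch (not part of the upstream module; values are assumptions).
# Note that full_scan is True only when every split reported a full scan: a pending or
# failed split keeps full_scan_count below total.
def _sketch_full_scan_flag() -> bool:
    total = 3
    full_scan_count = 2  # e.g. two splits fully scanned, one split pending
    return full_scan_count == total  # False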
| datasets-server-main | services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from libcommon.constants import (
PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
)
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import get_previous_step_or_raise
from worker.dtos import CompleteJobResult, FullSplitItem, JobRunnerInfo, SplitsList
from worker.job_runners.config.config_job_runner import ConfigJobRunner
def compute_split_names_from_info_response(dataset: str, config: str) -> SplitsList:
"""
Get the response of 'config-split-names-from-info' for one specific dataset and config on huggingface.co
computed from cached response in dataset-info step.
The 'config-split-names-from-info' response generated by this function does not include stats about the split,
like the size or number of samples. See dataset-info or dataset-size for that.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
Returns:
`SplitsList`: An object with the list of split names for the dataset and config.
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
          If the content of the previous step does not have the expected format
"""
logging.info(f"get split names from dataset info for dataset={dataset}, config={config}")
config_info_best_response = get_previous_step_or_raise(kinds=["config-info"], dataset=dataset, config=config)
try:
splits_content = config_info_best_response.response["content"]["dataset_info"]["splits"]
except Exception as e:
raise PreviousStepFormatError("Previous step 'config-info' did not return the expected content.") from e
split_name_items: list[FullSplitItem] = [
{"dataset": dataset, "config": config, "split": str(split)} for split in splits_content
]
return SplitsList(splits=split_name_items)
class ConfigSplitNamesFromInfoJobRunner(ConfigJobRunner):
@staticmethod
def get_job_type() -> str:
return "config-split-names-from-info"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION
@staticmethod
def get_parallel_job_runner() -> JobRunnerInfo:
return JobRunnerInfo(
job_runner_version=PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
job_type="config-split-names-from-streaming",
)
def compute(self) -> CompleteJobResult:
return CompleteJobResult(compute_split_names_from_info_response(dataset=self.dataset, config=self.config))
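# Illustrative sketch (not part of the upstream module; values are assumptions).
# The "splits" field of dataset_info maps split names to split metadata, so iterating over it
# (as the list comprehension above does) yields only the split names.
def _sketch_split_names() -> list[str]:
    splits_content = {"train": {"num_examples": 100}, "test": {"num_examples": 10}}
    return [str(split) for split in splits_content]  # ["train", "test"]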
| datasets-server-main | services/worker/src/worker/job_runners/config/split_names_from_info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import functools
import logging
from typing import Optional
from fsspec.implementations.http import HTTPFileSystem
from libcommon.constants import PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION
from libcommon.exceptions import (
FileSystemError,
ParquetResponseEmptyError,
PreviousStepFormatError,
)
from libcommon.processing_graph import ProcessingStep
from libcommon.simple_cache import get_previous_step_or_raise
from libcommon.storage import StrPath
from libcommon.utils import JobInfo, SplitHubFile
from libcommon.viewer_utils.parquet_metadata import create_parquet_metadata_file
from tqdm.contrib.concurrent import thread_map
from worker.config import AppConfig
from worker.dtos import (
CompleteJobResult,
ConfigParquetMetadataResponse,
ParquetFileMetadataItem,
)
from worker.job_runners.config.config_job_runner import ConfigJobRunner
from worker.utils import get_parquet_file
def create_parquet_metadata_file_from_remote_parquet(
parquet_file_item: SplitHubFile, fs: HTTPFileSystem, hf_token: Optional[str], parquet_metadata_directory: StrPath
) -> ParquetFileMetadataItem:
try:
parquet_file = get_parquet_file(url=parquet_file_item["url"], fs=fs, hf_token=hf_token)
except Exception as e:
raise FileSystemError(f"Could not read the parquet files: {e}") from e
parquet_metadata_subpath = create_parquet_metadata_file(
dataset=parquet_file_item["dataset"],
config=parquet_file_item["config"],
split=parquet_file_item["split"],
parquet_file_metadata=parquet_file.metadata,
filename=parquet_file_item["filename"],
parquet_metadata_directory=parquet_metadata_directory,
)
return ParquetFileMetadataItem(
dataset=parquet_file_item["dataset"],
config=parquet_file_item["config"],
split=parquet_file_item["split"],
url=parquet_file_item["url"],
filename=parquet_file_item["filename"],
size=parquet_file_item["size"],
num_rows=parquet_file.metadata.num_rows,
parquet_metadata_subpath=parquet_metadata_subpath,
)
def compute_parquet_metadata_response(
dataset: str, config: str, hf_token: Optional[str], parquet_metadata_directory: StrPath
) -> ConfigParquetMetadataResponse:
"""
Store the config's parquet metadata on the disk and return the list of local metadata files.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
A configuration name.
hf_token (`str`, *optional*):
An authentication token (See https://huggingface.co/settings/token)
parquet_metadata_directory (`str` or `pathlib.Path`):
The directory where the parquet metadata files are stored.
Returns:
`ConfigParquetMetadataResponse`: An object with the list of parquet metadata files.
<Tip>
Raises the following errors:
- [`~libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`~libcommon.exceptions.PreviousStepFormatError`]
          If the content of the previous step does not have the expected format
- [`~libcommon.exceptions.ParquetResponseEmptyError`]
If the previous step provided an empty list of parquet files.
- [`~libcommon.exceptions.FileSystemError`]
          If the remote filesystem couldn't access the parquet files.
</Tip>
"""
logging.info(f"get parquet files for dataset={dataset}, config={config}")
config_parquet_best_response = get_previous_step_or_raise(kinds=["config-parquet"], dataset=dataset, config=config)
try:
parquet_files_content = config_parquet_best_response.response["content"]["parquet_files"]
parquet_file_items: list[SplitHubFile] = [
parquet_file_item for parquet_file_item in parquet_files_content if parquet_file_item["config"] == config
]
if not parquet_file_items:
raise ParquetResponseEmptyError("No parquet files found.")
content = config_parquet_best_response.response["content"]
if "features" in content and isinstance(content["features"], dict):
features = content["features"] # config-parquet version<6 didn't have features
else:
# (July 23) we can remove this later and raise an error instead (can be None for backward compatibility)
features = None
partial = config_parquet_best_response.response["content"]["partial"]
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.") from e
fs = HTTPFileSystem()
desc = f"{dataset}/{config}"
parquet_files_metadata: list[ParquetFileMetadataItem] = thread_map(
functools.partial(
create_parquet_metadata_file_from_remote_parquet,
fs=fs,
hf_token=hf_token,
parquet_metadata_directory=parquet_metadata_directory,
),
parquet_file_items,
desc=desc,
unit="pq",
disable=True,
)
return ConfigParquetMetadataResponse(
parquet_files_metadata=parquet_files_metadata, features=features, partial=partial
)
class ConfigParquetMetadataJobRunner(ConfigJobRunner):
parquet_metadata_directory: StrPath
@staticmethod
def get_job_type() -> str:
return "config-parquet-metadata"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
parquet_metadata_directory: StrPath,
) -> None:
super().__init__(
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
)
self.parquet_metadata_directory = parquet_metadata_directory
def compute(self) -> CompleteJobResult:
return CompleteJobResult(
compute_parquet_metadata_response(
dataset=self.dataset,
config=self.config,
hf_token=self.app_config.common.hf_token,
parquet_metadata_directory=self.parquet_metadata_directory,
)
)
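# Illustrative sketch (not part of the upstream module; the auth handling is an assumption).
# get_parquet_file (imported from worker.utils) conceptually opens the remote parquet file
# over HTTP and wraps it with pyarrow, roughly as below; the real helper may build the
# authentication headers differently.
def _sketch_get_parquet_file(url: str, fs: HTTPFileSystem, hf_token: Optional[str]):  # -> pyarrow ParquetFile
    import pyarrow.parquet as pq
    # hypothetical bearer-token header for gated or private repositories
    headers = {"authorization": f"Bearer {hf_token}"} if hf_token else {}
    return pq.ParquetFile(fs.open(url, headers=headers))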
| datasets-server-main | services/worker/src/worker/job_runners/config/parquet_metadata.py |
import logging
from libcommon.constants import PROCESSING_STEP_CONFIG_INFO_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import get_previous_step_or_raise
from worker.dtos import CompleteJobResult, ConfigInfoResponse
from worker.job_runners.config.config_job_runner import ConfigJobRunner
def compute_config_info_response(dataset: str, config: str) -> ConfigInfoResponse:
"""
Get the response of config-info for one specific config of a specific dataset on huggingface.co.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
config (`str`):
Dataset configuration name
Returns:
`ConfigInfoResponse`: An object with the dataset_info response for requested config.
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
If the content of the previous step doesn't have the expected format.
"""
logging.info(f"get dataset_info for {dataset=} and {config=}")
previous_step = "config-parquet-and-info"
dataset_info_best_response = get_previous_step_or_raise(kinds=[previous_step], dataset=dataset, config=config)
content = dataset_info_best_response.response["content"]
try:
config_info = content["dataset_info"]
partial = content["partial"]
except Exception as e:
raise PreviousStepFormatError(
f"Previous step '{previous_step}' did not return the expected content: 'dataset_info'.", e
) from e
if not isinstance(config_info, dict):
raise PreviousStepFormatError(
"Previous step did not return the expected content.",
TypeError(f"dataset_info should be a dict, but got {type(config_info)}"),
)
return ConfigInfoResponse(dataset_info=config_info, partial=partial)
class ConfigInfoJobRunner(ConfigJobRunner):
@staticmethod
def get_job_type() -> str:
return "config-info"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_CONFIG_INFO_VERSION
def compute(self) -> CompleteJobResult:
return CompleteJobResult(compute_config_info_response(dataset=self.dataset, config=self.config))
| datasets-server-main | services/worker/src/worker/job_runners/config/info.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from http import HTTPStatus
from libcommon.constants import PROCESSING_STEP_DATASET_SPLIT_NAMES_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import get_best_response, get_previous_step_or_raise
from worker.dtos import (
DatasetSplitNamesResponse,
FailedConfigItem,
FullConfigItem,
FullSplitItem,
JobResult,
)
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
def compute_dataset_split_names_response(dataset: str) -> tuple[DatasetSplitNamesResponse, float]:
"""
Get the response of /splits for one specific dataset on huggingface.co
computed from responses cached in 'config-split-names-from-info' or 'config-split-names-from-streaming' steps.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated by a `/`.
Returns:
`DatasetSplitNamesResponse`: An object with a list of split names for the dataset [splits],
a list of pending configs to be processed [pending] and the list of errors [failed] by config.
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
          If the previous step gave an error.
        - [`libcommon.exceptions.PreviousStepFormatError`]
          If the content of the previous step does not have the expected format
"""
logging.info(f"get dataset split names for dataset={dataset}")
# Get the config names from the previous steps
config_names_best_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
content = config_names_best_response.response["content"]
if "config_names" not in content:
raise PreviousStepFormatError("'dataset-config-names' did not return the expected content: 'config_names'.")
config_names = [config_name_item["config"] for config_name_item in content["config_names"]]
if any(not isinstance(config_name, str) for config_name in config_names):
raise PreviousStepFormatError("Previous step 'dataset-config-names' did not return a list of config names.")
split_names_cache_kinds = ["config-split-names-from-info", "config-split-names-from-streaming"]
try:
splits: list[FullSplitItem] = []
pending: list[FullConfigItem] = []
failed: list[FailedConfigItem] = []
total = 0
for config in config_names:
total += 1
best_response = get_best_response(split_names_cache_kinds, dataset=dataset, config=config)
if best_response.response["error_code"] == "CachedResponseNotFound":
logging.debug(
"No response (successful or erroneous) found in cache for the previous steps"
f" '{split_names_cache_kinds}' for this dataset."
)
pending.append(FullConfigItem({"dataset": dataset, "config": config}))
continue
if best_response.response["http_status"] != HTTPStatus.OK:
logging.debug(f"No successful response found in the previous steps {split_names_cache_kinds}.")
failed.append(
FailedConfigItem(
{
"dataset": dataset,
"config": config,
"error": best_response.response["content"],
}
)
)
continue
splits.extend(
[
FullSplitItem({"dataset": dataset, "config": config, "split": split_content["split"]})
for split_content in best_response.response["content"]["splits"]
]
)
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
progress = (total - len(pending)) / total if total else 1.0
return (
DatasetSplitNamesResponse(
{
"splits": splits,
"pending": pending,
"failed": failed,
}
),
progress,
)
class DatasetSplitNamesJobRunner(DatasetJobRunner):
@staticmethod
def get_job_type() -> str:
return "dataset-split-names"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_DATASET_SPLIT_NAMES_VERSION
def compute(self) -> JobResult:
response_content, progress = compute_dataset_split_names_response(dataset=self.dataset)
return JobResult(response_content, progress=progress)
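# Illustrative sketch (not part of the upstream module; a simplified stand-in, not the real API).
# get_best_response (from libcommon.simple_cache) picks the "best" cached entry among the two
# split-names kinds, preferring a successful one. A simplified selection over an assumed
# in-memory cache could look like this:
def _sketch_get_best_response(cache: dict[str, dict], kinds: list[str]) -> dict:
    for kind in kinds:
        response = cache.get(kind)
        if response and response.get("http_status") == HTTPStatus.OK:
            return response
    # fall back to the first existing entry (even if erroneous), else a "not found" marker
    return next((cache[kind] for kind in kinds if kind in cache), {"error_code": "CachedResponseNotFound"})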
| datasets-server-main | services/worker/src/worker/job_runners/dataset/split_names.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from http import HTTPStatus
from libcommon.constants import PROCESSING_STEP_DATASET_PARQUET_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import (
CacheEntryDoesNotExistError,
get_previous_step_or_raise,
get_response,
)
from libcommon.utils import SplitHubFile
from worker.dtos import (
ConfigParquetResponse,
DatasetParquetResponse,
JobResult,
PreviousJob,
)
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
def compute_parquet_response(dataset: str) -> tuple[DatasetParquetResponse, float]:
"""
Get the response of dataset-parquet for one specific dataset on huggingface.co.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
Returns:
`DatasetParquetResponse`: An object with the parquet_response (list of parquet files).
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
          If the content of the previous step does not have the expected format
"""
logging.info(f"get parquet files for dataset={dataset}")
config_names_best_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
content = config_names_best_response.response["content"]
if "config_names" not in content:
raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.")
try:
parquet_files: list[SplitHubFile] = []
total = 0
pending = []
failed = []
partial = False
for config_item in content["config_names"]:
config = config_item["config"]
total += 1
try:
response = get_response(kind="config-parquet", dataset=dataset, config=config)
except CacheEntryDoesNotExistError:
logging.debug("No response found in previous step for this dataset: 'config-parquet' endpoint.")
pending.append(
PreviousJob(
{
"kind": "config-parquet",
"dataset": dataset,
"config": config,
"split": None,
}
)
)
continue
if response["http_status"] != HTTPStatus.OK:
logging.debug(f"Previous step gave an error: {response['http_status']}.")
failed.append(
PreviousJob(
{
"kind": "config-parquet",
"dataset": dataset,
"config": config,
"split": None,
}
)
)
continue
config_parquet_content = ConfigParquetResponse(
parquet_files=response["content"]["parquet_files"],
partial=response["content"]["partial"],
features=None, # we can keep it None since we don't pass it to DatasetParquetResponse anyway
)
parquet_files.extend(config_parquet_content["parquet_files"])
partial = partial or config_parquet_content["partial"]
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
progress = (total - len(pending)) / total if total else 1.0
return (
DatasetParquetResponse(parquet_files=parquet_files, pending=pending, failed=failed, partial=partial),
progress,
)
class DatasetParquetJobRunner(DatasetJobRunner):
@staticmethod
def get_job_type() -> str:
return "dataset-parquet"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_DATASET_PARQUET_VERSION
def compute(self) -> JobResult:
response_content, progress = compute_parquet_response(dataset=self.dataset)
return JobResult(response_content, progress=progress)
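# Illustrative sketch (not part of the upstream module; values and URL layout are assumptions).
# Each entry aggregated into parquet_files above is a SplitHubFile-shaped dict, roughly:
_SKETCH_PARQUET_FILE_ITEM = {
    "dataset": "user/dataset",
    "config": "default",
    "split": "train",
    "url": "https://huggingface.co/datasets/user/dataset/resolve/refs%2Fconvert%2Fparquet/default/train/0000.parquet",
    "filename": "0000.parquet",
    "size": 12345,
}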
| datasets-server-main | services/worker/src/worker/job_runners/dataset/parquet.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | services/worker/src/worker/job_runners/dataset/__init__.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from libcommon.constants import PROCESSING_STEP_DATASET_HUB_CACHE_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import get_previous_step_or_raise
from worker.dtos import DatasetHubCacheResponse, JobResult
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
def compute_hub_cache_response(dataset: str) -> tuple[DatasetHubCacheResponse, float]:
"""
Get the content of a /sse/hub-cache SSE for one specific dataset on huggingface.co.
Its purpose is specific to the Hub, and we won't ensure backward compatibility for this step.
It provides information about:
- the capabilities of the dataset: preview and viewer
- the number of rows and if the dataset is partial
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
Returns:
`tuple[DatasetHubCacheResponse, float]`: The response and the progress.
"""
logging.info(f"get hub_cache response for {dataset=}")
is_valid_response = get_previous_step_or_raise(kinds=["dataset-is-valid"], dataset=dataset)
content = is_valid_response.response["content"]
if (
"preview" not in content
or not isinstance(content["preview"], bool)
or "viewer" not in content
or not isinstance(content["viewer"], bool)
):
raise PreviousStepFormatError(
"Previous step 'dataset-is-valid' did not return the expected content: 'preview', 'viewer' or 'progress'."
)
preview = content["preview"]
viewer = content["viewer"]
is_valid_progress = is_valid_response.response["progress"]
size_response = get_previous_step_or_raise(kinds=["dataset-size"], dataset=dataset)
content = size_response.response["content"]
if (
"partial" not in content
or not isinstance(content["partial"], bool)
or "size" not in content
or "dataset" not in content["size"]
or "num_rows" not in content["size"]["dataset"]
or not isinstance(content["size"]["dataset"]["num_rows"], int)
):
raise PreviousStepFormatError(
"Previous step 'dataset-size' did not return the expected content: 'partial' or 'size.dataset.num_rows'."
)
partial = content["partial"]
num_rows = content["size"]["dataset"]["num_rows"]
size_progress = size_response.response["progress"]
progress = min((p for p in [is_valid_progress, size_progress] if p is not None), default=0.0)
return (
DatasetHubCacheResponse(
preview=preview,
viewer=viewer,
partial=partial,
num_rows=num_rows,
),
progress,
)
class DatasetHubCacheJobRunner(DatasetJobRunner):
@staticmethod
def get_job_type() -> str:
return "dataset-hub-cache"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_DATASET_HUB_CACHE_VERSION
def compute(self) -> JobResult:
response_content, progress = compute_hub_cache_response(dataset=self.dataset)
return JobResult(response_content, progress=progress)
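# Illustrative sketch (not part of the upstream module; values are assumptions).
# The overall progress is the most pessimistic of the two upstream progresses (is-valid and
# size), defaulting to 0.0 when both are None.
def _sketch_hub_cache_progress() -> float:
    is_valid_progress, size_progress = 0.5, None
    return min((p for p in [is_valid_progress, size_progress] if p is not None), default=0.0)  # 0.5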
| datasets-server-main | services/worker/src/worker/job_runners/dataset/hub_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from http import HTTPStatus
from typing import Optional
from libcommon.constants import PROCESSING_STEP_DATASET_SIZE_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import (
CacheEntryDoesNotExistError,
get_previous_step_or_raise,
get_response,
)
from worker.dtos import (
ConfigSize,
ConfigSizeResponse,
DatasetSize,
DatasetSizeResponse,
JobResult,
PreviousJob,
SplitSize,
)
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
def compute_sizes_response(dataset: str) -> tuple[DatasetSizeResponse, float]:
"""
Get the response of dataset-size for one specific dataset on huggingface.co.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
Returns:
`DatasetSizeResponse`: An object with the sizes_response.
Raises the following errors:
- [`libcommon.simple_cache.CachedArtifactError`]
If the previous step gave an error.
- [`libcommon.exceptions.PreviousStepFormatError`]
          If the content of the previous step does not have the expected format
"""
logging.info(f"get sizes for dataset={dataset}")
config_names_best_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
content = config_names_best_response.response["content"]
if "config_names" not in content:
raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.")
try:
split_sizes: list[SplitSize] = []
config_sizes: list[ConfigSize] = []
total = 0
pending = []
failed = []
partial = False
for config_item in content["config_names"]:
config = config_item["config"]
total += 1
try:
response = get_response(kind="config-size", dataset=dataset, config=config)
except CacheEntryDoesNotExistError:
logging.debug("No response found in previous step for this dataset: 'config-size' endpoint.")
pending.append(
PreviousJob(
{
"kind": "config-size",
"dataset": dataset,
"config": config,
"split": None,
}
)
)
continue
if response["http_status"] != HTTPStatus.OK:
logging.debug(f"Previous step gave an error: {response['http_status']}.")
failed.append(
PreviousJob(
{
"kind": "config-size",
"dataset": dataset,
"config": config,
"split": None,
}
)
)
continue
config_size_content = ConfigSizeResponse(
size=response["content"]["size"], partial=response["content"]["partial"]
)
config_sizes.append(config_size_content["size"]["config"])
split_sizes.extend(config_size_content["size"]["splits"])
partial = partial or config_size_content["partial"]
num_bytes_original_files: Optional[int] = 0
for config_size in config_sizes:
if num_bytes_original_files is not None and isinstance(config_size["num_bytes_original_files"], int):
num_bytes_original_files += config_size["num_bytes_original_files"]
else:
num_bytes_original_files = None
break
dataset_size: DatasetSize = {
"dataset": dataset,
"num_bytes_original_files": num_bytes_original_files,
"num_bytes_parquet_files": sum(config_size["num_bytes_parquet_files"] for config_size in config_sizes),
"num_bytes_memory": sum(config_size["num_bytes_memory"] for config_size in config_sizes),
"num_rows": sum(config_size["num_rows"] for config_size in config_sizes),
}
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
progress = (total - len(pending)) / total if total else 1.0
return (
DatasetSizeResponse(
{
"size": {
"dataset": dataset_size,
"configs": config_sizes,
"splits": split_sizes,
},
"pending": pending,
"failed": failed,
"partial": partial,
}
),
progress,
)
class DatasetSizeJobRunner(DatasetJobRunner):
@staticmethod
def get_job_type() -> str:
return "dataset-size"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_DATASET_SIZE_VERSION
def compute(self) -> JobResult:
response_content, progress = compute_sizes_response(dataset=self.dataset)
return JobResult(response_content, progress=progress)
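# Illustrative sketch (not part of the upstream module; values are assumptions).
# num_bytes_original_files is only reported when every config reports it: as soon as one
# config has None (unknown download size), the dataset-level total becomes None. A compact
# equivalent of the loop above:
def _sketch_num_bytes_original_files() -> Optional[int]:
    per_config = [1000, 2000, None]
    if any(size is None for size in per_config):
        return None  # this example returns None
    return sum(size for size in per_config if size is not None)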
| datasets-server-main | services/worker/src/worker/job_runners/dataset/size.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from pathlib import Path
from libcommon.exceptions import ParameterMissingError
from libcommon.processing_graph import ProcessingStep
from libcommon.utils import JobInfo
from worker.config import AppConfig
from worker.job_runner import JobRunner
from worker.job_runners._job_runner_with_datasets_cache import (
JobRunnerWithDatasetsCache,
)
class DatasetJobRunner(JobRunner):
dataset: str
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
) -> None:
super().__init__(job_info=job_info, app_config=app_config, processing_step=processing_step)
if job_info["params"]["dataset"] is None:
raise ParameterMissingError("'dataset' parameter is required")
self.dataset = job_info["params"]["dataset"]
class DatasetJobRunnerWithDatasetsCache(JobRunnerWithDatasetsCache, DatasetJobRunner):
def __init__(
self,
job_info: JobInfo,
app_config: AppConfig,
processing_step: ProcessingStep,
hf_datasets_cache: Path,
) -> None:
JobRunnerWithDatasetsCache.__init__(
self=self,
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
hf_datasets_cache=hf_datasets_cache,
)
DatasetJobRunner.__init__(
self=self,
job_info=job_info,
app_config=app_config,
processing_step=processing_step,
)
| datasets-server-main | services/worker/src/worker/job_runners/dataset/dataset_job_runner.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
import logging
from http import HTTPStatus
from libcommon.constants import PROCESSING_STEP_DATASET_IS_VALID_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import (
CacheEntryDoesNotExistError,
get_previous_step_or_raise,
get_response,
)
from worker.dtos import IsValidResponse, JobResult
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
def compute_is_valid_response(dataset: str) -> tuple[IsValidResponse, float]:
"""
Get the response of /is-valid for one specific dataset on huggingface.co.
    A dataset is valid if at least one of its configs is valid, i.e. if at least one artifact
    of any of the steps (for viewer and preview) has a valid response.
The deprecated `valid` field is an "or" of the `preview` and `viewer` fields.
Args:
dataset (`str`):
A namespace (user or an organization) and a repo name separated
by a `/`.
Returns:
`tuple[IsValidResponse, float]`: The response (viewer, preview, search) and the progress.
"""
logging.info(f"get is-valid response for {dataset=}")
config_names_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
content = config_names_response.response["content"]
if "config_names" not in content:
raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.")
preview = False
viewer = False
search = False
try:
total = 0
pending = 0
for config_item in content["config_names"]:
config = config_item["config"]
total += 1
try:
response = get_response(kind="config-is-valid", dataset=dataset, config=config)
except CacheEntryDoesNotExistError:
logging.debug("No response found in previous step for this dataset: 'config-is-valid'.")
pending += 1
continue
if response["http_status"] != HTTPStatus.OK:
logging.debug(f"Previous step gave an error: {response['http_status']}.")
continue
config_is_valid_content = response["content"]
preview = preview or config_is_valid_content["preview"]
viewer = viewer or config_is_valid_content["viewer"]
search = search or config_is_valid_content["search"]
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
progress = (total - pending) / total if total else 1.0
return (
IsValidResponse(
preview=preview,
viewer=viewer,
search=search,
),
progress,
)
class DatasetIsValidJobRunner(DatasetJobRunner):
@staticmethod
def get_job_type() -> str:
return "dataset-is-valid"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_DATASET_IS_VALID_VERSION
def compute(self) -> JobResult:
response_content, progress = compute_is_valid_response(dataset=self.dataset)
return JobResult(response_content, progress=progress)
| datasets-server-main | services/worker/src/worker/job_runners/dataset/is_valid.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from http import HTTPStatus
from libcommon.constants import PROCESSING_STEP_DATASET_OPT_IN_OUT_URLS_COUNT_VERSION
from libcommon.exceptions import PreviousStepFormatError
from libcommon.simple_cache import (
CacheEntryDoesNotExistError,
get_previous_step_or_raise,
get_response,
)
from worker.dtos import JobResult, OptInOutUrlsCountResponse
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
def compute_opt_in_out_urls_count_response(dataset: str) -> tuple[OptInOutUrlsCountResponse, float]:
logging.info(f"get opt-in-out-urls-count for dataset={dataset}")
config_names_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
content = config_names_response.response["content"]
if "config_names" not in content:
raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.")
urls_columns = []
num_opt_in_urls = 0
num_opt_out_urls = 0
num_urls = 0
num_scanned_rows = 0
full_scan_count = 0
try:
total = 0
pending = 0
for config_item in content["config_names"]:
config = config_item["config"]
total += 1
try:
response = get_response(kind="config-opt-in-out-urls-count", dataset=dataset, config=config)
except CacheEntryDoesNotExistError:
logging.debug("No response found in previous step for this dataset: 'config-opt-in-out-urls-count'.")
pending += 1
continue
if response["http_status"] != HTTPStatus.OK:
logging.debug(f"Previous step gave an error: {response['http_status']}.")
continue
else:
if response["progress"] and response["progress"] < 1.0:
logging.debug(f"Previous step is still in progress: {response['progress']}.")
pending += 1
continue
split_opt_in_out_content = response["content"]
urls_columns.extend(split_opt_in_out_content["urls_columns"])
num_opt_in_urls += split_opt_in_out_content["num_opt_in_urls"]
num_opt_out_urls += split_opt_in_out_content["num_opt_out_urls"]
num_urls += split_opt_in_out_content["num_urls"]
num_scanned_rows += split_opt_in_out_content["num_scanned_rows"]
full_scan_count += 1 if split_opt_in_out_content["full_scan"] else 0
except Exception as e:
raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
unique_urls_columns = sorted(list(set(urls_columns)))
has_urls_columns = len(unique_urls_columns) > 0
progress = (total - pending) / total if total else 1.0
full_scan = full_scan_count == total
return (
OptInOutUrlsCountResponse(
urls_columns=unique_urls_columns,
has_urls_columns=has_urls_columns,
num_opt_in_urls=num_opt_in_urls,
num_opt_out_urls=num_opt_out_urls,
num_scanned_rows=num_scanned_rows,
num_urls=num_urls,
full_scan=full_scan,
),
progress,
)
class DatasetOptInOutUrlsCountJobRunner(DatasetJobRunner):
@staticmethod
def get_job_type() -> str:
return "dataset-opt-in-out-urls-count"
@staticmethod
def get_job_runner_version() -> int:
return PROCESSING_STEP_DATASET_OPT_IN_OUT_URLS_COUNT_VERSION
def compute(self) -> JobResult:
response_content, progress = compute_opt_in_out_urls_count_response(dataset=self.dataset)
return JobResult(response_content, progress=progress)
| datasets-server-main | services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py |